diff --git "a/5324.jsonl" "b/5324.jsonl" new file mode 100644--- /dev/null +++ "b/5324.jsonl" @@ -0,0 +1,2059 @@ +{"seq_id":"15629187409","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom string import atoi\n\ninput_file=\"PEMS_all\"\noutput_folder=\"days\"\n\nline = None\nwith open(\"randperm\") as fin:\n line = fin.readline()\n\nlist = line.strip()[1:-1].split(' ')\n\nif not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n\nwith open(input_file) as fin:\n os.chdir(output_folder)\n\n i=0\n line = fin.readline()\n while line:\n day = atoi(list[i])\n file = \"{0:03d}\".format(day)\n print(\"output: {}/{}\".format(output_folder, file))\n with open(file, 'w') as fout:\n fout.write(line)\n i=i+1\n line = fin.readline()","repo_name":"mahayash315/traffic","sub_path":"data/PEMS-SF/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"14817528240","text":"import bpy\nfrom bpy.props import PointerProperty, FloatProperty, EnumProperty\nfrom bpy.types import Operator, Panel, PropertyGroup\n\nbl_info = {\n \"name\": \"TexMix\",\n \"author\": \"Phani\",\n \"location\": \"N Panel\",\n \"version\": (1, 0),\n \"blender\": (3, 0, 0),\n \"description\": \"A Blender addon for blending two materials.\",\n \"category\": \"Material\"\n}\n\ndef material_items_callback(self, context):\n materials = [(mat.name, mat.name, \"\") for mat in bpy.data.materials]\n return materials\n\ndef create_node_tree_from_material(material):\n # Create a new node tree\n node_tree = bpy.data.node_groups.new(name=material.name + \"_NodeTree\", type='ShaderNodeTree')\n\n # Create a material output node\n output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')\n output_node.location = (0, 0)\n\n # Get the material's node tree\n material_node_tree = material.node_tree\n\n # Copy the nodes from the material's node tree to the new node tree\n for node in material_node_tree.nodes:\n if node.type == 'BSDF_PRINCIPLED' or node.type == 'OUTPUT_MATERIAL':\n new_node = node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')\n else:\n new_node = node_tree.nodes.new(type=node.type)\n new_node.location = node.location\n new_node.name = node.name\n\n # Copy the node inputs and outputs\n for input in node.inputs:\n new_input = new_node.inputs[input.name]\n input.copy(new_input)\n\n for output in node.outputs:\n new_output = new_node.outputs[output.name]\n output.copy(new_output)\n\n # Copy the links from the material's node tree to the new node tree\n for link in material_node_tree.links:\n node_tree.links.new(\n node_tree.nodes[link.from_node.name].outputs[link.from_socket.name],\n node_tree.nodes[link.to_node.name].inputs[link.to_socket.name]\n )\n\n # Connect the output node to the material's last node\n node_tree.links.new(\n node_tree.nodes[material_node_tree.nodes[-1].name].outputs['Shader'],\n output_node.inputs['Surface']\n )\n\n return node_tree\n\nclass MixOperator(bpy.types.Operator):\n \"\"\"Mix two materials based on the mix ratio\"\"\"\n bl_idname = \"texmix.mix_operator\"\n bl_label = \"Mix Operator\"\n\n def execute(self, context):\n # Get the selected materials\n material_1 = context.scene.texmix_props.material_1\n material_2 = context.scene.texmix_props.material_2\n\n if not material_1 or not material_2:\n self.report({'ERROR'}, \"Please select two valid materials to mix.\")\n return {'CANCELLED'}\n\n # Get the two selected materials by name\n material_1_obj = 
bpy.data.materials.get(material_1)\n material_2_obj = bpy.data.materials.get(material_2)\n\n if not material_1_obj or not material_2_obj:\n self.report({'ERROR'}, \"Please select two valid materials to mix.\")\n return {'CANCELLED'}\n\n # Create a new material for the mix\n mix_material = bpy.data.materials.new(name=\"MixMaterial\")\n mix_material.use_nodes = True\n mix_nodes = mix_material.node_tree.nodes\n mix_links = mix_material.node_tree.links\n\n # Create node trees from the selected materials\n node_tree_1 = create_node_tree_from_material(material_1_obj)\n node_tree_2 = create_node_tree_from_material(material_2_obj)\n\n # Create the mix node and add it to the node tree\n mix_node = mix_nodes.new(type='ShaderNodeMixShader')\n mix_node.location = (0, 0)\n\n # Set the mix ratio based on the operator property\n mix_node.inputs[0].default_value = context.scene.texmix_props.mix_ratio\n\n # Connect the two materials to the mix node\n mix_links.new(node_tree_1.outputs['Shader'], mix_node.inputs[1])\n mix_links.new(node_tree_2.outputs['Shader'], mix_node.inputs[2])\n\n # Connect the mix node to the output node\n mix_output_node = mix_nodes['Material Output']\n mix_links.new(mix_node.outputs['Shader'],\n mix_output_node.inputs['Surface'])\n\n # Apply the mix material to the active object\n context.object.active_material = mix_material\n\n return {'FINISHED'}\n\nclass MaterialSelectorPanel(bpy.types.Panel):\n bl_idname = \"MATERIAL_SELECTOR_PT_texmix\"\n bl_label = \"Material Selector\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_context = \"objectmode\"\n bl_category = \"TexMix\"\n\n def draw(self, context):\n layout = self.layout\n texmix_props = context.scene.texmix_props\n\n # Material selectors\n layout.prop(texmix_props, \"material_1\")\n layout.prop(texmix_props, \"material_2\")\n\n layout.separator()\n\n # Mix ratio slider\n layout.prop(texmix_props, \"mix_ratio\")\n\n layout.separator()\n\n # Mix button\n row = layout.row()\n row.operator(\"texmix.mix_operator\", text=\"Mix Materials\")\n\n # Apply button\n row = layout.row()\n row.operator(\"texmix.apply_operator\", text=\"Apply Material\")\n\nclass ApplyOperator(bpy.types.Operator):\n \"\"\"Apply the mixed material to the selected object and add the material to the list of materials\"\"\"\n bl_idname = \"texmix.apply_operator\"\n bl_label = \"Apply Operator\"\n\n def execute(self, context):\n # Get the active material\n mix_material = context.object.active_material\n\n # Give the material a unique name\n mix_material.name = mix_material.name + \"_\" + str(len(bpy.data.materials))\n\n # Apply the mix material to the active object\n context.object.active_material = mix_material\n\n return {'FINISHED'}\n\nclass TexMixProperties(PropertyGroup):\n material_1: EnumProperty(\n items=material_items_callback,\n name=\"Material 1\",\n description=\"Select the first material to mix.\"\n )\n material_2: EnumProperty(\n items=material_items_callback,\n name=\"Material 2\",\n description=\"Select the second material to mix.\"\n )\n mix_ratio: FloatProperty(\n name=\"Mix Ratio\",\n description=\"The ratio of the two materials to mix.\",\n default=0.5,\n min=0.0,\n max=1.0,\n subtype='FACTOR'\n )\n\ndef register():\n bpy.utils.register_class(TexMixProperties)\n bpy.utils.register_class(MaterialSelectorPanel)\n bpy.utils.register_class(MixOperator)\n bpy.utils.register_class(ApplyOperator)\n bpy.types.Scene.texmix_props = PointerProperty(type=TexMixProperties)\n\ndef unregister():\n bpy.utils.unregister_class(ApplyOperator)\n 
bpy.utils.unregister_class(MixOperator)\n bpy.utils.unregister_class(MaterialSelectorPanel)\n bpy.utils.unregister_class(TexMixProperties)\n del bpy.types.Scene.texmix_props\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"phaniblend/texmix","sub_path":"texmix/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41473469914","text":"def how_much_water(water, load, clothes):\n if clothes > (load *2):\n return \"Too much clothes\"\n if clothes < load:\n return \"Not enough clothes\"\n return round(water * 1.1 ** (clothes - load), 2)\n\nload = 10\nwater = 10\nclothes = 21\n\nhow_much_water(water, load, clothes)\n\n\n#round(decimalNumber, significantDigits)","repo_name":"ezstarr/CodeWarsSolutions","sub_path":"8kyu/how_much_water.py","file_name":"how_much_water.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22006983558","text":"import os,sys\nimport logging\nfrom celery import Celery, Task\nimport celery\n\nclass TCelery():\n\n tcelery = None\n\n def __init__(self, name: str, broker: str):\n self._name = name\n self._broker = broker\n #self._handle = Celery(self._name,\n self._handle = Celery( broker=self._broker,\n backend=self._broker)\n #self._handle = Celery()\n\n\n self._handle.conf.update(\n task_serializer='json',\n accept_content=['json'],\n result_serializer='json',\n timezone='Asia/Seoul',\n enable_utc=True,\n )\n\n\n @staticmethod\n def get_celery(name: str, broker: str) -> Celery:\n if TCelery.tcelery is None:\n TCelery.tcelery = TCelery(name=name, broker=broker)\n return TCelery.tcelery._handle\n\n\ndef get_celery() -> Celery:\n from tcelery import TCelery\n return TCelery.get_celery(name=\"main\",broker=\"redis://localhost:6379/1\")\n\n\ncelery_app = get_celery()\n\nimport testapi \n","repo_name":"swhors/flask_gunicorn_celery_example","sub_path":"tcelery.py","file_name":"tcelery.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5024123167","text":"# Programa - Controle de uma agenda de telefones\n\nagenda = []\n\n# Variável para marcar uma alteração na agenda\nalterada = False\n\n\ndef pede_nome():\n return input(\"Qual Nome: \")\n\n\ndef pede_telefone():\n return input(\"Qual Telefone: \")\n\n\ndef mostra_dados(nome, telefone):\n print(f\"-Nome: {nome} Telefone: {telefone}\")\n\n\ndef pede_nome_arquivo():\n return input(\"Nome do Arquivo: \")\n\n\ndef pesquisa(nome):\n nome = nome.lower()\n for p, e in enumerate(agenda):\n if e[0].lower() == nome:\n return p\n return None\n\n\ndef novo():\n global agenda\n nome = pede_nome().title()\n telefone = pede_telefone()\n agenda.append([nome, telefone])\n\n\ndef confirma(operacao):\n while True:\n opcao = input(f\"Confirmar {operacao} (S/N)?\").upper()\n if opcao in \"SN\":\n return opcao\n else:\n print(\"Opcão inválida. 
Escolha S ou N.\")\n\n\ndef apaga():\n global agenda\n nome = pede_nome()\n p = pesquisa(nome)\n if p is not None:\n del agenda[p]\n print(\"Não esqueça de salvar.\")\n else:\n print(\"Nome não encontrado.\")\n\n\ndef altera():\n p = pesquisa(pede_nome())\n if p is not None:\n nome = agenda[p][0]\n telefone = agenda[p][1]\n print(\"Telefone Encontrado:\")\n mostra_dados(nome, telefone)\n nome = pede_nome().title()\n telefone = pede_telefone()\n if confirma(\"alteração\") == \"S\":\n agenda[p] = [nome, telefone]\n print(\"Telefone salvo na agenda.\")\n else:\n print(\"Não foi salvo na agenda.\")\n else:\n print(\"Nome não encontrado.\")\n\n\ndef lista():\n print(\"\\nAgenda\\n\\n------\")\n # Usamos a função enumerate para obter a posição na agenda\n for posicao, e in enumerate(agenda):\n # Imprimimos a posição, sem saltar linha\n print(f\"Posição: {posicao}\", end=\"\")\n mostra_dados(e[0], e[1])\n print(\"------\\n\")\n\n\ndef le():\n global agenda\n nome_arquivo = pede_nome_arquivo()\n with open(nome_arquivo, \"r\", encoding=\"utf-8\") as arquivo:\n agenda = []\n for l in arquivo.readlines():\n nome, telefone = l.strip().split(\"#\")\n agenda.append([nome, telefone])\n print(\"Agenda lida com sucesso!\")\n\n\ndef grava():\n global alterada\n if not alterada:\n print(\"Você não alterou a lista. Deseja gravá-la mesmo assim\")\n if confirma(\"gravação\") == \"N\":\n return\n print(\"Gravar\\n------\")\n nome_arquivo = pede_nome_arquivo()\n with open(nome_arquivo, \"w\", encoding=\"utf-8\") as arquivo:\n for e in agenda:\n arquivo.write(f\"{e[0]}#{e[1]}\\n\")\n arquivo.close()\n alterada = False\n\n\ndef valida_faixa_inteiro(pergunta, inicio, fim):\n while True:\n try:\n valor = int(input(pergunta))\n if inicio <= valor <= fim:\n return valor\n except ValueError:\n print(\"valor inválido, favor digitar entre {inicio} e {fim}\")\n\n\ndef menu():\n print(\"\"\"\n \\n Agenda\\n\n 1 - Novo\n 2 - Alterar\n 3 - Apagar\n 4 - Listar\n 5 - Salvar\n 6 - Ler Arquivo\n 7 - Ordena por nome\n\n 0 - Sair\n \"\"\")\n print(f\"\\nNomes na agenda: {len(agenda)}\\n\")\n return valida_faixa_inteiro(\"Escolha uma opção: \", 0, 6)\n\n\nwhile True:\n opcao = menu()\n if opcao == 0:\n break\n if opcao == 1:\n novo()\n elif opcao == 2:\n altera()\n elif opcao == 3:\n apaga()\n elif opcao == 4:\n lista()\n elif opcao == 5:\n grava()\n elif opcao == 6:\n le()\n","repo_name":"Lucasolidev/My-Projects","sub_path":"Prog_Agenda/Programa_Agenda.py","file_name":"Programa_Agenda.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28751139041","text":"# This model are used to compare with classical model\nfrom sklearn.ensemble import RandomForestRegressor\nimport pdb\nimport pandas as pd\n\ndef split_label_features(X, target= \"proportion\", horizon = 6):\n lab = X[target]\n feat = X[[target +'_shift_' + str(horizon), target +'_shift_' + str(horizon+1), 'price', 'margin']]\n return feat, lab\n\ndef compute_basic_model(result, X_test, horizon):\n result.write('Null MAPE %.2f \\n' % (100 * sum(abs(X_test['proportion'])) / sum(X_test['proportion'])))\n result.write('Simple_shift MAPE %.2f\\n' % (\n 100 * sum(abs(X_test['proportion_shift_'+str(horizon)] - X_test['proportion'])) / sum(X_test['proportion'])))\n result.write('Simple 2-mean MAPE %.2f\\n' % (100 * sum(\n abs(X_test['proportion_shift_'+str(horizon)] / 2 + X_test['proportion_shift_'+str(horizon+1)] / 2 - X_test['proportion'])) / sum(\n X_test['proportion'])))\n\n\ndef 
compute_random_forest(result, X_train, X_valid, X_test, compute_scaled_RF = False):\n    train_features, train_labels = split_label_features(X_train)\n    valid_features, valid_labels = split_label_features(X_valid)\n    test_features, test_labels = split_label_features(X_test)\n\n    models = [RandomForestRegressor(), RandomForestRegressor(max_depth = 4), RandomForestRegressor(min_samples_leaf = 10), RandomForestRegressor(min_samples_leaf = 30)]\n    err_liste = []\n    m_list = []\n    for model in models:\n        # fit each candidate as-is; re-creating a default RandomForestRegressor here\n        # would overwrite the hyper-parameters being compared\n        model.fit(train_features, train_labels)\n        y_pred_valid = model.predict(valid_features)\n        err_liste.append(sum(abs(y_pred_valid - valid_labels)))\n        m_list.append(model)\n    min_model = err_liste.index(min(err_liste))\n    model = m_list[min_model]\n    print(\"best RF model: \" + str(min_model))\n    y_pred = model.predict(test_features)\n    result.write('Random Forest MAPE %.2f\\n' % (\n        100 * sum(abs(y_pred - test_labels)) / sum(test_labels)))\n    if compute_scaled_RF:\n        df_pred = pd.DataFrame(data = y_pred, index=test_features.index, columns =['prev'])\n        gpdf = df_pred.groupby(level= 1).sum()\n        gpdf.columns = ['sum_pred']\n        df_pred = df_pred.join(gpdf)\n        result.write('Scaled Random Forest MAPE %.2f\\n' % (\n            100 * sum(abs(df_pred['prev']/df_pred['sum_pred'] - test_labels)) / sum(test_labels)))","repo_name":"garnier94/Concurrent_Neural_Network","sub_path":"evaluation/model_reference.py","file_name":"model_reference.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
+{"seq_id":"11360684470","text":"from mecode import G\nfrom material import init_material\nfrom utility import nomad_header, CNC_TRAVEL_Z, GContext, calc_steps, run_3_stages\nfrom drill import drill\nimport argparse\nimport math\n\n\n# Assume we are at (x, y, 0)\n# Accounts for tool size\n# 'r' radius\n# 'd' diameter\n# 'di' inner diameter to reserve (if fill)\n# 'offset' = 'inside', 'outside', 'middle'\n# 'fill' = True\n# 'z' if specified, the z move to issue before cutting\n# 'return_center' = True, returns to center at the end\n#\ndef hole(g, mat, cut_depth, **kwargs):\n    radius = 0\n    offset = \"inside\"\n    fill = True\n    di = None\n\n    if 'r' in kwargs:\n        radius = kwargs['r']\n    if 'd' in kwargs:\n        radius = kwargs['d'] / 2\n    if 'offset' in kwargs:\n        offset = kwargs['offset']\n    if 'fill' in kwargs:\n        fill = kwargs['fill']\n    if 'di' in kwargs:\n        di = kwargs['di']\n\n    tool_size = mat['tool_size']\n    half_tool = tool_size / 2\n\n    if offset == 'inside':\n        radius_inner = radius - half_tool\n    elif offset == 'outside':\n        radius_inner = radius + half_tool\n    elif offset == 'middle':\n        radius_inner = radius\n    else:\n        raise RuntimeError(\"offset not correctly specified\")\n\n    if radius_inner < 0.2:\n        raise RuntimeError(f\"Radius too small. Consider a drill. 
Radius={radius} Inner={radius_inner} (must be 0.2 or greater)\")\n if cut_depth >= 0:\n raise RuntimeError('Cut depth must be less than zero.')\n if mat[\"tool_size\"] < 0:\n raise RuntimeError('Tool size must be zero or greater.')\n\n was_relative = g.is_relative\n\n with GContext(g):\n g.relative()\n\n g.comment(\"hole\")\n g.comment(\"depth=\" + str(cut_depth))\n g.comment(\"tool size=\" + str(mat['tool_size']))\n g.comment(\"radius=\" + str(radius))\n g.comment(\"pass depth=\" + str(mat['pass_depth']))\n g.comment(\"feed rate=\" + str(mat['feed_rate']))\n g.comment(\"plunge rate=\" + str(mat['plunge_rate']))\n\n # The trick is to neither exceed the plunge or the depth-of-cut/pass_depth.\n # Approaches below.\n\n feed_rate = mat['feed_rate']\n path_len_mm = 2.0 * math.pi * radius_inner\n path_time_min = path_len_mm / feed_rate\n plunge_from_path = mat['pass_depth'] / path_time_min\n depth_of_cut = mat['pass_depth']\n\n # Both 1) fast little holes and 2) too fast plunge are bad.\n # Therefore, apply corrections to both. (If for some reason\n # alternate approaches need to be reviewed, they are in\n # source control.)\n if plunge_from_path > mat['plunge_rate']:\n factor = mat['plunge_rate'] / plunge_from_path\n if factor < 0.3:\n factor = 0.3 # slowing down to less than 10% (factor * factor) seems excessive\n depth_of_cut = mat['pass_depth'] * factor\n feed_rate = mat['feed_rate'] * factor\n g.comment('adjusted pass depth=' + str(depth_of_cut))\n g.comment('adjusted feed rate =' + str(feed_rate))\n\n g.spindle('CW', mat['spindle_speed'])\n g.feed(mat['travel_plunge'])\n\n g.move(x=radius_inner)\n if 'z' in kwargs:\n if was_relative:\n g.move(z=kwargs['z'])\n else:\n g.abs_move(z=kwargs['z'])\n\n g.feed(feed_rate)\n\n def path(g, plunge, total_plunge):\n g.arc2(x=-radius_inner, y=radius_inner, i=-radius_inner, j=0, direction='CCW', helix_dim='z',\n helix_len=plunge / 4)\n g.arc2(x=-radius_inner, y=-radius_inner, i=0, j=-radius_inner, direction='CCW', helix_dim='z',\n helix_len=plunge / 4)\n g.arc2(x=radius_inner, y=-radius_inner, i=radius_inner, j=0, direction='CCW', helix_dim='z',\n helix_len=plunge / 4)\n g.arc2(x=radius_inner, y=radius_inner, i=0, j=radius_inner, direction='CCW', helix_dim='z',\n helix_len=plunge / 4)\n\n if fill and radius_inner > half_tool:\n r = radius_inner\n dr = 0\n step = tool_size * 0.8\n min_rad = half_tool * 0.8\n if di:\n min_rad = di / 2\n\n while r > min_rad:\n if r - step < min_rad:\n step = r - min_rad\n r -= step\n\n #print(\"r={} step={}\".format(r, step))\n\n dr += step\n g.move(x=-step)\n g.arc2(x=-r, y=r, i=-r, j=0, direction='CCW')\n g.arc2(x=-r, y=-r, i=0, j=-r, direction='CCW')\n g.arc2(x=r, y=-r, i=r, j=0, direction='CCW')\n g.arc2(x=r, y=r, i=0, j=r, direction='CCW')\n g.move(x=dr)\n\n steps = calc_steps(cut_depth, -depth_of_cut)\n run_3_stages(path, g, steps)\n\n g.move(z=-cut_depth) # up to the starting point\n g.feed(mat['travel_plunge']) # go fast again...else. wow. 
boring.\n\n g.move(z=1.0)\n\n return_center = True\n if 'return_center' in kwargs:\n return_center = kwargs['return_center']\n\n if return_center:\n g.move(x=-radius_inner) # back to center of the circle\n\n\ndef hole_abs(g, mat, cut_depth, radius, x, y):\n with GContext(g):\n g.absolute()\n g.feed(mat['travel_feed'])\n g.move(z=CNC_TRAVEL_Z)\n g.move(x=x, y=y)\n hole(g, mat, cut_depth, r=radius)\n g.absolute()\n\n\n# assume we are at (x, y, CNC_TRAVEL_Z)\ndef hole_or_drill(g, mat, cut_depth, radius):\n if radius == 0:\n return \"mark\"\n elif mat['tool_size'] + 0.1 < radius * 2:\n if g:\n hole(g, mat, cut_depth, r=radius)\n return \"hole\"\n else:\n if g:\n drill(g, mat, cut_depth)\n return \"drill\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Cut a hole at given radius and depth. Implemented with helical arcs,' +\n 'and avoids plunging.')\n parser.add_argument('material', help='The material to cut in standard machine-material-size format.', type=str)\n parser.add_argument('depth', help='Depth of the cut. Must be negative.', type=float)\n parser.add_argument('radius', help='Radius of the hole.', type=float)\n parser.add_argument('offset', help=\"inside, outside, middle\", type=str)\n args = parser.parse_args()\n\n mat = init_material(args.material)\n g = G(outfile='path.nc', aerotech_include=False, header=None, footer=None, print_lines=False)\n\n nomad_header(g, mat, CNC_TRAVEL_Z)\n g.spindle('CW', mat['spindle_speed'])\n g.move(z=0)\n hole(g, mat, args.depth, r=args.radius, offset=args.offset)\n g.spindle()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"leethomason/saberCNC","sub_path":"hole.py","file_name":"hole.py","file_ext":"py","file_size_in_byte":6753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11204672546","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport os\n\nfrom urllib.parse import urlparse\n\nimport utils \n\n\ndef custom_tokenizer(string):\n final = []\n tokens = [a for a in list(urlparse(string)) if a]\n for t in tokens:\n final.extend(re.compile(\"[.-]\").split(t))\n return final\n\nCSV_PATH = sys.argv[1]\nif CSV_PATH is None:\n raise FileNotFoundError\n\nurls = utils.read_base_dataset(CSV_PATH) # Read the dataset\nx = urls.get('URL', urls.get('url'))\nprint(\"Start the detection...\")\npipeline = utils.load_model('iscxurls_ridgeclassifier')\ny_hat = pipeline.predict(x)\n\nurls['Label'] = y_hat\noutput_filename = os.path.basename(CSV_PATH)\nurls.to_csv(\"csv/\" + output_filename, index=False)\n\nprint(\"Number of samples:\", x.shape[0])\nprint(\"RESULTS:\", urls.Label.value_counts().to_dict())","repo_name":"isados/haktrak-ids","sub_path":"predictMaliciousUrls.py","file_name":"predictMaliciousUrls.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23659601370","text":"import requests\n\ndef getOnePage(url) :\n response = requests.get(url)\n return response\n\ndef main () :\n url = 'http://www.ali213.net/zt/ztisitemap_hot.html'\n html = getOnePage(url).content.decode('utf-8')\n print(html)\n \nif __name__ == '__main__' :\n main()","repo_name":"doufujun/Python3","sub_path":"Instances/Ali213.py","file_name":"Ali213.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9329589216","text":"import pygame\nimport sys\n\nfrom settings import *\nfrom level import 
Level\n# from debug import debug\n\n\nclass Game:\n def __init__(self):\n\n # general setup\n pygame.init()\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n self.clock = pygame.time.Clock()\n\n # set window title and icon\n pygame.display.set_caption('Zelda?')\n ICON = pygame.image.load('../graphics/icon.jpg')\n pygame.display.set_icon(ICON)\n\n self.level = Level()\n \n def run(self):\n # event loop\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n # update the game\n self.screen.fill('black')\n self.level.run()\n pygame.display.update()\n self.clock.tick(FPS) # FPS determines how many times the game updates itself\n\n\nif __name__ == '__main__':\n game = Game()\n game.run()\n","repo_name":"InsaneHum/zelda_in_pygame","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18244445589","text":"__author__ = 'alisonbento'\n\nimport time\n\nimport hsres\nimport src.resstatus as _status\n\nfrom src.entities.hsextra import HomeShellExtra\n\nfrom src.dao.appliancedao import ApplianceDAO\nfrom src.dao.extradao import ExtraDAO\n\nfrom flask import request\n\n\nclass ExtraResource(hsres.HomeShellResource):\n\n def get(self, appliance_id, extra_key):\n\n appliancedao = ApplianceDAO(self.get_dbc())\n\n if not appliance_id.isdigit():\n self.set_status(_status.STATUS_INVALID_REQUEST)\n return self.end()\n\n appliance = appliancedao.get(appliance_id)\n\n if appliance is None:\n self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)\n return self.end()\n\n extradao = ExtraDAO(self.get_dbc())\n all_extras = extradao.list('appliance_id = ? AND extra_key = ?', (appliance.id, extra_key))\n\n parsed_extras = []\n for extra in all_extras:\n parsed_extras.append(extra.to_array())\n\n self.set_status(_status.STATUS_OK)\n self.add_content('extras', parsed_extras)\n\n return self.end()\n\n def post(self, appliance_id, extra_key):\n appliancedao = ApplianceDAO(self.get_dbc())\n\n appliance_list = appliancedao.select('appliance_hash = ?', (appliance_id,))\n\n if len(appliance_list) <= 0:\n self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)\n return self.end()\n\n appliance = appliance_list[0]\n value = str(request.form.get('value'))\n date = str(request.form.get('date'))\n created = time.strftime('%Y-%m-%d %H:%M:%S')\n\n extradao = ExtraDAO(self.get_dbc())\n extra = HomeShellExtra(0, appliance.id, str(extra_key), value, date, created)\n\n result = extradao.insert(extra)\n\n if result:\n self.get_dbc().commit()\n self.set_status(_status.STATUS_OK)\n else:\n self.get_dbc().rollback()\n self.set_status(_status.STATUS_GENERAL_ERROR)\n\n return self.end()\n\n\nclass ListExtrasResource(hsres.HomeShellResource):\n\n def get(self, appliance_id):\n appliancedao = ApplianceDAO(self.get_dbc())\n appliance = appliancedao.get(appliance_id)\n\n if appliance is None:\n self.set_status(_status.STATUS_APPLIANCE_NOT_FOUND)\n return self.end()\n\n extradao = ExtraDAO(self.get_dbc())\n all_extras = extradao.list('appliance_id = ?', (appliance.id,))\n\n parsed_extras = []\n for extra in all_extras:\n parsed_extras.append(extra.to_array())\n\n self.set_status(_status.STATUS_OK)\n self.add_content('extras', parsed_extras)\n\n return 
self.end()","repo_name":"abnt713/home-shell","sub_path":"src/resources/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74795062985","text":"class Solution:\n def canReorderDoubled(self, arr: List[int]) -> bool:\n '''\n 看是否可以 兩兩一組\n '''\n counter = collections.Counter(arr)\n \n for a, count in sorted(counter.items(), key=lambda x: x[0]):\n if a == 0:\n if counter[0] % 2 == 1:\n return False\n else:\n counter[0] = 0\n elif a*2 in counter:\n minus = min(counter[a*2], counter[a])\n counter[a*2] -= minus\n counter[a] -= minus\n \n return all([x == 0 for x in counter.values()])\n\n","repo_name":"novayo/LeetCode","sub_path":"0954_Array_of_Doubled_Pairs/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"8859237417","text":"import csv\nimport os\nfrom dotenv import load_dotenv\nfrom urllib.request import Request, urlopen\nfrom peopledatalabs import PDLPY\n\n# Used for local testing\n# load_dotenv()\n# key = os.getenv('PPLDATA_KEY')\n\n# Get API key\nkey = input(\"Enter People Data Labs API key: \")\ncompanies = []\n\n# Read CSV\nfileName = input(\"Enter CSV file name: \")\nwith open(fileName, newline='') as csvfile:\n spamReader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n # Remove header\n rowsList = list(spamReader)\n rowsList.pop(0)\n\n # Add only URLs from CSV to array\n for row in rowsList:\n str = \"\"\n companies.append(str.join(row[1]))\n\ncodeDict = {}\n\nfor company in companies:\n status = \"\"\n # Check if website found by Clearbit\n if company == \"not found\":\n codeDict[company] = \"website not provided\"\n else: \n # Get cross referencing data from People Data Labs API\n client = PDLPY(\n api_key=key,\n )\n result = client.company.enrichment(\n website=company,\n pretty=True,\n ).json()\n\n #If found, add industry and location to dict\n if result['status'] == 200:\n # Some results don't return location, check for None results\n locationName = result['location']\n if locationName == None:\n locationName = \"LocationUnavailable, , \"\n codeDict[company] = (result['industry'] + \",\" + locationName + \",\")\n else:\n locationName = locationName['name'].replace(\" \", \"\")\n codeDict[company] = (result['industry'] + \",\" + locationName + \",\")\n\n # Not found, add message to disct\n elif result['status'] == 404:\n codeDict[company] = (result['message'] + \", , , , \")\n # Error, print message to console\n else: \n print(result['error']['message'])\n\n #Access URL and check if code 200. 
All others return code \"error\"\n fullAddress = \"http://www.\" + company\n request_site = Request(fullAddress, headers={\"User-Agent\": \"Mozilla/5.0\"})\n try:\n code = urlopen(request_site).getcode()\n # Website opens\n if code == 200:\n status = \"OK\"\n\n # Website doesn't open\n else:\n status = \"NO\"\n\n # urlopen() error\n except:\n status = \"error\"\n\n # Append code to end of string value on dictionary\n codeDict[company] += status \n\n# Write dict with company data to csv\nwith open('crossreference.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',', escapechar=' ', quoting=csv.QUOTE_NONE)\n spamwriter.writerow(['Website'] + ['Industry'] + ['City'] + ['State'] + ['Country'] + ['Opened?'])\n for key in codeDict.keys():\n spamwriter.writerow([key] + [codeDict[key]])\n print(\"Cross reference CSV saved succesfully.\")","repo_name":"ifmachado/data-gathering-scripts","sub_path":"crossReferenceScript.py","file_name":"crossReferenceScript.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70178565386","text":"from rest_framework import serializers\nfrom .......models.exempt_assessment_roll_model import ExemptAssessmentRoll\nfrom ..serializers.display_assessment_serializer import DisplayAssessmentSerializer\n\nclass DisplayExemptAssessmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = ExemptAssessmentRoll\n fields = ['id','revision_year','prov_city', 'prov_city_index_no', 'mun_city', 'mun_city_index_no', 'barangay',\n 'barangay_index_no', 'section','section_index_no','date_prepared','date_modified']\n \n def to_representation(self, instance):\n rep = super().to_representation(instance)\n rep['exempt_assessments'] = DisplayAssessmentSerializer(instance.exemptassessment_set.all(),many=True).data\n rep['total_assessments'] = instance.exemptassessment_set.count()\n return rep","repo_name":"Ronuel-R/Digital_Dexterity_Backend","sub_path":"digital_dex_admin_web/versions/v1p0/features/exempt_assessment_roll/display_exempt_assessment/serializers/display_exempt_assessment_serializer.py","file_name":"display_exempt_assessment_serializer.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36139420413","text":"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom Cardiac_Electrophysiology import *\nfrom KoopmanDL import *\nfrom Hybrid import *\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0],[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5120)])\n except RuntimeError as e:\n print(e)\n\n# Load data\n# s = np.load('./data/s_sample_50.npy')\n# v = np.load('./data/v_sample_50.npy')\ns = np.load('./data/s_sample_0416.npy')\nv = np.load('./data/v_sample_0416.npy')\n\n\n# Build the input tensor\nDataModel = Cardiac_Electrophysiology_DataModel()\nx1 = np.linspace(0,10,51)\nt = np.linspace(0,10,50)\nu_t = np.sin(t)\nu_x_mesh = DataModel.dx(x1,x1)\nu = u_t[:,np.newaxis,np.newaxis] * u_x_mesh[np.newaxis,:]\n\n# Build v_xx\ndlt_x = x1[1]-x1[0]\nv_train_xx = np.zeros(np.shape(v))\nv_train_xx[:,:,0] = (2 * v[:,:,1] - 2 * v[:,:,0])/dlt_x**2\nfor i in range(1, np.shape(v)[2]-1):\n v_train_xx[:,:,i] = 
(v[:,:,i-1] + v[:,:,i+1] - 2*v[:,:,i])/dlt_x**2\nv_train_xx[:,:,-1] = (2 * v[:,:,-2] - 2 * v[:,:,-1])/dlt_x**2\n\n# Build v_yy\ndlt_y = x1[1]-x1[0]\nv_train_yy = np.zeros(np.shape(v))\n# the y boundaries live on axis 1, matching the interior loop below\n# (writing them into v_train_yy[:,:,0] would fill the wrong slices)\nv_train_yy[:,0,:] = (2 * v[:,1,:] - 2 * v[:,0,:])/dlt_y**2\nfor i in range(1, np.shape(v)[1]-1):\n    v_train_yy[:,i,:] = (v[:,i-1,:] + v[:,i+1,:] - 2*v[:,i,:])/dlt_y**2\nv_train_yy[:,-1,:] = (2 * v[:,-2,:] - 2 * v[:,-1,:])/dlt_y**2\n\n# Build v_data, s_data\nv_train = v\ns_train = s\n\n# Build training data\nv1 = np.reshape(v_train[2:,:,:],(-1, 1))\nv0 = np.reshape(v_train[1:-1,:,:],(-1,1))\nlacev_x0 = np.reshape(v_train_xx[1:-1,:,:],(-1,1))\nlacev_y0 = np.reshape(v_train_yy[1:-1,:,:],(-1,1))\n\nlace_data = np.concatenate((lacev_x0,lacev_y0),axis=1)\n\nm1 = np.reshape(s_train[2:,0,:,:],(-1,1))\nm0 = np.reshape(s_train[1:-1,0,:,:],(-1,1))\nn1 = np.reshape(s_train[2:,1,:,:],(-1,1))\nn0 = np.reshape(s_train[1:-1,1,:,:],(-1,1))\nh1 = np.reshape(s_train[2:,2,:,:],(-1,1))\nh0 = np.reshape(s_train[1:-1,2,:,:],(-1,1))\n\nx_data = np.concatenate((v0,m0,n0,h0),axis=1)\ny_data = np.concatenate((v1,m1,n1,h1),axis=1)\nu_data = np.reshape(u[1:-1,:,:],(-1,1))\n\nfusion_data = np.concatenate((np.reshape(x_data[:,0],(-1,1)),lace_data),axis = 1)\n\nw, y_pred = Hybrid_compute_linear_weight(fusion_data, y_data[:,0])\n\nmodel = KoopmanDL_Model(target_dim=4, u_dim=1,\n                     dic_trainable=KoopmanDL_DicNN, dic_layer_sizes=[64,64],\n                     operator_layer_sizes=[32,32])\n\nmodel.Build()\n\n# Koopman-DL pre train\nlr = 0.001\nopt = tf.keras.optimizers.Adam(learning_rate=lr)\nmodel.model_KoopmanDL.compile(optimizer=opt, loss='mse')\n\nx_data_scaler = model.Build_x_scaler(x_data)\ny_data_scaler = model.Build_y_scaler(y_data)\nu_data_scaler = model.Build_u_scaler(u_data)\n\nx_train, x_test, y_train, y_test, u_train, u_test = train_test_split(x_data_scaler, y_data_scaler, u_data_scaler, test_size=0.33, random_state=None)\n\ntf.norm(model.model_KoopmanDL([x_train, y_train, u_train]),axis=1)\n\niters = 2\nepochs = [2,10]\nzeros_data_y_train = tf.zeros_like(model.dic.call(y_train))\nzeros_data_y_test = tf.zeros_like(model.dic.call(y_test))\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n    try:\n        tf.config.experimental.set_virtual_device_configuration(\n            gpus[0],[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5120)])\n    except RuntimeError as e:\n        print(e)\nfor i in range(iters):\n    model.Train_Operator()\n    model.model_KoopmanDL.fit([x_train, y_train, u_train], zeros_data_y_train, validation_data = ([x_test, y_test, u_test], zeros_data_y_test),epochs=epochs[0],verbose=0,batch_size=4096)\n    model.Train_Dic()\n    model.model_KoopmanDL.fit([x_train, y_train, u_train], zeros_data_y_train, validation_data = ([x_test, y_test, u_test], zeros_data_y_test),epochs=epochs[1],verbose=0,batch_size=4096)\n    \n# model.model_KoopmanDL.save_weights('./checkpoints/ckpt')\n\ny_direct_koopman = model.Predict( x_data_scaler, u_data_scaler)\ny_direct_koopman = model.inverse_transform_y(y_direct_koopman)\n\nerr = 1e-6\nmu_last_pred = 1e8 * np.ones(np.shape(w))\nmu_pred = w\nv_linear = np.matmul(fusion_data,w)\nprint(w)\nerr_history = []\nmu_history = []\nerr_v = []\n\nmodel.Train_Operator()\n\nerr_data = np.zeros(np.shape(y_data_scaler))\nerr_data[:,:] = y_data[:,:]\nstep = 0\n\n# Build Error\nerr_data[:,0] = y_data[:,0] - np.reshape(v_linear,(1,-1))\nerr_data_scaler = model.Build_y_scaler(err_data)\n    \n# Koopman\nx_train, x_test, err_train, err_test, u_train, u_test = train_test_split(x_data_scaler, err_data_scaler, u_data_scaler, 
test_size=0.33, random_state=None)\n# model.train_model([x_train, err_train, u_train], [x_test, err_test, u_test], 30, reg_para = 0)\nmodel.model_KoopmanDL.fit([x_train, err_train, u_train], zeros_data_y_train, validation_data = ([x_test, err_test, u_test], zeros_data_y_test),epochs=300,verbose=0,batch_size=4096)\nv_koopman_scaler = model.Predict(x_data_scaler, u_data_scaler)\nv_koopman_origin = model.inverse_transform_y(v_koopman_scaler)\nerr_for_linear = y_data[:,0] - v_koopman_origin[:,0]\nprint(model.loss_fun(v_koopman_origin, err_data))\nerr_history.append(model.loss_fun(v_koopman_origin, err_data))\nerr_v.append(model.loss_fun(v_koopman_origin[:,0], err_data[:,0]))\n \n# Linear\nmu_history.append(mu_pred)\nmu_last_pred = mu_pred\nmu_pred, v_linear = Hybrid_compute_linear_weight(fusion_data, err_for_linear)\n\n# Build Error\nerr_data[:,0] = y_data[:,0] - np.reshape(v_linear,(1,-1))\nerr_data_scaler = model.Build_y_scaler(err_data)\n \nv_linear_pre = v_linear\nv_koopman_pre = v_koopman_origin[:,0]\n\nwhile (tf.norm(mu_last_pred-mu_pred)>err and step <= 20):\n step = step + 1\n \n # Koopman\n x_train, x_test, err_train, err_test, u_train, u_test = train_test_split(x_data_scaler, err_data_scaler, u_data_scaler, test_size=0.33, random_state=None)\n model.model_KoopmanDL.fit([x_train, err_train, u_train], zeros_data_y_train, validation_data = ([x_test, err_test, u_test], zeros_data_y_test),epochs=300,verbose=0,batch_size=4096)\n v_koopman_scaler = model.Predict(x_data_scaler, u_data_scaler)\n v_koopman_origin = model.inverse_transform_y(v_koopman_scaler)\n err_for_linear = y_data[:,0] - v_koopman_origin[:,0]\n print(model.loss_fun(v_koopman_origin, err_data))\n err_history.append(model.loss_fun(v_koopman_origin, err_data))\n err_v.append(model.loss_fun(v_koopman_origin[:,0], err_data[:,0]))\n \n # Linear\n mu_history.append(mu_pred)\n mu_last_pred = mu_pred\n mu_pred, v_linear = Hybrid_compute_linear_weight(fusion_data, err_for_linear)\n print(mu_pred) \n \n # Build Error \n dlt_err = tf.squeeze(v_linear) + v_koopman_origin[:,0] - tf.squeeze(v_linear_pre) - v_koopman_pre\n dlt_k = y_data[:,0] - tf.squeeze(v_linear_pre) - v_koopman_pre\n max_val = max(np.max(np.abs(dlt_err)), np.max(np.abs(dlt_k)))\n dlt_err_norm = dlt_err / max_val\n dlt_k_norm = dlt_k / max_val\n dot_product = np.sum(dlt_err_norm * dlt_k_norm)\n dlt_err_norm_square = np.sum(dlt_err_norm**2)\n result = dot_product / dlt_err_norm_square\n t_F = result\n print(t_F)\n err_data[:,0] = y_data[:,0] - (t_F * np.reshape(v_linear,(1,-1)) + (1-t_F) * np.reshape(v_linear_pre, (1,-1)))\n err_data_scaler = model.Build_y_scaler(err_data)\n\n\nnp.save('./output/mu_history_relax_2.npy',mu_history)\nnp.save('./output/err_history_relax_2.npy',err_history)\nnp.save('./output/err_history_v_relax_2.npy',err_v)\n\n\n\nnp.save('./output/y_max_relax_1.npy', model.y_max)\nnp.save('./output/y_min_relax_1.npy', model.y_min)\nmodel.model_KoopmanDL.save_weights('./hybrid_checkpoints_relax_1/ckpt')\n\nprint(\"The training of Relaxed Hybrid Method is down.\")\n \n \n","repo_name":"Shiqi-Wu/Nonintrusive-model-combination-for-learning-dynamics","sub_path":"cardiac-coupled-system/acceleration-train.py","file_name":"acceleration-train.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74929994823","text":"from datetime import datetime as dt\nfrom pathlib import Path\n\nfrom gate_control.config import LOG_LEVEL\n\nfolder = Path().home() / 'logs'\n\ndef 
log(path, log_level, details):\n    if LOG_LEVEL > log_level: return\n    if not folder.exists():\n        folder.mkdir()\n    with open(folder/f'{path}.txt','a') as f:\n        f.write(str(details) + '\\n')","repo_name":"mark-styx/gate-controller","sub_path":"gate_control/__classes__/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"40254789718","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndiv_TS = np.loadtxt(\"data/task4_divTS.txt\")\nnMU = np.loadtxt(\"data/task4_F6.txt\")\n\nplt.figure()\nplt.plot(nMU[0,:], label=\"U\")\nplt.plot(nMU[1,:], label=\"M\")\nplt.vlines(div_TS, ymin=0, ymax=40, colors=\"red\", linestyles=\"dashed\")\nplt.legend()\n\ndiffMA = 2*nMU[1,:] + nMU[0,:] - 60\n\nplt.figure()\nplt.hist(diffMA, bins=120, label=rf\"$\\gamma = -1.0$\")\nplt.legend()\n\nplt.show()\n","repo_name":"Snipersune/DynMod","sub_path":"Lab2/Task4_plot.py","file_name":"Task4_plot.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"10337755488","text":"# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\ndef _encoder_block_unet(input, num_filters):\n    \"\"\"\n    Encoder logic for U-Net\n    :param input: KerasTensor\n    :param num_filters: Int - number of output filters in convolution\n    :return: KerasTensor\n    \"\"\"\n    x = layers.Conv2D(num_filters, 3, padding=\"same\")(input)\n    x = layers.MaxPool2D((2, 2))(x)\n    # x = layers.BatchNormalization()(x)\n    x = layers.Activation(\"relu\")(x)\n    return x\n\n\ndef _encoder_block_kinet(input, num_filters):\n    \"\"\"\n    Encoder logic for Ki-Net\n    :param input: KerasTensor\n    :param num_filters: Int - number of output filters in convolution\n    :return: KerasTensor\n    \"\"\"\n    x = layers.Conv2D(num_filters, 3, padding=\"same\")(input)\n    x = layers.UpSampling2D(size=(2, 2), interpolation=\"bilinear\")(x)\n    x = layers.BatchNormalization()(x)\n    x = layers.Activation(\"relu\")(x)\n    return x\n\n\ndef _decoder_block_unet(input, num_filters):\n    \"\"\"\n    Decoder logic for U-Net\n    :param input: KerasTensor\n    :param num_filters: Int - number of output filters in convolution\n    :return: KerasTensor\n    \"\"\"\n    x = layers.Conv2D(num_filters, 3, padding=\"same\")(input)\n    x = layers.UpSampling2D(size=(2, 2), interpolation=\"bilinear\")(x)\n    # x = layers.BatchNormalization()(x)\n    x = layers.Activation(\"relu\")(x)\n    return x\n\n\ndef _decoder_block_kinet(input, num_filters):\n    \"\"\"\n    Decoder logic for Ki-Net\n    :param input: KerasTensor\n    :param num_filters: Int - number of output filters in convolution\n    :return: KerasTensor\n    \"\"\"\n    x = layers.Conv2D(num_filters, 3, padding=\"same\")(input)\n    x = layers.MaxPool2D((2, 2))(x)\n    x = layers.BatchNormalization()(x)\n    x = layers.Activation(\"relu\")(x)\n    return x\n\n\ndef _crfb(x1, x2, num_filters, scale_factor):\n    \"\"\"\n    CRFB is the cross residual fusion block which fuses the outputs at the specified layers\n    from one model to the other, and returns the results as input for the next step.\n    :param x1: KerasTensor\n    :param x2: KerasTensor\n    :param num_filters: Int - number of output filters in convolution\n    :param scale_factor: Float - scale factor for resizing (upsampling/downsampling) of output\n    :return: KerasTensor\n    \"\"\"\n    new_w = int(x2.shape[1] * 
scale_factor)\n new_h = int(x2.shape[2] * scale_factor)\n out = layers.Conv2D(num_filters, 3, padding=\"same\")(x2)\n out = layers.Activation(\"relu\")(out)\n out = tf.image.resize(out, [new_h, new_w])\n output = layers.Add()([x1, out])\n return output\n\n\ndef kiunet(input_shape, num_classes):\n \"\"\"\n Architecture of the KiU-Net model, which combines the Ki-Net and U-Net architecture\n :param num_classes:\n :param input_shape: Tuple of the shape of the input\n :return: keras.Model\n \"\"\"\n inputs = layers.Input(shape=input_shape)\n\n # ENCODER BLOCK #\n\n s1 = _encoder_block_unet(inputs, 16) # U NET ENCODER\n k1 = _encoder_block_kinet(inputs, 16) # KINET ENCODER\n\n u1 = _crfb(s1, k1, 16, 0.25) # CRFB U1 UNET\n o1 = _crfb(k1, s1, 16, 4) # CRFB O1 KINET\n\n s2 = _encoder_block_unet(u1, 32) # UNET ENCODER\n k2 = _encoder_block_kinet(o1, 32) # KINET ENCODER\n\n u2 = _crfb(s2, k2, 32, 0.0625) # CRFB U2 UNET\n o2 = _crfb(k2, s2, 32, 16) # CRFB O2 KINET\n\n s3 = _encoder_block_unet(u2, 64) # UNET ENCODER\n k3 = _encoder_block_kinet(o2, 64) # KINET ENCODER\n\n u3 = _crfb(s3, k3, 64, 0.015625) # CRFB U3 UNET\n o3 = _crfb(k3, s2, 64, 32) # CRFB O3 KINET\n\n # DECODER BLOCK #\n\n d1_u = _decoder_block_unet(u3, 32) # UNET DECODER\n d1_k = _decoder_block_kinet(o3, 32) # KINET DECODER\n\n d_u1 = _crfb(d1_u, d1_k, 32, 0.0625) # CRFB D_U1 UNET\n\n d_o1 = _crfb(d1_k, d1_u, 32, 16) # CRFB D_O1 KINET\n\n out = layers.Add()([d_u1, s2]) # CONCATENTATION D_U1 & S2 UNET\n out1 = layers.Add()([d_o1, k2]) # CONCATENTATION D_O1 & K2 KINET\n\n d2_u = _decoder_block_unet(out, 16) # UNET DECODER\n d2_k = _decoder_block_kinet(out1, 16) # KINET DECODER\n\n d_u2 = _crfb(d2_u, d2_k, 16, 0.25) # CRFB D_U2 UNET\n d_o2 = _crfb(d2_k, d2_u, 16, 4) # CRFB D_O2 KINET\n\n out = layers.Add()([d_u2, s1]) # CONCATENATION D_U2 & S1 UNET\n out1 = layers.Add()([d_o2, k1]) # CONCATENATION D_O2 & K2 KINET\n\n d3_u = _decoder_block_unet(out, 8) # UNET DECODER\n d3_k = _decoder_block_kinet(out1, 8) # KINET DECODER\n\n out = layers.Add()([d3_u, d3_k]) # FINAL CONCATENATION OUTPUT FROM UNET AND KINET\n\n out = layers.Conv2D(num_classes, 1, padding=\"valid\", activation=\"relu\")(out) # FINAL CONVOLUTIONAL LAYER\n kiunet_model = keras.Model(inputs, out, name=\"KiU-Net\") # MODEL\n\n return kiunet_model\n\n\ndef unet(input_shape, num_classes):\n \"\"\"\n Architecture of the U-Net model\n :param num_classes:\n :param input_shape: Tuple of the shape of the input\n :return: keras.Model\n \"\"\"\n inputs = layers.Input(shape=input_shape)\n print(input_shape)\n # ENCODER BLOCK #\n\n s1 = _encoder_block_unet(inputs, 32) # U NET ENCODER\n s2 = _encoder_block_unet(s1, 64) # UNET ENCODER\n s3 = _encoder_block_unet(s2, 128) # UNET ENCODER\n s4 = _encoder_block_unet(s3, 256) # UNET ENCODER\n out = _encoder_block_unet(s4, 512) # UNET ENCODER\n\n # DECODER BLOCK #\n\n out = _decoder_block_unet(out, 256)\n out = layers.Add()([out, s4])\n out = _decoder_block_unet(out, 128)\n out = layers.Add()([out, s3])\n out = _decoder_block_unet(out, 64) # UNET DECODER\n out = layers.Add()([out, s2]) # CONCATENTATION D_U1 & S2 UNET\n out = _decoder_block_unet(out, 32) # UNET DECODER\n out = layers.Add()([out, s1]) # CONCATENATION D_U2 & S1 UNET\n out = _decoder_block_unet(out, num_classes) # UNET DECODER\n unet_model = tf.keras.Model(inputs, out, name=\"U-Net\") # MODEL\n\n return unet_model\n\n\ndef segnet(input_shape, num_classes):\n \"\"\"\n Architecture of the Seg-Net model\n :param num_classes:\n :param input_shape: Tuple of the shape of the input\n :return: 
keras.Model\n \"\"\"\n inputs = layers.Input(shape=input_shape)\n s0 = _encoder_block_unet(inputs, 16)\n s1 = _encoder_block_unet(s0, 32)\n s2 = _encoder_block_unet(s1, 64)\n s3 = _encoder_block_unet(s2, 128)\n s4 = _encoder_block_unet(s3, 256)\n\n out = _decoder_block_unet(s4, 128)\n out = _decoder_block_unet(out, 64)\n out = _decoder_block_unet(out, 32)\n out = _decoder_block_unet(out, 16)\n out = _decoder_block_unet(out, num_classes)\n segnet_model = tf.keras.Model(inputs, out, name=\"SegNet\")\n\n return segnet_model\n","repo_name":"nkitner/KiU-NetProject","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72545092425","text":"from django import forms\nfrom .models import Company, Copier\n\n\nclass CopierForm(forms.ModelForm):\n class Meta:\n model = Copier\n fields = ('company', 'color_mono', 'bit', 'model_name', 'driver_file')\n\n\nclass CompanyScannerForm(forms.ModelForm):\n\n scanner = forms.FileField(required=True)\n\n def __init__(self, *args, **kwargs):\n super(CompanyScannerForm, self).__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['readonly'] = True\n\n class Meta:\n model = Company\n fields = ('name', 'scanner')\n","repo_name":"Clark-Park/printer","sub_path":"copier/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11636775318","text":"#\n# @lc app=leetcode.cn id=743 lang=python3\n#\n# [743] 网络延迟时间\n#\n\n# @lc code=start\n# Dijkstra\n\nimport collections\nclass Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n graph = collections.defaultdict(dict)\n for s,e,c in times:\n graph[s][e] = c\n\n seen = [-1]*n\n Q = [[0, k]] # 初始位置\n\n heapq.heapify(Q)\n\n while Q:\n cost, cur_pos = heapq.heappop(Q)\n if seen[cur_pos-1] == -1:\n seen[cur_pos-1] = cost\n for next_pos in graph[cur_pos]:\n heapq.heappush(Q, [cost + graph[cur_pos][next_pos], next_pos])\n\n if -1 in seen:\n return -1\n return max(seen)\n\n# @lc code=end\n\n","repo_name":"TheBinKing/Programming-learning","sub_path":"Leetcode-Python3/743.网络延迟时间.py","file_name":"743.网络延迟时间.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38837608771","text":"import matplotlib\r\nimport LT_Fit.parameters as P\r\nimport LT_Fit.gen_fit as G\r\nfrom matplotlib.ticker import FormatStrFormatter\r\nfrom getData import *\r\n\r\nl = 15.2 # length\r\nw = 14.0 # width\r\n\r\nd = 61.6 # separation of the plates\r\n\r\nN = 10000 # number of rays to generate\r\n\r\n# in degree\r\nalpha = 90\r\nfactor = 1 # (np.cos(alpha*np.pi/180))**2\r\nrate = 0.66 * factor\r\n\r\nbeans = 10\r\n\r\n\r\ndef Hit_by_Angle(N, ang):\r\n hit = 0;\r\n for i in range(N):\r\n\r\n cost = np.power(np.random.rand(), 1 / 3)\r\n\r\n # The phi distribution is thrown “flat”.\r\n\r\n phi = np.random.rand() * 2 * np.pi\r\n\r\n # place on the top where the ray is going to hit\r\n\r\n x_top = np.random.rand() * w\r\n y_top = np.random.rand() * l\r\n\r\n # We “know” that this one hits the top scintillator. Now see if it hits the bottom one.\r\n # Coordinate system: Z is up, B is wrt. Z-axis, 4 around it. X is in the “width” direction,\r\n # Y in the “length” direction. Given cos 0 and 0, see if the cosmic hits the panel at the bottom. 
For this to be true,\r\n # first calculate the direction tangents in x and y.\r\n\r\n sinp = np.sin(phi);\r\n cosp = np.cos(phi);\r\n\r\n sint = np.sqrt(1 - cost * cost)\r\n\r\n tant = sint / cost;\r\n\r\n tantx = tant * sinp;\r\n tanty = tant * cosp;\r\n # Extrapolate to the bottom counter\r\n\r\n xbot = x_top - tantx * d;\r\n ybot = y_top - tanty * d;\r\n\r\n # if the ray hit the bottom plate then the coodenates (x_bot ; y_bot) should be inside of the box [(0-w) ; (0-l)], which define the superfice of the bottom plate\r\n\r\n if xbot <= w * np.cos(ang) and xbot >= 0 and ybot <= l * np.cos(ang) and ybot >= 0:\r\n hit += 1;\r\n return hit;\r\n\r\n\r\nx = np.linspace(-1 * np.pi / 2, np.pi / 2, 46)\r\ny = [Hit_by_Angle(10000, k) for k in x]\r\ndy = np.sqrt(y)/max(y)\r\ny = y/np.max(y)\r\n\r\nf,ax = B.pl.subplots(1,num='Cosmic_Ray_Simulation')\r\nB.plot_exp(x, y, dy)\r\n\r\nA = P.Parameter(250., 'A')\r\nBs = P.Parameter(1., 'B')\r\nC = P.Parameter(1., 'C')\r\nD = P.Parameter(1., 'D')\r\n\r\n\r\ndef cosf(x):\r\n return -1*A() * np.cos(Bs() * x + C()) ** 2 + D()\r\n\r\n\r\nfit = G.genfit(cosf, [A, Bs, C, D], x=x, y=y)\r\n\r\n\r\nax.xaxis.set_major_formatter(FormatStrFormatter('%g $\\pi$'))\r\nax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(base=0.5))\r\nB.plot_line(fit.xpl,fit.ypl,color='red',label='$cos^2$ fit')\r\nlabels('Angles (radians)','Normalized Counts','Simulated Cosmic Rays',annotate='Fit: $A cos^2(Bx+C)+D$'+\"\\n\"*4,fit=fit,xy=(0.05,.62))\r\nB.pl.legend()\r\n","repo_name":"BryanZero/Undergradute-Labs","sub_path":"Cosmic_Ray/Cosmic_Ray_Simulation.py","file_name":"Cosmic_Ray_Simulation.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14896460911","text":"# https://leetcode.com/problems/find-all-anagrams-in-a-string/description/\nclass Solution:\n def findAnagrams(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: List[int]\n \"\"\"\n if len(p) > len(s):\n return []\n if len(p) == len(s):\n if p == s:\n return [0]\n else:\n return []\n if len(set(s)) == 1 and set(s) == set(p):\n return [i for i in range(len(s) - len(p) + 1)]\n r = []\n ch_dict = self.get_ch_dict(p)\n p_len = len(p)\n s_len = len(s)\n i = 0\n while i < s_len - p_len + 1:\n for j in range(p_len):\n if j == p_len - 1 and ch_dict.get(s[i + j], 0) == 1:\n r.append(i)\n while i + p_len < s_len:\n if s[i] != s[i + p_len]:\n i += p_len - 2\n break\n i += 1\n r.append(i)\n ch_dict = self.get_ch_dict(p)\n break\n if ch_dict.get(s[i + j], 0) < 1:\n ch_dict = self.get_ch_dict(p)\n break\n else:\n ch_dict[s[i + j]] -= 1\n i += 1\n return r\n\n def get_ch_dict(self, p):\n ch_dict = {}\n for c in p:\n if ch_dict.get(c, None):\n ch_dict[c] += 1\n else:\n ch_dict[c] = 1\n return ch_dict\n\n def dfs(self, ch_dict, s):\n if len(s) == 1 and ch_dict.get(s, 0) == 1:\n return True\n if len(ch_dict) == 1 and len(set(s)) == 1 and ch_dict.get(s[0], 0) == len(s):\n return True\n\n if s[0] not in ch_dict or ch_dict.get(s[0], 0) < 1:\n return False\n else:\n ch_dict[s[0]] -= 1\n r = self.dfs(ch_dict, s[1:])\n ch_dict[s[0]] += 1\n return r\n\n\nso = Solution()\ns = \"acdcaeccde\"\np = \"c\"\nprint(so.findAnagrams(s, p))\n","repo_name":"shaheming/leecode","sub_path":"summer/2018_07_16/find-all-anagrams-in-a-string.py","file_name":"find-all-anagrams-in-a-string.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
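The `findAnagrams` record above uses a restart-on-mismatch scan that is hard to verify. For comparison, here is a minimal sliding-window sketch of the same task (my illustrative rewrite, not code from that repository): keep a running character count for the current window of `len(p)` characters and compare it against the counts of `p` as the window slides, which is O(len(s)) overall.

```python
# Hedged sketch: fixed-size sliding window with character counts.
from collections import Counter

def find_anagrams(s: str, p: str) -> list:
    n, m = len(s), len(p)
    if m > n:
        return []
    need = Counter(p)        # target character counts
    window = Counter(s[:m])  # counts for the first window s[0:m]
    result = [0] if window == need else []
    for i in range(m, n):
        window[s[i]] += 1          # character entering the window
        window[s[i - m]] -= 1      # character leaving the window
        if window[s[i - m]] == 0:
            del window[s[i - m]]   # drop zero entries so Counter equality stays exact
        if window == need:
            result.append(i - m + 1)
    return result

assert find_anagrams("cbaebabacd", "abc") == [0, 6]
```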
+{"seq_id":"18390897424","text":"from Core.Mode import ToStringMode\n\n\nclass Dimension:\n \"\"\"Holds x and y dimension in one place\"\"\"\n\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n\n def __str__(self, mode: ToStringMode = ToStringMode.Pretty):\n if mode == ToStringMode.Pretty:\n return \"[\" + str(self.x) + \", \" + str(self.y) + \"]\"\n else:\n return str(self.x) + \" \" + str(self.y)\n","repo_name":"MichalTarnacki/_rozne","sub_path":"_python/symulacja_organizmow/Core/Dimension.py","file_name":"Dimension.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36974592956","text":"# To add a new cell, type '#%%'\n# To add a new markdown cell, type '#%% [markdown]'\n#%%\n#from IPython import get_ipython\n\n#%% [markdown]\n# # Continuous Control\n# \n# ---\n# \n# You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!\n# \n# ### 1. Start the Environment\n# \n# Run the next code cell to install a few packages. This line will take a few minutes to run!\n\n#%%\n#get_ipython().system('pip -q install ./python')\n\n#%% [markdown]\n# The environments corresponding to both versions of the environment are already saved in the Workspace and can be accessed at the file paths provided below. \n# \n# Please select one of the two options below for loading the environment.\n\n#%%\nfrom unityagents import UnityEnvironment\nimport numpy as np\n\n# select this option to load version 1 (with a single agent) of the environment\nenv = UnityEnvironment(file_name='Reacher_Linux/Reacher.x86_64',no_graphics=True)\n\n# select this option to load version 2 (with 20 agents) of the environment\n# env = UnityEnvironment(file_name='/data/Reacher_Linux_NoVis/Reacher.x86_64')\n\n#%% [markdown]\n# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.\n\n#%%\n# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\nfor brains in env.brain_names:\n print(brains)\n\n#%% [markdown]\n# ### 2. Examine the State and Action Spaces\n# \n# Run the code cell below to print some information about the environment.\n\n#%%\n# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space \nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\nprint('The state for the first agent looks like:', states[0])\n\n#%% [markdown]\n# ### 3. 
Loading DDPG Agent\n# \n\n#%%\nfrom importlib import reload \nfrom collections import deque\nimport ddpg_agent\nreload(ddpg_agent)\nimport torch\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"DEVICE is \", DEVICE)\n#from workspace_utils import active_session\n\n\n#%%\ndef play(env = None):\n for i in range(3):\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n for j in range(200):\n action = agent.act(state)\n env.render()\n state, reward, done, _ = env.step(action)\n if done:\n break \n\n#%%\ndef train(env = None, n_episodes=200, agent = None, \n checkpoint_score = 25, breakpoint_score = 30, filename_prefix = \"\"):\n \n \n scores_deque = deque(maxlen=100)\n scores = []\n goal_steps = []\n goal_rewards = []\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n agent.reset()\n score = 0\n modified_score = 0\n goal_steps.clear()\n goal_rewards.clear()\n while True:\n action = agent.act(state)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n if reward > 0.0:\n #goal_steps.append(t)\n goal_rewards.append(reward)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n\n scores_deque.append(score)\n scores.append(score)\n fname = filename_prefix\n\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tscore: {:.2f}'.\n format(i_episode, np.mean(scores_deque), score), end=\"\")\n #print(\"\\ngoal_steps \", goal_steps)\n #print(\"goal_rewards \", goal_rewards)\n #print(\"\\n\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n if np.mean(scores_deque)>=checkpoint_score:\n fname += str(i_episode)\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))\n torch.save(agent.qnetwork_local.state_dict(), fname + 'checkpoint.pth')\n if np.mean(scores_deque)>=breakpoint_score:\n fname += str(i_episode)\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))\n torch.save(agent.qnetwork_local.state_dict(), fname + 'checkpoint.pth')\n break \n \n return scores\n\n\n\n\n#%%\n\nagent = ddpg_agent.Agent(state_size=state_size, \n action_size=action_size, \n random_seed=0)\n\n\n#%%\nrr_scores = train( env = env,\n agent = agent) # random replay training\n\n\n#%%\nimport matplotlib.pyplot as plt\n\ndef plot_scores(scores):\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n\n\n#%%\nplot_scores(rr_scores) # random replay scores\n\n#%% \n# When finished, you can close the environment.\n# load the weights from file\nplay(env = env)\n\n\n \n\n#%%\nenv.close()\n\n\n","repo_name":"pinakigupta/DDPG-ReacherEnv","sub_path":"Continuous_Control (copy).py","file_name":"Continuous_Control (copy).py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18556066024","text":"\nfrom kivy.app import 
App\nfrom kivy.properties import ObjectProperty\nfrom kivy.graphics import Rectangle, Color\nfrom kivy.lang import Builder\n\nkv = \"\"\"\nBoxLayout:\n orientation: 'vertical'\n Button:\n btn: btn\n text: \"something\"\n mouse_pos: btn.text=\"Text Changed\"\n Button:\n id: btn\n text: \"another thing\"\n\"\"\"\n\nclass Test(App):\n layout = ObjectProperty(None)\n def build(self):\n return Builder.load_string(kv)\n\nif(__name__ == '__main__'):\n Test().run()\n","repo_name":"ShahadatHossainRafi/Digitas","sub_path":"Temp/on widget/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34923723415","text":"#\n# code skeleton from https://github.com/janvdvegt/KernelMixtureNetwork\n# this version additionally supports fit_by_crossval and multidimentional Y\n#\nimport math\nimport numpy as np\nimport sklearn\nimport tensorflow as tf\nimport edward as ed\nfrom edward.models import Categorical, Mixture, MultivariateNormalDiag\nfrom cde.utils.tf_utils.network import MLP\nimport cde.utils.tf_utils.layers as L\nfrom cde.utils.tf_utils.layers_powered import LayersPowered\nfrom cde.utils.serializable import Serializable\n#import matplotlib.pyplot as plt\n\n\nfrom cde.utils.center_point_select import sample_center_points\nfrom cde.density_estimator.BaseNNMixtureEstimator import BaseNNMixtureEstimator\n\nimport logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n\nclass KernelMixtureNetwork(BaseNNMixtureEstimator):\n\n \"\"\" Kernel Mixture Network Estimator\n\n https://arxiv.org/abs/1705.07111\n\n Args:\n name: (str) name space of MDN (should be unique in code, otherwise tensorflow namespace collitions may arise)\n ndim_x: (int) dimensionality of x variable\n ndim_y: (int) dimensionality of y variable\n center_sampling_method: String that describes the method to use for finding kernel centers. 
Allowed values \\\n [all, random, distance, k_means, agglomerative]\n n_centers: Number of kernels to use in the output\n keep_edges: Keep the extreme y values as center to keep expressiveness\n init_scales: List or scalar that describes (initial) values of bandwidth parameter\n train_scales: Boolean that describes whether or not to make the scales trainable\n x_noise_std: (optional) standard deviation of Gaussian noise over the the training data X\n y_noise_std: (optional) standard deviation of Gaussian noise over the the training data Y\n adaptive_noise_fn: (callable) that takes the number of samples and the data dimensionality as arguments and returns\n the noise std as float - if used, the x_noise_std and y_noise_std have no effect\n entropy_reg_coef: (optional) scalar float coefficient for shannon entropy penalty on the mixture component weight distribution\n weight_decay: (float) the amount of decoupled (http://arxiv.org/abs/1711.05101) weight decay to apply\n l2_reg: (float) the amount of l2 penalty on neural network weights\n l1_reg: (float) the amount of l1 penalty on neural network weights\n weight_normalization: boolean specifying whether weight normalization shall be used\n data_normalization: (boolean) whether to normalize the data (X and Y) to exhibit zero-mean and std\n dropout: (float) the probability of switching off nodes during training\n random_seed: (optional) seed (int) of the random number generators used\n \"\"\"\n\n def __init__(self, name, ndim_x, ndim_y, center_sampling_method='k_means', n_centers=50, keep_edges=True,\n init_scales='default', hidden_sizes=(16, 16), hidden_nonlinearity=tf.nn.tanh, train_scales=True,\n n_training_epochs=1000, x_noise_std=None, y_noise_std=None, adaptive_noise_fn=None, entropy_reg_coef=0.0,\n weight_decay=0.0, weight_normalization=True, data_normalization=True, dropout=0.0, l2_reg=0.0, l1_reg=0.0,\n random_seed=None):\n\n Serializable.quick_init(self, locals())\n self._check_uniqueness_of_scope(name)\n\n self.name = name\n self.ndim_x = ndim_x\n self.ndim_y = ndim_y\n\n self.random_seed = random_seed\n self.random_state = np.random.RandomState(seed=random_seed)\n tf.set_random_seed(random_seed)\n\n self.n_centers = n_centers\n\n self.hidden_sizes = hidden_sizes\n self.hidden_nonlinearity = hidden_nonlinearity\n\n self.n_training_epochs = n_training_epochs\n\n # center sampling parameters\n self.center_sampling_method = center_sampling_method\n self.keep_edges = keep_edges\n\n # regularization parameters\n self.x_noise_std = x_noise_std\n self.y_noise_std = y_noise_std\n self.adaptive_noise_fn = adaptive_noise_fn\n self.entropy_reg_coef = entropy_reg_coef\n self.weight_decay = weight_decay\n self.l2_reg = l2_reg\n self.l1_reg = l1_reg\n self.weight_normalization = weight_normalization\n self.data_normalization = data_normalization\n self.dropout = dropout\n\n if type(init_scales) is str and init_scales == 'default':\n init_scales = np.array([0.7, 0.3])\n\n self.n_scales = len(init_scales)\n self.train_scales = train_scales\n self.init_scales = init_scales\n # Transform scales so that the softplus will result in passed init_scales\n self.init_scales_softplus = [np.log(np.exp(s) - 1) for s in init_scales]\n\n self.can_sample = True\n self.has_pdf = True\n self.has_cdf = True\n\n self.fitted = False\n\n # build tensorflow model\n self._build_model()\n\n def fit(self, X, Y, eval_set=None, verbose=True):\n \"\"\" Fits the conditional density model with provided data\n\n Args:\n X: numpy array to be conditioned on - shape: (n_samples, 
n_dim_x)\n Y: numpy array of y targets - shape: (n_samples, n_dim_y)\n eval_set: (tuple) eval/test set - tuple (X_test, Y_test)\n verbose: (boolean) controls the verbosity (console output)\n \"\"\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=True)\n\n if eval_set is not None:\n eval_set = self._handle_input_dimensionality(*eval_set)\n\n self._setup_inference_and_initialize()\n\n # data normalization if desired\n if self.data_normalization: # this must happen after the initialization\n self._compute_data_normalization(X, Y) # computes mean & std of data and assigns it to tf graph for normalization\n Y_normalized = (Y - self.data_statistics['Y_mean']) / (self.data_statistics['Y_std'] + 1e-8)\n else:\n Y_normalized = Y\n\n self._compute_noise_intensity(X, Y)\n\n # sample locations and assign them to tf locs variable\n sampled_locs = sample_center_points(Y_normalized, method=self.center_sampling_method, k=self.n_centers,\n keep_edges=self.keep_edges, random_state=self.random_state)\n self.sess.run(tf.assign(self.locs, sampled_locs))\n\n # train the model\n self._partial_fit(X, Y, n_epoch=self.n_training_epochs, eval_set=eval_set, verbose=verbose)\n self.fitted = True\n\n if verbose:\n print(\"optimal scales: {}\".format(self.sess.run(self.scales)))\n\n def _build_model(self):\n \"\"\"\n implementation of the KMN\n \"\"\"\n with tf.variable_scope(self.name):\n # add placeholders, data_normalization and data_noise if desired. Also sets up the placeholder for dropout prob\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n self.X_in = L.get_output(self.layer_in_x)\n self.Y_in = L.get_output(self.layer_in_y)\n\n # get batch size\n self.batch_size = tf.shape(self.X_ph)[0]\n\n # create core multi-layer perceptron\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=self.n_centers*self.n_scales,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n self.core_output_layer = core_network.output_layer\n\n # weights of the mixture components\n self.logits = L.get_output(self.core_output_layer)\n self.softmax_layer_weights = L.NonlinearityLayer(self.core_output_layer, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # locations of the kernelfunctions\n self.locs = tf.Variable(np.zeros((self.n_centers, self.ndim_y)), name=\"locs\", trainable=False, dtype=tf.float32) # assign sampled locs when fitting\n self.locs_layer = L.VariableLayer(core_network.input_layer, (self.n_centers, self.ndim_y), variable=self.locs, name=\"locs\", trainable=False)\n\n self.locs_array = tf.unstack(tf.transpose(tf.multiply(tf.ones((self.batch_size, self.n_centers, self.ndim_y)), self.locs), perm=[1, 0, 2]))\n assert len(self.locs_array) == self.n_centers\n\n # scales of the gaussian kernels\n log_scales_layer = L.VariableLayer(core_network.input_layer, (self.n_scales,),\n variable=tf.Variable(self.init_scales_softplus, dtype=tf.float32, trainable=self.train_scales),\n name=\"log_scales\", trainable=self.train_scales)\n\n self.scales_layer = L.NonlinearityLayer(log_scales_layer, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.scales_layer)\n self.scales_array = scales_array = tf.unstack(tf.transpose(tf.multiply(tf.ones((self.batch_size, self.ndim_y, self.n_scales)), self.scales), perm=[2,0,1]))\n assert len(self.scales_array) == self.n_scales\n\n # put mixture components together\n 
self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc in self.locs_array for scale in scales_array]\n self.mixture = mixture = Mixture(cat=cat, components=components)\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = tf.transpose(tf.multiply(tf.ones((self.ndim_y, self.n_scales)), self.scales)) * self.std_y_sym # shape = (n_scales, ndim_y)\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = tf.transpose(tf.multiply(tf.ones((self.ndim_y, self.n_scales)), self.scales)) # shape = (n_scales, ndim_y)\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.core_output_layer, self.locs_layer, self.scales_layer, self.layer_in_y])\n\n def _param_grid(self):\n param_grid = {\n \"n_training_epochs\": [500, 1000],\n \"n_centers\": [50, 200],\n \"x_noise_std\": [0.15, 0.2, 0.3],\n \"y_noise_std\": [0.1, 0.15, 0.2]\n }\n return param_grid\n\n def _get_mixture_components(self, X):\n assert self.fitted\n\n locs, weights, scales = self.sess.run([self.locs_unnormalized, self.weights, self.scales_unnormalized], feed_dict={self.X_ph: X})\n\n locs = np.concatenate([np.tile(np.expand_dims(locs[i:i+1], axis=1), (X.shape[0], self.n_scales, 1)) for i in range(self.n_centers)], axis=1)\n cov = np.tile(np.expand_dims(scales, axis=0), (X.shape[0], self.n_centers, 1))\n\n assert weights.shape[0] == locs.shape[0] == cov.shape[0] == X.shape[0]\n assert weights.shape[1] == locs.shape[1] == cov.shape[1] == self.n_centers*self.n_scales\n assert locs.shape[2] == cov.shape[2] == self.ndim_y\n assert locs.ndim == 3 and cov.ndim == 3 and weights.ndim == 2\n return weights, locs, cov\n\n def __str__(self):\n return \"\\nEstimator type: {}\\n center sampling method: {}\\n n_centers: {}\\n keep_edges: {}\\n init_scales: {}\\n train_scales: {}\\n \" \\\n \"n_training_epochs: {}\\n x_noise_std: {}\\n y_noise_std: {}\\n\".format(self.__class__.__name__, self.center_sampling_method, self.n_centers,\n self.keep_edges, self.init_scales_softplus, self.train_scales, self.n_training_epochs, self.x_noise_std,\n self.y_noise_std)\n","repo_name":"freelunchtheorem/Conditional_Density_Estimation","sub_path":"cde/density_estimator/KMN.py","file_name":"KMN.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"81"} +{"seq_id":"34437083508","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom pprint import pprint\nfrom jcamp import JCAMP_reader\n\n\ndef shift_ms_data(x_values, jcamp_dict):\n \"\"\"\n Shifts the mass spec data by subtracting by the mass of the molecule.\n \"\"\"\n\n try:\n # Get the molar mass\n molar_mass = round(float(jcamp_dict['mw']))\n\n except Exception:\n pprint(jcamp_dict)\n exit(1)\n\n x_values = molar_mass - x_values\n\n 
x_values[x_values < 0] = 0\n\n return x_values\n\n\ndef create_mass_spec_df(mass_spec_path, x_max_bin=501, mass_shift=False):\n \"\"\"\n Creates a new data frame for mass spec data.\n\n Parameters:\n mass_spec_path:\n A path to where all the mass spec jdx files are stored.\n\n x_max_bin:\n The maximum mass spec value to be captured.\n\n Return:\n Returns a dataframe where the indexes are mass spec buckets and new\n columns represent new columns.\n \"\"\"\n\n x_bins = np.arange(0, x_max_bin, 1)\n main_df = pd.DataFrame({'x': pd.cut(np.arange(1, 499, 50), x_bins)})\n main_df.set_index('x', inplace=True)\n\n print(\"extracting data...\")\n\n for jdx_file in os.listdir(mass_spec_path):\n\n if not jdx_file.endswith('.jdx'):\n continue\n\n jcamp_dict = JCAMP_reader(os.path.join(mass_spec_path, jdx_file))\n\n x_values = jcamp_dict['x'] * float(jcamp_dict['xfactor'])\n y_values = jcamp_dict['y'] * float(jcamp_dict['yfactor'])\n\n cas_idx = jcamp_dict['cas registry no'].replace('-', '')\n\n if mass_shift:\n x_values = shift_ms_data(x_values, jcamp_dict)\n\n # Scale by largest y val\n y_values = y_values / max(y_values)\n\n # Construct a temporary df for the molecule to store its x and y values\n single_df = pd.DataFrame({'x': x_values, cas_idx: y_values})\n single_df['x'] = pd.cut(single_df['x'], x_bins)\n single_df = single_df.groupby('x').aggregate(np.mean).fillna(0)\n\n main_df = main_df.merge(single_df, on='x', how='outer')\n\n return main_df\n\n\ndef main():\n MASS_SPEC_PATH = os.path.join('data', 'mass')\n mass_spec_df = create_mass_spec_df(MASS_SPEC_PATH, mass_shift=True)\n # pprint(mass_spec_df)\n\n # Construct the output path for the dataframe\n DF_PATH = os.path.join('data', 'MASS_SPEC_DF_SHIFT.csv')\n\n # Pickle the dataframe\n mass_spec_df.to_csv(DF_PATH)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Michae1CC/STAT4402_PROJECT_CODE","sub_path":"src_data/extract_mass_spec.py","file_name":"extract_mass_spec.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74756226185","text":"from itertools import chain\nfrom stats import Stats\n\n\nclass Decoder(object):\n ID = None\n PRINT = False\n STATS = None\n COUNTER = None\n\n @classmethod\n def init_class(cls):\n if cls.STATS is None:\n cls.STATS = Stats()\n cls.COUNTER = 0\n return cls\n\n @classmethod\n def init_subclasses(cls):\n assert cls is Decoder, 'Can only be called on parent class'\n cls.__name__ = ''\n cls.CLSMAP = {c.ID: c.init_class() for c in cls.__subclasses__()}\n\n @classmethod\n def lookup_class(cls, id):\n Cls = cls.CLSMAP.get(id, False)\n if Cls is False:\n Cls = type('Generic', (Decoder, ), {})\n Cls.ID = id\n cls.CLSMAP[id] = Cls.init_class()\n return Cls\n\n @classmethod\n def factory(cls, id, **kwargs):\n Cls = cls.lookup_class(id)\n return Cls(id=id, **kwargs)\n\n def __init__(self, status, timestamp, payload, length, id, channel):\n self.status = status\n self.timestamp = timestamp\n self.payload = tuple(int(v or '0', 16) for v in payload)\n self.STATS.add_sample(self.timestamp, *self.payload)\n self.COUNTER += 1\n self.length = length\n self.id = id\n self.channel = channel\n self.unknown = tuple()\n self.__knownkeys = list(self.__dict__.keys()) + ['_Decoder__knownkeys']\n self.process()\n\n def __eq__(self, other):\n if self.id != other.id:\n return False\n if self.unknown and other.unknown:\n return self.unknown == other.unknown\n\n def __subrepr__(self):\n new_keys = set(self.__dict__.keys())\n return 
(\n '{}={}'.format(key, self.__dict__[key])\n for key in new_keys.difference(self.__knownkeys))\n\n def __repr__(self):\n sub = self.__subrepr__()\n if self.unknown:\n sub = chain(sub, ('unknown={}'.format(self.unknown), ))\n\n return '<{}-{} {}>'.format(\n self.__class__.__name__, self.__class__.ID, ' '.join(sub))\n\n def process(self):\n self.unknown = self.payload\n\n\nclass ABSWheels(Decoder):\n ID = '4B0'\n WHEELS = ('f_l', 'f_r', 'r_l', 'r_r')\n\n def __subrepr__(self):\n return ('{}={}'.format(w, self.__dict__[w]) for w in self.WHEELS)\n\n def process(self):\n for i, wheel in enumerate(self.WHEELS):\n i = i * 2\n v = (((self.payload[i] << 8) + self.payload[i+1]) - 10000) / 100.0\n self.__dict__[wheel] = v\n\n\nclass Doors(Decoder):\n ID = '433'\n DOORS = ('T', 'RR', 'RL', 'FR', 'FL')\n\n def __subrepr__(self):\n return ('doors={} {}'.format(self.doors, self.value),)\n\n def process(self):\n self.value = self.payload[0]\n self.doors = []\n for i, door in enumerate(self.DOORS):\n if self.value & (1 << (i + 3)):\n self.doors.append(door)\n self.unknown = self.payload[1:]\n\n\nclass Odometer(Decoder):\n ID = '4F2'\n\n def process(self):\n self.range = self.payload[0]\n self.km = (self.payload[1] << 8) + self.payload[2]\n self.unknown = self.payload\n\n\nclass EngineGas(Decoder):\n ID = '201'\n\n def process(self):\n self.rpm = (self.payload[0] << 8) + self.payload[1]\n self.speed = ((self.payload[4] << 8) + self.payload[5]) / 100\n self.accelerator = self.payload[6]\n UNK = (2, 3, 7)\n self.unknown = tuple(self.payload[i] for i in UNK)\n\n\nclass Compass(Decoder):\n ID = '2BA'\n # PRINT = True\n\n def process(self):\n self.heading = self.payload[4]\n self.unknown = self.payload\n\n\nclass Print(Decoder):\n ID = ''\n PRINT = True\n\n def __subrepr__(self):\n return ('id={}'.format(self.ID),)\n\nDecoder.init_subclasses()\n","repo_name":"GothAck/canbustriple-ford-analysis","sub_path":"decoders.py","file_name":"decoders.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19006533429","text":"import sys\nimport os\nimport math\nimport time \nimport numpy as np\nfrom collections import Counter\n\ndef asMinutes(s): # get current time.time() to minutes\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\ndef timeSince(since, percent): # get time since start\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\ndef get_data_from_file(train_file, batch_size, seq_size):\n with open(train_file, 'r') as f:\n text = f.read().lower()\n text = text.split()\n # random samples from text for test\n random_samples = []\n for sec in range(30):\n start_pos = np.random.randint(0, len(text))\n random_samples.append(text[start_pos:start_pos+10])\n \n word_counts = Counter(text)\n sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)\n int_to_vocab = {k: w for k, w in enumerate(sorted_vocab)}\n vocab_to_int = {w: k for k, w in int_to_vocab.items()}\n n_vocab = len(int_to_vocab)\n\n print('Vocabulary size in',train_file,':', n_vocab)\n\n int_text = [vocab_to_int[w] for w in text]\n num_batches = int(len(int_text) / (seq_size * batch_size))\n in_text = int_text[:num_batches * batch_size * seq_size]\n out_text = np.zeros_like(in_text)\n out_text[:-1] = in_text[1:]\n out_text[-1] = in_text[0]\n\n in_text = np.reshape(in_text, (batch_size, -1))\n out_text = np.reshape(out_text, (batch_size, -1))\n return 
int_to_vocab, vocab_to_int, n_vocab, in_text, out_text, random_samples\n\ndef get_batches(in_text, out_text, batch_size, seq_size):\n num_batches = np.prod(in_text.shape) // (seq_size * batch_size)\n for i in range(0, num_batches * seq_size, seq_size):\n yield in_text[:, i:i+seq_size], out_text[:, i:i+seq_size]\n\ndef array_to_vocab(array, int_to_vocab):\n return ' '.join([int_to_vocab[w] for w in array])\n\nclass RedirectStdout():\n stdout = sys.stdout\n stdout_log = []\n\n def start(self):\n sys.stdout = self\n\n def stop(self):\n sys.stdout = self.stdout\n print(\"\".join([i for i in self.stdout_log]))\n stdout_log = []\n\n def write(self, text):\n self.stdout_log.append(text)\n\n def flush(self):\n pass\n\n def sclear(self):\n if os.sys.platform == 'win32':\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n self.stdout_log = []\n\nif __name__ == '__main__':\n int_to_vocab, vocab_to_int, n_vocab, in_text, out_text, random_sentece = get_data_from_file('asimov.txt', 64,5)\n b = get_batches(in_text, out_text, 64,2)\n for x,y in b:\n print(array_to_vocab(x[0], int_to_vocab), '-', array_to_vocab(y[0], int_to_vocab))\n","repo_name":"hwpoison/text-generator-pytorch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6190827278","text":"import cv2\nimport numpy as np\nfrom object_detection import ObjectDetection\n\n#net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')\n#classes = []\n#with open('coco.names', 'r') as f:\n# classes = f.read().splitlines()\n#print(classes)\n\n# Initialize Object Detection\nod = ObjectDetection() # Default: yolov3\n#od = ObjectDetection(\"models/yolov2-tiny.weights\", \"models/yolov2-tiny.cfg\")\n\ncap = cv2.VideoCapture(\"videos/los_angeles.mp4\")\n#cap = cv2.VideoCapture(\"videos/softball.mp4\")\n#cap = cv2.VideoCapture(\"videos/softball2.mp4\")\n#cap = cv2.VideoCapture(\"videos/dog.mp4\")\n\nwhile True:\n val, frame = cap.read()\n # if no more frames left in video\n if not val:\n break \n \n # Point current frame\n center_points_cur_frame = []\n\n # Detect objects on frame\n (class_ids, scores, boxes) = od.detect(frame)\n font = cv2.FONT_HERSHEY_PLAIN # Font of the text put above rectangle\n #for (box, class_id, score) in zip(boxes, class_ids, scores):\n for (box, class_id) in zip(boxes, class_ids):\n detected_class_label = od.classes[class_id]\n (x, y, w, h) = box\n cx = int((x + x + w) / 2)\n cy = int((y + y + h) / 2)\n center_points_cur_frame.append((cx, cy))\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n #cv2.putText(frame, detected_class_label + \"(\" + f'{score:.2f}' + \")\", (x, y+20), font, 2, (255,255,255), 2)\n cv2.putText(frame, detected_class_label, (x, y+20), font, 2, (255,255,255), 2) \n\n cv2.imshow(\"Frame\", frame)\n\n key = cv2.waitKey(1)\n if (key & 0xFF) == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"ErenGurs/YOLO","sub_path":"object_tracking.py","file_name":"object_tracking.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8813900248","text":"# Given a roman numeral, convert it to an integer.\n\n# Input is guaranteed to be within the range from 1 to 3999.\n\n\nclass Solution(object):\n\n def romanToInt(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n table = {'M': 1000, 'D': 500, 'C': 100,\n 'L': 50, 'X': 10, 'V': 5, 'I': 1}\n res, p 
= 0, 'I'\n for i in s[::-1]:\n if table[i] < table[p]:\n res = res - table[i]\n else:\n res = res + table[i]\n p = i\n return res\n","repo_name":"Hanaasagi/pyLeetCode","sub_path":"algorithms/013. Roman to Integer.py","file_name":"013. Roman to Integer.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14179029757","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass GitHubUserItem(scrapy.Item):\n user_id = scrapy.Field()\n user_name= scrapy.Field()\n email = scrapy.Field()\n location = scrapy.Field()\n url = scrapy.Field()\n intro = scrapy.Field()\n avatar_url = scrapy.Field()\n company = scrapy.Field()\n reps_num = scrapy.Field()\n stars_num = scrapy.Field()\n followers_num = scrapy.Field()\n following_num = scrapy.Field()\n contr_num = scrapy.Field()\n\n def getFromDict(self, user):\n self['user_id'] = user['user_id']\n self['user_name'] = user['user_name']\n self['email'] = user['email']\n self['location'] = user['location']\n self['url'] = user['url']\n self['intro'] = user['intro']\n self['avatar_url'] = user['avatar_url']\n self['company'] = user['company']\n self['reps_num'] = user['reps_num']\n self['stars_num'] = user['stars_num']\n self['followers_num'] = user['followers_num']\n self['following_num'] = user['following_num']\n self['contr_num'] = user['contr_num']\n\n def convertDict(self):\n res = {}\n res['user_name'] = self.user_name\n res['email'] = self.email\n res['location'] = self.location\n res['url'] = self.url\n res['intro'] = self.intro\n res['avatar_url'] = set.avatar_url\n res['company'] = self.company\n res['reps_nums'] = self.reps_nums\n res['starts_num'] = self.starts_num\n res['followers_num'] = self.followers_num\n res['following_num'] = self.following_num\n\nclass GitHubRepItem(scrapy.Item):\n user_id = scrapy.Field()\n rep_name = scrapy.Field()\n rep_lang = scrapy.Field()\n rep_id = scrapy.Field()\n commits_num = scrapy.Field()\n forks_num =scrapy.Field()\n stars_num = scrapy.Field()\n forked = scrapy.Field()\n\n def getFromDict(self, rep):\n self['user_id'] = rep['user_id']\n self['rep_name'] = rep['rep_name']\n self['rep_lang'] = rep['rep_lang']\n self['rep_id'] = rep['rep_id']\n self['commits_num'] = rep['commits_num']\n self['forks_num'] = rep['forks_num']\n self['stars_num'] = rep['stars_num']\n self['forked'] = rep['forked']\n","repo_name":"Sixzeroo/GithubCrawler","sub_path":"github/github/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"4707211662","text":"\nimport _keyboard_helper as _helper\n\ndef keyup(key):\n '''send special character up'''\n li = _helper.parse_keys(key)\n for item in li:\n _helper.inputvkchar(_helper.keyboard_vkcode[_helper.specialchartovk[item]],False)\n \n\ndef keydown(key):\n '''send special character down'''\n li = _helper.parse_keys(key)\n for item in li:\n _helper.inputvkchar(_helper.keyboard_vkcode[_helper.specialchartovk[item]],True)\n \n\ndef sendkeys(keys):\n li = _helper.parse_keys(_helper.decode_key(keys))\n for item in li:\n if _helper.specialchartovk.get(item,None) != None :\n _helper.sendspecialchar(item)\n continue\n _helper.inputunicodechar(ord(item),True)\n _helper.inputunicodechar(ord(item),False)\n 
","repo_name":"xianfeng0115/PyDiagnosis","sub_path":"API/modechange/zeus/osinput/windows/_keyboard.py","file_name":"_keyboard.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32545246851","text":"# DQN/DDQN hyperparams and constants\nEPSILON_DECAY_RATE = 0.99\nMIN_EPSILON = 0.01\nGAMMA = 0.90\nBATCH_SIZE = 32\nLEARNING_RATE = 0.00025\nMEMORY_SIZE = 20000\nCOPY_STEPS = 1e4\nEPSILON = 1\n\n# PPO hyperparams and constants\nSTEP_AMOUNT = 4000\nUPDATE_FREQUENCY = 5\nCLIP_RANGE = 0.2\nACTOR_LEARNING_RATE = 0.00025\nCRITIC_LEARNING_RATE = 0.001\nOPTIMIZER_EPSILON = 1e-4\nPPO_GAMMA = 0.95\nPPO_LAMBDA = 0.95\nPPO_EPOCHS = 30\n\n\n# Env constants\nCHECKPOINT_AMOUNT = 10\nREPLAY_FRAME_COUNT = 4\nSKIP_AND_STACK_AMOUNT = 4\nENV_HEIGHT = 84\nENV_WIDTH = 84\n\n# WandB constants\n# These needs to be changed to reflect your wandb project\nWANDB_ENTITY = \"idatt2502-project\"\nWANDB_PPO_PROJECT = \"super-mario-ppo\"\nWANDB_DDQN_PROJECT = \"super-mario-ddqn\"\nWANDB_DQN_PROJECT = \"super-mario-dqn\"\nMIN_WANDB_VIDEO_REWARD = 1000\n\nMODEL_SAVE_DIR = \"trained_models\"\n\nPPO_MODEL_SAVE_NAME = f\"{MODEL_SAVE_DIR}/PPO-Mario-v0.pt\"\nMODEL_SAVE_NAME = f\"{MODEL_SAVE_DIR}/DDQN-MARIO-v1.pt\"\nTARGET_MODEL_SAVE_NAME = f\"{MODEL_SAVE_DIR}/Target-DDQN-MARIO-v1.pt\"\n\nREPLAY_BUFFER_DIR = \"memory\"\n\nSTATE_SAVE_NAME = f\"{REPLAY_BUFFER_DIR}/state_mem.pt\"\nACTION_SAVE_NAME = f\"{REPLAY_BUFFER_DIR}/action_mem.pt\"\nREWARD_SAVE_NAME = f\"{REPLAY_BUFFER_DIR}/reward_mem.pt\"\nNEXT_STATE_SAVE_NAME = f\"{REPLAY_BUFFER_DIR}/next_state_mem.pt\"\nDONE_SAVE_NAME = f\"{REPLAY_BUFFER_DIR}/done_mem.pt\"\n\n\nPICKLE_FOLDER_NAME = \"pickle\"\nTOTAL_REWARDS_PICKLE = f\"{PICKLE_FOLDER_NAME}/total_rewards.pkl\"\nNUM_IN_QUEUE_PICKLE = f\"{PICKLE_FOLDER_NAME}/num_in_queue.pkl\"\nENDING_POSISTION_PICKLE = f\"{PICKLE_FOLDER_NAME}/ending_position.pkl\"\n\nWORLD = \"SuperMarioBros-1-1-v0\"\nEPISODES = 5000\n\nAGENT_ACTIONS = [[\"right\"], [\"right\", \"A\"]]\n","repo_name":"olros/NTNU","sub_path":"IDATT2502 Anvendt maskinlæring med prosjekt/project/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13036051527","text":"from unittest import TestCase\nfrom . 
utils import predictSliceLength, makeStepPositive\n\nclass TestPredictSliceLength(TestCase):\n def testSimple(self):\n self.check(0, 10, 1)\n self.check(0, 10, 2)\n self.check(0, 10, 11)\n\n self.check(1, 10, 1)\n self.check(2, 10, 1)\n self.check(11, 10, 1)\n\n self.check(10, 0, -1)\n self.check(10, 0, -2)\n self.check(10, 3, -3)\n self.check(3, 3, -3)\n\n def check(self, start, stop, step):\n real = len(tuple(range(start, stop, step)))\n calculated = predictSliceLength(start, stop, step)\n self.assertEqual(real, calculated)\n\nclass TestMakeStepPositive(TestCase):\n def test(self):\n self.assertEqual(makeStepPositive(9, -1, -2), (1, 10, 2))\n self.assertEqual(makeStepPositive(10, 5, -1), (6, 11, 1))\n","repo_name":"JacquesLucke/animation_nodes","sub_path":"animation_nodes/data_structures/lists/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":2231,"dataset":"github-code","pt":"81"} +{"seq_id":"3448907581","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2021/8/6 10:12\r\n# @Author : Guru\r\n# @QQ:2450967798\r\n# @File : 神经网络转c语言.py\r\n\r\nimport numpy as np\r\nfrom keras import models\r\nfrom keras.models import Model\r\n\r\n# 各层神经元数\r\ndense_cell_Num = []\r\n\r\n\r\ndef read_network(file_name):\r\n model = models.load_model(file_name)\r\n dense_list = []\r\n print(\"开始读取权重...\")\r\n for i in range(1, 20): # 最大19层 该数字可以加大\r\n dense = []\r\n try:\r\n weight_Dense, bias_Dense = model.get_layer('dense_{}'.format(i)).get_weights()\r\n except:\r\n break\r\n\r\n # 遍历每一个圈中的目的是为了四舍五入权重(我使用numpy.around转换失败,所以使用遍历)\r\n weight_list = [list(item) for item in weight_Dense]\r\n for index1, y in enumerate(weight_list):\r\n for index2, x in enumerate(y):\r\n weight_list[index1][index2] = round(x, 4)\r\n # print(\"{}层网络的总共权重为\".format(i),len_temp.shape[1]*len_temp.shape[0])\r\n weight_str = str(weight_list)\r\n # print(weight_str)\r\n weight_str = weight_str.replace(\"[\", '{')\r\n weight_str = weight_str.replace(\"]\", '}')\r\n # print(weight_str)\r\n dense.append(weight_str)\r\n bias_list = [item for item in bias_Dense]\r\n print(\"该层偏差数有{}\".format(len(bias_list)))\r\n dense_cell_Num.append(len(bias_list))\r\n bias_str = str(bias_list)\r\n bias_str = bias_str.replace(\"[\", '{')\r\n bias_str = bias_str.replace(\"]\", '}')\r\n dense.append(bias_str)\r\n dense_list.append(dense)\r\n print(\"*\" * 100)\r\n return dense_list\r\n\r\n\r\ndef test_model(model_name):\r\n print(\"开始测试...\")\r\n model = models.load_model(model_name)\r\n a = np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]])\r\n y = model.predict(a)\r\n print(\"测试结果为:\", y)\r\n\r\n\r\ndef copy_weight_to_C(dense_list, act, txtname):\r\n # 网络层数\r\n dense_Num = len(dense_list)\r\n head_code = '''\r\n# include\r\n# include\r\n// 单层神经元个数定义\r\n#define dense0_num {0}\r\n#define input_num {0}'''.format(dense_cell_Num[0])\r\n for index, cell_Num in enumerate(dense_cell_Num):\r\n head_code = head_code + '''\r\n#define dense{}_num {}'''.format(index + 1, cell_Num)\r\n # print(head_code)\r\n\r\n # 先添加第一层网络参数权重\r\n head_code += '''\r\nfloat weight_array_1[input_num][dense1_num]={};\r\nfloat bias_array_1[dense1_num]={};\r\n'''.format(dense_list[0][0], dense_list[0][1])\r\n # print(head_code)\r\n for index, weight in enumerate(dense_list):\r\n if index == 0: continue\r\n head_code += '''\r\nfloat weight_array_{}[dense{}_num][dense{}_num]={};\r\nfloat bias_array_{}[dense{}_num]={};\r\n'''.format(index + 1, index, index + 1, dense_list[index][0],\r\n index 
+ 1, index + 1, dense_list[index][1])\r\n # print(head_code)\r\n head_code += '''\r\n//激活函数\r\nfloat my_sigmoid(float x)\r\n{\r\n\tfloat e=2.71828,y;\r\n\ty=1/(1+pow(e,-x));\r\n\treturn y;\r\n}\r\nfloat my_tanh(float x)\r\n{\r\n\tfloat e=2.71828,y;\r\n\ty=((pow(e,x)-pow(e,-x))/(pow(e,x)+pow(e,-x)));\r\n\treturn y;\r\n}\r\nfloat my_relu(float x)\r\n{\r\n\tfloat y;\r\n\tif (x>=0)\r\n\t\ty=x;\r\n\telse\r\n\t\ty=0;\r\n\treturn y;\r\n}\r\n''' # 添加激活函数\r\n # print(head_code)\r\n for item in range(dense_Num):\r\n head_code += '''\r\nfloat out{}[dense{}_num] ={{0}};'''.format(item + 1, item + 1)\r\n\r\n for num in range(dense_Num):\r\n head_code += '''\r\nvoid desen_{0}(float input_data[dense{2}_num])\r\n{{\r\n\tfloat x,y;\r\n\tint i=0,j=0,input=dense{2}_num,out=dense{0}_num;\r\n\t//开始计算\r\n\tfor(i=0;i int:\n four_signals = [i for i in input[: singnal_length - 1]] + [None]\n for i in range(singnal_length - 1, len(input), 1):\n four_signals[i % singnal_length] = input[i]\n if len(four_signals) == len(set(four_signals)):\n return i + 1\n\n\nif __name__ == \"__main__\":\n with input_data_path.open(\"r\") as input_file:\n print(get_marker_position(input_file.read(), 14))\n","repo_name":"Vvitek/coding-for-fun","sub_path":"t_06/t_06.py","file_name":"t_06.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15783340454","text":"import requests\r\nimport json\r\n\r\nimport time\r\n\r\nfor i in range(16,1000):\r\n data = {\r\n \"id\": i,\r\n \"title\": \"article%s\"%i,\r\n \"content\": \"content%s\"%i,\r\n \"author\": 1\r\n }\r\n res = requests.post('http://127.0.0.1:8000/api/article/', data)\r\n print(res.text)","repo_name":"peacefulyin/BeiqiArtProduct","sub_path":"BeiqiArtDjango/apps/article/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22098771911","text":"from requests import Session as _Session\nfrom requests.exceptions import ConnectionError, ChunkedEncodingError, Timeout, HTTPError\nfrom requests.adapters import HTTPAdapter\nimport logging\nimport time\nfrom .cookiejar import ClientCookieJar\n\ntry:\n from fake_useragent import UserAgent\nexcept ImportError:\n UserAgent = None\n ua = None\n ua_str = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'\nelse:\n ua = UserAgent(fallback='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36')\n ua_str = ua.chrome\n\nsession_logger = logging.getLogger('showroom.session')\n\n\nclass ClientSession(_Session):\n \"\"\"\n Wrapper for requests.Session.\n\n Mainly used to catch temporary errors and set a Timeout\n\n Overrides requests.Session.get() and increases max pool size\n\n Raises:\n May raise TimeoutError, ConnectionError, HTTPError, or ChunkedEncodingError\n if retries are exceeded.\n \"\"\"\n\n # TODO: set pool_maxsize based on config\n def __init__(self, pool_maxsize=100):\n super().__init__()\n self.cookies = ClientCookieJar()\n https_adapter = HTTPAdapter(pool_maxsize=pool_maxsize)\n self.mount('https://www.showroom-live.com', https_adapter)\n self.headers = {\"User-Agent\": ua_str}\n\n # TODO: post\n def get(self, url, params=None, max_delay=30.0, max_retries=20, **kwargs):\n error_count = 0\n wait = 0\n timeouts = 0\n while True:\n try:\n r = super().get(url, params=params, timeout=(3.0, 15.0), **kwargs)\n 
r.raise_for_status()\n except Timeout as e:\n session_logger.debug('Timeout while fetching {}: {}'.format(url, e))\n timeouts += 1\n wait = min(2 * 1.5 ** timeouts, max_delay*4)\n\n if timeouts > max_retries:\n session_logger.error('Max timeouts exceeded while fetching {}: {}'.format(url, e))\n # raise\n elif timeouts > max_retries // 2:\n session_logger.warning('{} timeouts while fetching {}: {}'.format(timeouts, url, e))\n\n except ChunkedEncodingError as e:\n session_logger.debug('Chunked encoding error while fetching {}: {}'.format(url, e))\n error_count += 1\n wait = min(wait + error_count, max_delay)\n\n if error_count > max_retries:\n session_logger.warning('Max retries exceeded while fetching {}: {}'.format(url, e))\n raise\n\n except HTTPError as e:\n status_code = e.response.status_code\n session_logger.debug('{} while fetching {}: {}'.format(status_code, url, e))\n\n error_count += 1\n wait = min(wait + 2 + error_count, max_delay)\n\n # Some of these aren't recoverable\n if status_code == 404:\n session_logger.error('Getting {} failed permanently: 404 page not found'.format(url))\n raise # PageNotFoundError(e) # ?\n elif status_code == 403:\n session_logger.error('Getting {} failed permanently: 403 permission denied'.format(url))\n raise # specific error?\n elif status_code == 402:\n session_logger.error('Getting {} failed permanently: '\n '401 auth required (not implemented)'.format(url))\n raise\n elif status_code == 429:\n session_logger.error('Too many requests while getting {}: {}'.format(url, e))\n wait += 5 * 60.0\n elif 400 <= status_code < 500:\n session_logger.error('Getting {} failed permanently: {}'.format(url, e))\n raise\n\n if error_count > max_retries:\n session_logger.warning('Max retries exceeded while fetching {}: {}'.format(url, e))\n raise\n\n except ConnectionError as e:\n session_logger.debug('ConnectionError while accessing {}: {}'.format(url, e))\n\n error_count += 1\n wait = min(wait + 2 * error_count, max_delay)\n\n # ConnectionErrors are assumed to be always recoverable\n # if error_count > max_retries:\n # session_logger.warning('Max retries exceeded while fetching {}: {}'.format(url, e))\n # raise\n\n else:\n return r\n\n session_logger.debug('Retrying in {} seconds...'.format(wait))\n time.sleep(wait)","repo_name":"wlerin/showroom","sub_path":"showroom/api/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"81"} +{"seq_id":"73701777157","text":"import statistics \nimport copy \nimport json\ndef compute_better_zoom_factor(orientation, orientation_to_current_car_boxes, orientation_to_current_person_boxes):\n all_boxes = []\n if orientation in orientation_to_current_car_boxes:\n all_boxes.extend(orientation_to_current_car_boxes[orientation])\n if orientation in orientation_to_current_person_boxes:\n all_boxes.extend(orientation_to_current_person_boxes[orientation])\n if len(all_boxes) < 1:\n return orientation[-1:]\n all_boxes = [tuple(list(x)) for x in all_boxes]\n better_zoom_factor = 1\n not_much_spills_outside_zoom_3 = False\n not_much_spills_outside_zoom_2 = False\n excess_spaces = []\n for box in all_boxes:\n excess_space = 0 # (320, 180) to (960, 540) is the inner zoom. 
anything outside that is considered excess\n if box[0] < 320:\n excess_space += 320 - box[0]\n if box[2] > 960:\n excess_space += box[2] - 960\n if box[1] < 180:\n excess_space += 180 - box[1]\n if box[3] > 540:\n excess_space += box[3] - 540\n excess_spaces.append(excess_space)\n if statistics.mean(excess_spaces) < 15:\n not_much_spills_outside_zoom_3 = True\n else: # check zoom 2\n excess_spaces = []\n for box in all_boxes:\n excess_space = 0 # (320, 180) to (960, 540) is the inner zoom. anything outside that is considered excess\n if box[0] < 160:\n excess_space += 160 - box[0]\n if box[2] > 1120:\n excess_space += box[2] - 1120\n if box[1] < 90:\n excess_space += 90 - box[1]\n if box[3] > 630:\n excess_space += box[3] - 630\n excess_spaces.append(excess_space)\n if statistics.mean(excess_spaces) < 10:\n not_much_spills_outside_zoom_2 = True\n\n distances = []\n for i in range(len(all_boxes)):\n for j in range(len(all_boxes)):\n if i != j:\n box1 = all_boxes[i]\n box2 = all_boxes[j]\n x_left_1 = box1[0]\n y_left_1 = box1[1]\n x_right_1 = box1[2]\n y_right_1 = box1[3]\n x_left_2 = box2[0]\n y_left_2 = box2[1]\n x_right_2 = box2[2]\n y_right_2 = box2[3]\n center_x_1 = (x_left_1+x_right_1)/2.0\n center_y_1 = (y_left_1+y_right_1)/2.0\n center_x_2 = (x_left_2+x_right_2)/2.0\n center_y_2 = (y_left_2+y_right_2)/2.0\n dist = ((center_y_2-center_y_1)**2 + (center_x_2-center_x_1)**2)**0.5\n distances.append(dist)\n if len(distances) <= 1:\n average_distance_between_boxes = 100000\n else:\n average_distance_between_boxes = statistics.mean(distances)\n\n if not_much_spills_outside_zoom_3 and statistics.mean(excess_spaces) > 300:\n better_zoom_factor = 3\n else:\n better_zoom_factor = 1\n\n return str(better_zoom_factor)\n \n\ndef add_zoom_factors(current_formation, orientation_to_current_car_boxes, orientation_to_current_person_boxes, zoom_explorations_in_progress):\n # for certain orientations we have been exploring zoom. 
lets add those back\n # print(f\"adding zoom factors\")\n # print(json.dumps(zoom_explorations_in_progress, indent=2))\n # print(f\"that was zoom explorations in progress\")\n current_formation_as_set = set(current_formation)\n for orientation in zoom_explorations_in_progress:\n # print(f\"prior zoom restored for {orientation}\")\n num_tries_remaining, target_zoom = zoom_explorations_in_progress[orientation]\n if num_tries_remaining < 0:\n continue\n zoom_explorations_in_progress[orientation] = (zoom_explorations_in_progress[orientation][0]-1, target_zoom)\n if orientation in current_formation_as_set:\n current_formation_as_set.remove(orientation)\n current_formation_as_set.add(set_zoom_factor(orientation, target_zoom))\n\n current_formation_as_set_backup = copy.deepcopy(current_formation_as_set)\n for orientation in current_formation_as_set_backup:\n if orientation.endswith('1'): # currently zoomed out\n if orientation in zoom_explorations_in_progress and zoom_explorations_in_progress[orientation][0] > 1 and orientation.endswith('1'):\n continue\n better_zoom_factor = compute_better_zoom_factor(orientation, orientation_to_current_car_boxes, orientation_to_current_person_boxes)\n if better_zoom_factor != '1' and not orientation.endswith('1'):\n current_formation_as_set.discard(orientation)\n zoom_explorations_in_progress[orientation] = (3, better_zoom_factor)\n orientation = set_zoom_factor(orientation, better_zoom_factor)\n current_formation_as_set.add(orientation)\n # print(json.dumps(zoom_explorations_in_progress, indent=2))\n # input(f\"that was updated zoom explorations\")\n return list(current_formation_as_set), zoom_explorations_in_progress\n\ndef set_zoom_factor(orientation, target_zoom):\n return orientation[:-1] + target_zoom\n\ndef reset_zoom_factor(orientation, anchor_orientation):\n base_zoom = anchor_orientation[-1:]\n return orientation[:-1] + base_zoom\n\ndef reset_zoom_factors(current_formation, anchor_orientation):\n base_zoom = anchor_orientation[-1:]\n temp_output = []\n for orientation in current_formation:\n temp_output.append(orientation[:-1] + base_zoom)\n return temp_output\n\nif __name__ == \"__main__\":\n print(reset_zoom_factors([\"270--15-3\"], \"180-2-2\"))\n\n\n","repo_name":"michaeldwong/madeye-controller","sub_path":"zoom_explore_helper.py","file_name":"zoom_explore_helper.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34700434112","text":"#https://towardsdatascience.com/master-python-dictionary-for-beginners-in-2021-1cdbaa17ec45\r\n\r\n# dictionary and set()\r\n# dictionary: key-value pair \r\n# set: store unique values\r\n\r\ndef uniqueOccurrences(arr):\r\n \r\n dictionary = {} \r\n \r\n for i in arr: \r\n if i in dictionary: \r\n dictionary[i]+=1\r\n \r\n else:\r\n dictionary[i]=1\r\n \r\n return len(dictionary.values()) == len(set(dictionary.values()))\r\n\r\n# test case \r\narr = [1,2,2,1,1,3]\r\nuniqueOccurrences(arr)","repo_name":"vptuan/COMP1819ADS","sub_path":"Lab_07/ANS_ex6.py","file_name":"ANS_ex6.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"62"} +{"seq_id":"11316893569","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nimport base64\nimport logging\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n address = fields.Char(\n string=\"Address\",\n 
related=\"partner_id.street\",\n tracking=True)\n population_id = fields.Many2one(\n 'res.partner.population',\n string=\"Population\",\n tracking=True)\n sale_type = fields.Selection([\n ('maintenance','Maintenance'),\n ('mounting','Mounting'),\n ('repair','Repair')],string=\"Sale type\")\n type_contract = fields.Selection([\n ('normal','Normal'),\n ('risk','All risk')],string=\"Contract type\")\n is_create_task = fields.Boolean(\n string=\"Create task\",\n tracking=True,\n related=\"sale_type_id.is_create_task\")\n check_contract_type = fields.Boolean(\n compute=\"_compute_check_contract_type\",\n )\n\n type_service_id = fields.One2many(\n 'sale.check.type.contract',\n 'order_id',\n string='Type service'\n )\n pdf_file_sale_contract = fields.Binary(compute=\"action_get_attachment\")\n signature_url_text = fields.Text()\n check_signature = fields.Boolean()\n\n @api.depends('sale_type_id')\n def _compute_check_contract_type(self):\n for record in self:\n record.type_contract = False\n if record.sale_type_id.code == '01':\n record.check_contract_type = True\n else:\n record.check_contract_type = False\n\n\n @api.constrains('contract_line_ids')\n def _check_exist_record_in_lines(self):\n for rec in self:\n exis_record_lines = []\n for line in rec.contract_line_ids:\n if line.contact_id.id in exis_record_lines:\n raise ValidationError(_(\n 'The item should be one per line'))\n exis_record_lines.append(line.contact_id.id)\n\n @api.onchange('type_service_id')\n def get_item_count(self):\n for rec in self:\n count = 1\n for line in rec.type_service_id:\n line.item = count\n count += 1\n\n def get_table_type_contracts(self):\n\n flag = False\n table = '
<ul>'\n        for type_service_id in self.type_service_id:\n            flag = True\n            table += '<li>' + str(type_service_id.type_service_id.name) + '</li>'\n        \n        table += '</ul>
'\n return table if flag else False\n \n def action_contract_send(self):\n self.ensure_one()\n template = self.env.ref('sat_companies_sale.email_contract_signature')\n lang = self.env.context.get('lang')\n template_id = template.id\n if template.lang:\n lang = template._render_lang(self.ids)[self.id]\n ctx = {\n 'default_model': 'sale.order',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'mark_so_as_sent': True,\n 'custom_layout': \"mail.mail_notification_paynow\",\n 'proforma': self.env.context.get('proforma', False),\n 'force_email': True,\n 'model_description': self.with_context(lang=lang).type_name,\n }\n return {\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(False, 'form')],\n 'view_id': False,\n 'target': 'new',\n 'context': ctx,\n }\n\n def _compute_file_sale_contract(self):\n pdf = self.env.ref('sat_companies_sale.action_email_contract_signature').render_qweb_pdf(self.ids)\n b64_pdf = base64.b64encode(pdf[0])\n \"\"\"\n for record in self:\n pdf_file = self.env.ref('sat_companies_sale.action_email_contract_signature').report_action(self)\n if pdf_file:\n record.pdf_file_sale_contract = pdf_file\n else:\n record.pdf_file_sale_contract = False\n \"\"\"\n @api.depends('check_signature')\n def action_get_attachment(self):\n logging.info('******************action_get_attachment***********************')\n logging.info(self.check_signature)\n for record in self:\n if record.check_signature == True:\n pdf = self.env.ref('sat_companies_sale.action_email_contract_signature')._render_qweb_pdf(self.ids)\n print(pdf)\n b64_pdf = base64.b64encode(pdf[0])\n record.pdf_file_sale_contract = b64_pdf\n else:\n record.pdf_file_sale_contract = False","repo_name":"Eqilibrium-Hub/qr_scanner_option_V14","sub_path":"sat_companies_sale/models/.ipynb_checkpoints/sale_order-checkpoint.py","file_name":"sale_order-checkpoint.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33787391456","text":"N = int(input())\nsm = cnt = 0\nfor i in range(2, N + 1):\n for j in range(2, int(i**0.5 + 1)):\n if not i % j:\n break\n else:\n sm += i\n cnt += 1\nprint(round((sm + 0.0001) / cnt, 2))\n","repo_name":"gbrs/DA_ML","sub_path":"python/2_6_1.py","file_name":"2_6_1.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34897678289","text":"\n\n##### tcp part ####\n\n########## USED LIBRARIES #############\nimport select\nimport socket\nimport sys\nimport time\nimport pickle\nimport struct\n\nimport threading\nimport time\nfrom helpers import hashlib,checksum,safe_open_file,split_data,create_packet,open_packet,config_window,create_finalizer,header_config,file_config, printExtra\n########################################################\n\n\n#### GET ARGUMENTS FROM TERMINAL ######\nserver = str(sys.argv[1])\nudp_listen = int(sys.argv[2])\ntcp_listen = int(sys.argv[3])\n\nudp_sender = int(sys.argv[4])\ntcp_sender = int(sys.argv[5])\n\n########################################\n\n\n## GET HEADER CONFIGURATIONS FROM THE HELPER MODULE\n\n\nheaderSize = header_config[\"size\"]\n\ntcp_file_name = file_config[\"tcp_file_in_name\"]\nudp_file_name = file_config[\"udp_file_in_name\"]\n\n################################################\n\n\n\n### NECESSARY 
COUNTERS FOR PRINTS\npacket_count = 0\ntotal_time = 0\naverage_time = 0\n##################################\n\n\n## Create TCP socket and connect to server ip and port\nclientsoc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nclientsoc.connect((server,tcp_listen))\n\n#verbose\nif(printExtra):\n print(\"server connection successful\")\n\n\n## open the file which will be transferred\nf = open(tcp_file_name,\"rb\")\n\n#verbose\nif(printExtra):\n print(\"opening file\")\n\n\n# read first data \nchunk = f.read(512)\n\n\ncount = 0\n\n## tcp sender loop\nwhile(chunk):\n\n ##### prepare the chunk #####\n\n # each chunk contains it's header, how long the chunk is\n # this handles the last package of the TCP which may be less than the fixed package size\n header = struct.pack(\"i\",len(chunk))\n \n #### include sending time of the chunk####\n # as mentioned in README, I sent time information as a long integer.\n # Therefore I have took 5 significant figure after the comma.\n # and packed into long in struct\n\n time_in_int = time.time()\n time_in_int = int(time_in_int*100000)\n \n timee = struct.pack(\"q\",time_in_int)\n \n # concatenate the length, time and data\n total = header + timee + chunk\n \n # send data over socket\n clientsoc.send(total)\n\n # read next chunk\n chunk = f.read(512)\n count+= 1\n\n\n ################################################\n \n## tcp operation is completed\n## close file and socket ##\nf.close()\nclientsoc.close()\n\n#verbose\nif(printExtra):\n print(\"client finished tcp\")\n\n\n\n######################################################## udp part ##########################\n\n\n\n#### timeout control for udp ### \n############################################\n## the value when the timer is not active\nnotime = -1\n\n# start time of the timer\ntimer_start = notime\n\n# timeout \ntotal_time_count = 0.5\n############################################\n\n#verbose\nif(printExtra):\n print(\"udp starting\")\n\n\n############## USED VARIABLES ##############\n\n# read 512 byte from file \nchunksize = 512\n\n# sliding window start index\nfirst_in_window = 0\n\n\n### server and client ip and ports for udp\nINCOMING_ADDRESS = (server, udp_listen)\nOUTGOING_ADDR = ('', udp_sender)\n##########################################\n\n# window size\nw_size = 10\n\n# global flag to stop \nkill_me = 0\n\n# number of packages resent\nresend_packages = 0\n\n# lock to avoid race condition when listening acknowledge and sending data\n# during modification of the global or shared data\nlock = threading.Lock()\n\n############################################\n\ndef send_thread(inputfile,udp_socket,w_size,receiver_addresss):\n\n # use shared global variables #\n global notime\n global timer_start\n global total_time_count\n global first_in_window\n global lock\n INCOMING_ADDRESS = (receiver_addresss,udp_listen)\n global kill_me\n ###########################\n timeout = 0.5\n\n # index of the data that will be send next time\n \n # local variables to count or terminate the loop\n cnt = 0\n next_ind = 0\n kill_outsider = 0\n ################################################\n \n # open file to write\n f = safe_open_file(inputfile,\"rb\")\n\n\n # divide file into chunks of data\n # see helpers file to examine de split_data func implementation\n _, num_chunks, chunks = split_data(f,chunksize)\n\n\n #verbose\n if(printExtra):\n print(\"number of chunks : \", num_chunks)\n \n\n # rearrange the window sliding\n sliding_window_length = config_window(first_in_window,num_chunks,w_size)\n\n\n \n # this 
part sets the total_acks globally\n # so that receiver_thread will check this variable and stop\n global total_acks\n total_acks = num_chunks\n\n # verbose\n if(printExtra):\n print(\"total acks\",total_acks)\n ###########################################################\n\n # init and start another thread to listen acknowledges\n #recv_thread = threading.Thread(target = receive_thread, args = (udp_socket,num_chunks ))\n\n #recv_thread.start()\n\n\n ############################################################\n \n \n\n # data sending loop #\n while(first_in_window < num_chunks):\n\n # take the lock\n lock.acquire()\n \n cnt += 1\n\n # if the next data is in our sliding window and if we didnt exceed total data size\n while( next_ind < num_chunks and next_ind < first_in_window+ sliding_window_length):\n \n\n # if flag set terminate the loop\n if(kill_me):\n kill_outsider = 1\n break\n \n # verbose\n if(printExtra):\n print('Sending packet',next_ind)\n \n # create a packet\n # see helpers file imported for implementation\n\n new_chunk = create_packet(chunks[next_ind][0],chunks[next_ind][1])\n\n # send the prepared packet\n udp_socket.sendto(new_chunk,INCOMING_ADDRESS)\n next_ind +=1\n\n # check flags to terminate outer loop\n if(kill_me or kill_outsider):\n break\n \n\n \n #timer is not running\n if (timer_start == notime ):\n # run the timer\n timer_start = time.time()\n\n \n # if timer is running and when timeout occurs \n while((timer_start != notime) and not (time.time()- timer_start >= total_time_count)):\n lock.release()\n time.sleep(timeout)\n lock.acquire()\n\n\n # stop the timer \n if time.time() - timer_start >= total_time_count:\n if(timer_start != notime):\n timer_start = notime\n\n next_ind = first_in_window\n\n # reconfigure the window\n else:\n sliding_window_length = config_window(first_in_window,num_chunks,w_size)\n\n # end of racing part\n lock.release()\n \n\n \n # finished sending udp packages \n f.close()\n\n # verbose\n if(printExtra):\n print(\"client finished sending udp packages\")\n \n\n # print resend packages from the global variable\n # which is set by the acknowledge thread\n global resend_packages\n print(\"UDP Transmission Re-transferred Packets:\",resend_packages)\n\n \n\n\n# listening the acknowledges\ndef receive_thread(udp_socket):\n\n # use global variables\n global lock\n global first_in_window\n \n global notime\n global timer_start\n global total_time_count\n global kill_me\n\n counter =1\n\n global resend_packages\n \n # get first data from the server side\n chunk, _ = udp_socket.recvfrom(540)\n\n # ack listening loop\n while True:\n #chunk, _ = udp_socket.recvfrom(512)\n \n\n # parse the received acknowledge\n acknow,_,received_checksum,_ = open_packet(chunk)\n\n # checksum the package\n if(checksum(chunk[0:4]+chunk[4:12]+chunk[28:]) == received_checksum):\n \n #verbose\n if(printExtra):\n print('Got valid ACK', acknow)\n print(\"acknow\",acknow)\n print(\"total_acks\",total_acks)\n \n # last ack is received\n # set the flag and terminate the client\n if(acknow+1 == total_acks-1):\n kill_me = 1\n break \n \n # sequence number of received acknowledge is in the sliding window\n if(acknow >= first_in_window):\n lock.acquire()\n\n # slide the window\n first_in_window = acknow+1\n \n counter+=1\n \n # stop the timer to avoid timeout\n if(timer_start != notime):\n timer_start = notime\n\n lock.release()\n\n else:\n # acknowledge is not valid\n # package will be resent\n resend_packages += 1\n\n else:\n # ack is corrupted\n # package will be resent\n 
resend_packages += 1\n\n\n ########################################\n # if socket has data\n # get data from socket\n # this part avoids hanging at recvfrom\n r = 0\n try:\n r, _, _ = select.select([udp_socket], [], [])\n except:\n pass\n \n if r:\n chunk, _ = udp_socket.recvfrom(540)\n \n ########################################\n\n\n\n## create udp socket\nudp_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nudp_socket.bind(OUTGOING_ADDR)\nfilename = \"./transfer_file_UDP.txt\"\n\n#send_thread(filename,udp_socket,10)\n\nsender = threading.Thread(target = send_thread, args = (filename,udp_socket, w_size,server))\nrecv_thread = threading.Thread(target = receive_thread, args = (udp_socket, ))\nsender.start()\nrecv_thread.daemon = True\n#recv_thread.daemon = True\nrecv_thread.start()\n\n\nsender.join()\n\nudp_socket.close()\n\n\n\n\n\n\n\n\n\n\n\n########################## end of the implementation\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n","repo_name":"iltertaha/ceng","sub_path":"Network/the3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":9875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"35972271421","text":"from . import repo\nfrom . import deb\nimport click\nimport sys\n\ndef repo_filter(dist):\n return 'stable' not in dist and 'testing' not in dist and not dist.startswith('Debian')\n\ndef fixup_deb_arch(arch):\n if arch == 'x86_64':\n return 'amd64'\n elif arch == 'aarch64':\n return 'arm64'\n # keep arch values that are already in Debian naming (e.g. 'amd64') instead of falling through to None\n return arch\n\nclass DebianMirror(repo.Distro):\n def __init__(self, arch):\n arch = fixup_deb_arch(arch)\n mirrors = [\n deb.DebMirror('http://mirrors.edge.kernel.org/debian/', arch, repo_filter),\n deb.DebMirror('http://security.debian.org/', arch, repo_filter),\n ]\n super(DebianMirror, self).__init__(mirrors, arch)\n\n # For Debian mirrors, we need to override this method so that dependencies\n # can be resolved (i.e. 
build_package_tree) across multiple repositories.\n # This is namely required for the linux-kbuild package, which is typically\n # hosted on a different repository compared to the kernel packages\n def get_package_tree(self, version=''):\n all_packages = {}\n all_kernel_packages = []\n packages = {}\n repos = self.list_repos()\n with click.progressbar(repos, label='Listing packages', file=sys.stderr, item_show_func=repo.to_s) as repos:\n for repository in repos:\n repo_packages = repository.get_raw_package_db()\n all_packages.update(repo_packages)\n kernel_packages = repository.get_package_list(repo_packages, version)\n all_kernel_packages.extend(kernel_packages)\n\n for release, dependencies in deb.DebRepository.build_package_tree(all_packages, all_kernel_packages).items():\n packages.setdefault(release, set()).update(dependencies)\n return packages\n\n def to_driverkit_config(self, release, deps):\n headers = []\n for dep in deps:\n if dep.find(\"headers\") != -1:\n headers.append(dep)\n if dep.find(\"kbuild\") != -1:\n headers.append(dep)\n return repo.DriverKitConfig(release + \"-\" + self.arch, \"debian\", headers)\n","repo_name":"yogirajk/kernel-crawler","sub_path":"kernel_crawler/debian.py","file_name":"debian.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"28532568078","text":"\"\"\"\n###############################################################\n#\n# Author: Gelu Liuta\n# Creation date: 06.06.2023\n# Version: 1.0.0\n#\n# The code is MVP (minimal viable product) for Windows services observability and should be used accordingly\n# Core components: python libraries (win32serviceutil, win32service, time) and the prometheus instrumentation library prometheus_client\n###############################################################\n\"\"\"\n\nimport psutil\nimport logging\nimport time\n\n# Prometheus client for instrumentation\n# the start_ttp_server is the metrics endpoint which will be scraped by Prometheus server to collect the metrics\n\nfrom prometheus_client import start_http_server, Gauge\n\n# define the metrics which should be transfered to Prometheus\nNUMBER_WIN_SERVICES= Gauge('number_of_win_services', 'Number of windows services to be observed/monitor on the target server')\nNUMBER_WIN_SERVICES_RUNNING= Gauge('number_of_win_services_running', 'Number of windows services which are running')\nNUMBER_WIN_SERVICES_STOPPED= Gauge('number_of_win_services_stopped', 'Number of windows services which are stopped')\nPERCENT_WIN_SERVICES_RUNNING= Gauge('percent_of_win_services_running', 'Percent of windows services which are running')\n\n# Decorate function with metric.\n@NUMBER_WIN_SERVICES.time()\n@NUMBER_WIN_SERVICES_RUNNING.time()\n@NUMBER_WIN_SERVICES_STOPPED.time()\n@PERCENT_WIN_SERVICES_RUNNING.time()\n\n\ndef collect_metrics(services_toBeMonitored):\n\n metrics = calculate_metrics(services_toBeMonitored)\n\n NUMBER_WIN_SERVICES.set(metrics[0])\n NUMBER_WIN_SERVICES_RUNNING.set(metrics[1])\n NUMBER_WIN_SERVICES_STOPPED.set(metrics[2])\n PERCENT_WIN_SERVICES_RUNNING.set(metrics[3])\n\n time.sleep(5)\n\ndef getService(name):\n\n service = None\n try:\n service = psutil.win_service_get(name)\n service = service.as_dict()\n except Exception as ex:\n print(str(ex))\n return service\n\ndef calculate_metrics(services_toBeMonitored):\n \"\"\"a python function to collect the metrics for running and stopped services in the monitoring scope on a target server\n services_toBeMonitored = a 
collection of the service names which are to be observed/monitored\n\n index 0 = Number of Windows services to be observed/monitored on the target server\n index 1 = Number of Windows services which are running\n index 2 = Number of Windows services which are stopped\n index 3 = Percentage of Windows services which are running\n \"\"\"\n\n metrics = [0,0,0,0]\n\n number_RunningServices = 0\n number_not_RunningServices = 0\n metrics[0] = len(services_toBeMonitored)\n\n for service_item in services_toBeMonitored:\n\n service = getService(service_item)\n print(service)\n # build the detailed log entry only when the service exists; indexing into None would raise a TypeError\n if service:\n print(\"service found\")\n logEntry = service_item + \" | \" + service[\"display_name\"] + \" | \" + service[\"binpath\"] + \" | \" + service[\"username\"] + \" | \" + service[\"start_type\"] + \" | \" + service[\"status\"] + \" | \" + str(service[\"pid\"]) + \" | \" + service[\"description\"]\n else:\n print(\"service not found\")\n logEntry = service_item + \" | service not found\"\n print(logEntry)\n\n if service and service['status'] == 'running':\n print(\"service is running\")\n number_RunningServices += 1\n logging.info(logEntry)\n else:\n print(\"service is not running\")\n number_not_RunningServices += 1\n logging.warning(logEntry)\n\n metrics[1] = number_RunningServices\n metrics[2] = number_not_RunningServices\n metrics[3] = round((number_RunningServices/metrics[0]),2) * 100\n\n return metrics\n\nif __name__ == '__main__':\n\n logging.basicConfig(filename='WindowsServiceStatus.log', filemode='a', encoding=\"UTF-8\", level=logging.INFO,\n format='%(asctime)s | %(process)d | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n list_services_toBeMonitored = ['ClicktorunSVC', 'ClipSVC', 'AJRouter']\n calc_metrics = calculate_metrics(list_services_toBeMonitored)\n\n\n print(\"Total Number of Monitored Services: \" + str(calc_metrics[0]))\n print(\"Total Number of Running Services: \" + str(calc_metrics[1]))\n print(\"Total Number of NOT Running Services: \" + str(calc_metrics[2]))\n print(\"Percent of Running Services: \" + str(calc_metrics[3]) + \" %\")\n\n # start Prometheus agent and collect metrics\n\n start_http_server(8000)\n while True:\n collect_metrics(list_services_toBeMonitored)\n","repo_name":"GeluConstantin/WindowsServicesObservability_MVP","sub_path":"src/Metrics-and-Logs/winservices-metrics-logs-collector-no-winservice.py","file_name":"winservices-metrics-logs-collector-no-winservice.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"71464357958","text":"\nfrom doctest import Example\nimport copy\nfrom typing import Dict, List, Tuple, Union, TextIO\n\nafile = open('/Users/crystal/Desktop/CSCA08/Assignment3/starter/play.txt')\n\ndef read_lines(play, character):\n \"\"\" (file open for reading, str) -> list of str\n Return the list of dialogs (with all newlines removed) made by character in play.\n >>> file = open('play.txt')\n >>> actual = read_lines(file, 'MARK ANTONY')\n >>> expected = \\\n ['I am sorry to give breathing to my purpose,--', \\\n 'Now, my dearest queen,--', \\\n \"What's the matter?\"]\n >>> actual == expected\n True\n >>> file.close()\n \"\"\"\n\n \"\"\"result = []\n # read the first line, which includes a character's name and first line of dialog\n line = play.readline().strip()\n while line:\n # parse name and dialog\n name = line[1: line.find(']')]\n dialog = line[line.find(']') + 2:]\n # read next line\n line = play.readline().strip()\n # if line is continued dialogue, store in dialogue and continue reading until not\n 
while line and ']' not in line:\n dialog = dialog + ' ' + line\n line = play.readline().strip()\n\n # break so not, so then append dialogue if character name is ours\n if name == character:\n result.append(dialog)\n # return result.\n return result\"\"\"\n\ndef bubble_sort(lst: list) -> None:\n \"\"\"Sort the items of lst in non-decreasing order, in place.\n\n >>> lst = [4, 2, 5, 6, 7, 3, 1]\n >>> bubble_sort(lst)\n >>> lst\n [1, 2, 3, 4, 5, 6, 7]\n >>> lst = [5, 2]\n >>> bubble_sort(lst)\n >>> lst\n [2, 5]\n >>> lst = [42]\n >>> bubble_sort(lst)\n >>> lst\n [42]\n >>> lst = []\n >>> bubble_sort(lst)\n >>> lst\n []\n \"\"\"\n\n for i in range(len(lst), 1, -1):\n bubble_up(lst, i)\n # print(lst) # uncomment to see the passes\n\n\ndef bubble_up(lst: list, end: int) -> None:\n \"\"\"Bubble up the largest element in lst[:end] into index end-1.\n\n Preconditions: 0 <= end <= len(lst)\n\n >>> lst = []\n >>> bubble_up(lst, 0)\n >>> lst\n []\n >>> lst = [42]\n >>> bubble_up(lst, 1)\n >>> lst\n [42]\n >>> lst = [4, 2, 5, 6, 7, 3, 1]\n >>> bubble_up(lst, 7)\n >>> lst\n [2, 4, 5, 6, 3, 1, 7]\n >>> lst = [4, 2, 5, 6, 7, 3, 1]\n >>> bubble_up(lst, 5)\n >>> lst\n [2, 4, 5, 6, 7, 3, 1]\n\n \"\"\"\n\n for i in range(1, end):\n if lst[i - 1] > lst[i]:\n lst[i - 1], lst[i] = lst[i], lst[i - 1] # swap!\n\nfrom typing import List\n\ndef insert(lst: List[int], v: int) -> None:\n \"\"\"Insert v into lst just before the rightmost item greater than v, or at\n index 0 if no items are greater than v.\n\n >>> my_list = [3, 10, 4, 2]\n >>> insert(my_list, 5)\n >>> my_list\n [3, 5, 10, 4, 2]\n >>> my_list = [5, 4, 2, 10]\n >>> insert(my_list, 20)\n >>> my_list\n [20, 5, 4, 2, 10]\n \"\"\"\n # init right most 0 in case none\n rightmost = 0\n # add val to lst\n lst.append(v)\n # find right most greatest INDEX\n for i in range(0, len(lst)-1):\n if lst[i] > v:\n rightmost = i\n # loop backwards, set j-1 to j so that [1,2,3,4] becomes [1,1,2,3]\n for j in range(len(lst)-1, rightmost-1, -1):\n lst[j] = lst[j-1]\n #.set index to v\n lst[rightmost] = v\n \ni = 'a'\nj = 'b'\nprint('({1}, {0})'.format(i, j))","repo_name":"cdecry/CSCA08","sub_path":"Assignment3/starter/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25221048891","text":"from Motor.motor import *\nfrom network import *\nfrom camera import *\nfrom queue import *\nfrom radar import *\nfrom gps import *\nfrom compass import *\nfrom waypoint import *\n\n\nfrom threading import Thread\nimport time\nimport json\n\n\nclass Controller:\n def __init__(self):\n self.motor = Motor()\n\n # NETWORK THREAD\n self.queue = Queue() # Shared queue\n self.network = Network(self.queue)\n self.network.start()\n\n # SENSORS SETUP\n self.gps = Gps()\n self.compass = Compass()\n self.radar = Radar()\n\n # CAMERA THREAD\n self.camera = Camera()\n self.camera.start()\n\n # WAYPOINT NAVIGATION CONTROLLER\n # self.waypoint_navigation = Waypoint(self.motor, self.gps, self.compass)\n\n self.ground_station_ip_address = \"0\"\n\n #self.waypoint_navigation.addWaypoint([45.6327, 8.8289])\n # self.waypoint_navigation.setRun(True)\n\n ''' print(\"ROTATING\")\n while True:\n self.waypoint_navigation.rotateToBearing(self.compass.getBearingNormalized(),\n 180)'''\n\n print(\"ROVER STARTED\")\n\n def run(self):\n while True:\n try:\n # READ ONE COMMAND PACKET AND EXECUTE ALL THE COMMANDS FROM GROUND STATION\n if not self.queue.empty():\n data = 
json.loads(self.queue.get())\n\n                    for c in data['commands']:\n # SEND NEW DATA\n if c['command'] == 'update':\n self.network.sendData(self.collectAllData())\n\n # CAMERA STREAM COMMANDS\n if c['command'] == 'C_Stream_Start':\n self.ground_station_ip_address = c['value']\n self.camera.setGroundStationIpAddress(\n self.ground_station_ip_address)\n self.camera.startVideoStream()\n\n if c['command'] == 'C_Stream_Stop':\n self.camera.stopVideoStream()\n\n if c['command'] == 'C_Take_Photo':\n img_encoded = self.camera.takePhoto()\n self.network.sendBytes(img_encoded)\n\n # MOTORS COMMANDS\n if c['command'] == 'RM_dir':\n self.motor.setRightMotorDirection(c['value'])\n\n if c['command'] == 'LM_dir':\n self.motor.setLeftMotorDirection(c['value'])\n\n if c['command'] == 'RM_speed':\n self.motor.setRightMotorSpeed(c['value'])\n\n if c['command'] == 'LM_speed':\n self.motor.setLeftMotorSpeed(c['value'])\n\n # HERE ALL SENSORS LOOPS\n #self.motor.update()\n #self.waypoint_navigation.update() # disabled: the Waypoint controller is commented out in __init__, so calling update() here would raise AttributeError\n self.gps.update()\n self.radar.update()\n\n except KeyboardInterrupt:\n self.motor.handleShutdown()\n self.camera.stop()\n self.network.stop()\n self.camera.join()\n self.network.join()\n exit()\n\n def collectAllData(self):\n gps_data = self.gps.getData()\n\n data = json.dumps({\n \"motor\": {\n \"state\": \"STOP\",\n \"left_power\": self.motor.getLeftMotorSpeed(),\n \"right_power\": self.motor.getRightMotorSpeed(),\n \"left_tick\": self.motor.getLeftTick(),\n \"right_tick\": self.motor.getRightTick(),\n \"left_rpm\": self.motor.getLeftRPM(),\n \"right_rpm\": self.motor.getRightRPM()\n },\n\n \"compass\": self.compass.getBearing(),\n \"radar\": self.radar.getDistances(),\n\n \"gps\": {\n \"fix\": gps_data.has_fix,\n \"fix_quality\": gps_data.fix_quality,\n \"satellites\": gps_data.satellites,\n \"latitude\": round(gps_data.latitude, 4) if gps_data.latitude is not None else \"\",\n \"longitude\": round(gps_data.longitude, 4) if gps_data.longitude is not None else \"\",\n \"speed\": gps_data.speed_knots if gps_data.speed_knots is not None else \"\",\n \"altitude\": gps_data.altitude_m if gps_data.altitude_m is not None else \"\"\n }\n })\n\n return data\n\n\n'''JSON MESSAGE TO GROUND STATION\n{\n \"radar\": [0, 1, 2, 3, 4, 5],\n \"gps\": {\n \"latitude\": 1.00,\n \"longitude\": 1.00,\n \"speed\": 10.0,\n \"altitude\": 220\n },\n \"imu\": [pitch, roll],\n \"compass\": gradi_int,\n \"motor\": {\n \"state\": \"STOP\",\n \"left_power\": 100,\n \"right_power\": 100\n }\n}\n\n\n\n'''\n","repo_name":"MatteoFormentin/Rover","sub_path":"rover/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"37537415634","text":"#!/usr/bin/env python3\n\nimport os, sys\nimport getopt\nfrom datetime import datetime as dt\nimport yfinance as yf\n\nCWD = os.getcwd()\nEXCHANGES = {\"ASX\":\".AX\",\n \"CBOT\":\".CBT\",\n \"CME\":\".CME\",\n \"COMEX\":\".CMX\",\n \"GLOBAL\":\"=X\",\n \"HKEX\":\".HK\",\n \"KOSDAQ\":\".KQ\",\n \"KSE\":\".KS\",\n \"NASDAQ\":\"\",\n \"NYMEX\":\".NYM\",\n \"NZX\":\".NZ\",\n \"OPRA\":\"\",\n \"SGX\":\".SI\",\n \"SIX\":\".SW\",\n \"SSE\":\".SS\",\n \"TSX\":\".TO\",\n \"TWSE\":\".TW\"}\n\ndef quote(symbol, exchange, report, query):\n if exchange:\n try: symbol += EXCHANGES[exchange.upper()]\n except KeyError: pass\n symbol = symbol.upper()\n data = yf.Ticker(symbol).info\n output = \"\"\n fields = [*data.keys()]\n ignore = ['longName', 'shortName', 'name', 'symbol', 'description',\n 
'longBusinessSummary', 'logo_url', 'phone', 'address1', 'address2',\n 'state', 'city', 'zip', 'fax', 'gmtOffSetMilliseconds', 'lastMarket',\n 'maxAge', 'messageBoardId', 'market', 'fullTimeEmployees', 'twitter',\n 'exchangeTimezoneName', 'exchangeTimezoneShortName', 'financialCurrency',\n 'currentPrice', 'open', 'previousClose', 'dayHigh', 'dayLow']\n # ^ 'regularMarket' fields are used instead\n if len(fields) < 10:\n print(\"No data available for the given symbol! Please try a different input.\")\n sys.exit(1)\n # Name\n if 'longName' in fields: output = f\"{data['longName']} ({symbol})\\n\"\n else: output = f\"{data['shortName']}\\n\"\n if query:\n for f in fields:\n if query.lower() == f.lower():\n print(output, end=\"\")\n print(f\"{f[0].upper()}{f[1:]}: {data[f]}\")\n return\n print(f\"Query field not found! Displaying full data for {symbol} instead...\\n\")\n quote(symbol, \"\", False, \"\")\n return\n # Introduction Fields\n output += \" ---- \\n\"\n if 'website' in fields: output += f\"Website: {data.pop('website')}\\n\"\n if 'country' in fields: output += f\"Country: {data.pop('country')}\\n\"\n if 'sector' in fields: output += f\"Sector: {data.pop('sector')}\\n\"\n if 'industry' in fields: output += f\"Industry: {data.pop('industry')}\\n\"\n if 'quoteType' in fields: output += f\"Type: {data.pop('quoteType')}\\n\"\n if 'exchange' in fields: output += f\"Exchange: {data.pop('exchange')}\\n\"\n if 'currency' in fields: output += f\"Currency: {data.pop('currency')}\\n\"\n output += \" ---- \\n\"\n # \"Priority\" Details\n if 'regularMarketPrice' in fields:output += f\"CurrentPrice: {data.pop('regularMarketPrice')}\\n\"\n if 'bid' in fields: output += f\"Bid: {data.pop('bid')}\\n\"\n if 'ask' in fields: output += f\"Ask: {data.pop('ask')}\\n\"\n if 'regularMarketOpen' in fields: output += f\"Open: {data.pop('regularMarketOpen')}\\n\"\n if 'regularMarketPreviousClose' in fields: output += (f\"PreviousClose: \" +\n f\"{data.pop('regularMarketPreviousClose')}\\n\")\n if 'regularMarketDayHigh' in fields: output += f\"DayHigh: {data.pop('regularMarketDayHigh')}\\n\"\n if 'regularMarketDayLow' in fields: output += f\"DayLow: {data.pop('regularMarketDayLow')}\\n\"\n if 'trailingPE' in fields: output += f\"PriceEarnings: {data.pop('trailingPE')}\\n\"\n if 'marketCap' in fields: output += f\"MarketCap: {data.pop('marketCap')}\\n\"\n if 'volume' in fields: output += f\"Volume: {data.pop('volume')}\\n\"\n output += \" ---- \\n\"\n # Everything Else\n for k, v in sorted(data.items()):\n if k in ignore: continue\n if v:\n if k[0:7] == 'regular': continue\n output += f\"{k[0].upper()}{k[1:]}: {v}\\n\"\n # Output\n print(output)\n if report:\n with open(f\"{CWD}/Quote_{symbol}_\" + \n f\"{dt.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt\", 'w') as f:\n f.write(\"SIMPLE C-LI QUOTER\\n\\n\")\n f.write(f\"Report generated: {dt.now().strftime('%H:%M:%S %d-%m-%Y')}\\n\")\n f.write(\"_\" * 37 + \"\\n\\n\")\n f.write(output)\n f.write(\"\\n -- End of report.\\n\\n\")\n f.write(\"Thank you for using the Simple C-Li Quoter!\\n\")\n \ndef main():\n argList = sys.argv[1:]\n opts = \"hs:e:rq:\"; longOpts = [\"help\", \"symbol=\", \"exchange=\", \"report\", \"query=\"]\n try:\n args, vals = getopt.getopt(argList, opts, longOpts)\n initiator = [\"\", \"\", False, \"\"]\n for a, v in args:\n if a in (\"-h\", \"--help\"): help()\n elif a in (\"-s\", \"--symbol\"): initiator[0] = v\n elif a in (\"-e\", \"--exchange\"): initiator[1] = v\n elif a in (\"-r\", \"--report\"): initiator[2] = True\n elif a in (\"-q\", 
\"--query\"): initiator[3] = v\n if not initiator[0]: raise getopt.error(\"Usage error.\")\n except getopt.error:\n print(\"Incorrect usage!\")\n help()\n quote(*initiator)\n print(\"Thank you for using the Simple C-Li Quoter!\")\n\ndef help():\n print(\"Usage: ./simple_cli_quoter.py [-h] [-s SYMBOL] [-e EXCHANGE] [-r]\")\n print(\" no arguments: Display this help message.\")\n print(\" -e EXCHANGE, --exchange EXCHANGE: Exchange or Market (eg. ASX, HKEX). \" +\n \"For currencies, include as 'global'. If omitted this will default to US Markets.\")\n print(\" -s SYMBOL, --symbol SYMBOL: Ticker symbol (mandatory).\")\n print(\" -q FIELD, --query FIELD: Query for a singular field from the data (no spaces). \" +\n \"No report will be generated with this option. If the field is not found, \" +\n \"then the entire data will be output.\")\n print(\" -h, --help: Show this help message.\")\n print(\" -r, --report: Create a .txt report in the current working directory.\")\n print(\"Argument values are case-insensitive.\\nSee the README.md for further details...\")\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n sys.exit(0)\n","repo_name":"ndsi6382/Simple_CLI_Quoter","sub_path":"simple_cli_quoter.py","file_name":"simple_cli_quoter.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"69805126917","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 7 14:49:57 2017\n\n@author: DREAM\n\"\"\"\n\nimport datetime as dt\n\napp_name = \"Ping_CMDR\"\nversion = \"0.07.00\"\nname = app_name + \" V\" + version\nemail = \"David@DREAM-Enterprise.com\"\nbuild_date = \"09-17-2017\"\n\n\n#set date and time \nstartdate = dt.date.today().strftime(\"%m%d%Y\")\nstart_time = dt.datetime.now().strftime(\"%H%M%S\")\ncurrdate = dt.date.today().strftime(\"%m/%d/%Y\")\ncurrtime = dt.datetime.now().strftime(\"%H:%M:%S\")\n\n \n\n#define system varibles\ndev_count = 6\ndev_range = range(dev_count)\nmax_dev_count = 10\n\nwait = 1\nsplash_wait = 2.5\n\nwidth = 20\nlines = 4\ncent_width = int(width)-1\ndev_width = 10\n\ndiag = 0\nloop_count = 0\n\nlog_option = 0\nfile_log = \"Ping Log - \" + startdate + \" - \" + start_time\ndir_log = \"Ping Logs\"\n\noption_0 = \"Exit\"\noption_1 = \"Feature Not Supported\"\noption_2 = \"Set POS Count\"\noption_3 = \"Run Ping Tool\"\noption_4 = \"Feature Not Supported\"\noption_5 = \"Toggle Log File Function\"\n\n\nip_scheme = \"192.168.31.\"\n\nip_internet = \"8.8.8.8\"\n\ndev_list = [\"Commander\", \"Router\",\n \"POS 1\", \"POS 2\", \n \"Pinpad 1\", \"Pinpad 2\", \n \"POS 3\", \"Pinpad 3\",\n \"POS 4\", \"Pinpad 4\"]\n\nip_list = [\"11\", \"31\",\n \"101\", \"102\",\n \"126\", \"127\",\n \"103\", \"128\",\n \"104\", \"129\"]\n\nstatus_internet = \"\"\n\nstatus_list = [\"\",\"\",\n \"\",\"\",\n \"\",\"\",\n \"\",\"\",\n \"\",\"\"]\n\nconn_internet = 0\n\nconn_list = [0,0,\n 0,0,\n 0,0,\n 0,0,\n 0,0]\n\nshow = [\"\",\"\",\n \"\",\"\",\n \"\",\"\",\n \"\",\"\",\n \"\",\"\"]\n\ndiff_internet = 0\n\ndiff = [0,0,\n 0,0,\n 0,0,\n 0,0,\n 0,0]\n\nuptime_internet = currtime\n\nuptime = [currtime,currtime,\n currtime,currtime,\n currtime,currtime,\n currtime,currtime,\n currtime,currtime]\n\n\n\n","repo_name":"CyberCityCircuits/Ping_CMDR","sub_path":"Scripts/var_ping.py","file_name":"var_ping.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30656465650","text":"#PROGRAMA 
PRINCIPAL (main program):\r\ndef main():\r\n try:\r\n origen = open(\"RangoAlturas.txt\",'r')\r\n destino = open(\"PromedioAlturas.txt\",'w')\r\n except IOError as mensaje:\r\n print(mensaje)\r\n else:\r\n print(\"Proceeding to write a file with the average of the heights\")\r\n ctdad = 0\r\n suma = 0\r\n for registro in enumerate(origen):\r\n registro = list(registro)\r\n registro[1] = registro[1].replace(\"\\n\",\"\")\r\n if registro[1].isalpha():\r\n if suma != 0:\r\n try:\r\n prom = suma / ctdad\r\n prom = round(prom,2)\r\n except ZeroDivisionError:\r\n prom = 0\r\n prom = str(prom) + \"\\n\"\r\n destino.write(prom)\r\n suma = 0\r\n ctdad = 0\r\n registro[1] += \"\\n\"\r\n destino.write(registro[1])\r\n continue # a name line is not a height; skip the accumulation below to avoid float() failing on text\r\n if registro[0] == 0:\r\n continue\r\n else:\r\n ctdad += 1\r\n suma += float(registro[1])\r\n registro[1] += \"\\n\"\r\n try:\r\n prom = suma / ctdad\r\n prom = round(prom,2)\r\n except ZeroDivisionError:\r\n prom = 0\r\n prom = str(prom) + \"\\n\"\r\n destino.write(prom)\r\n origen.close()\r\n destino.close()\r\n\r\nmain()\r\n ","repo_name":"maximilianogomez/Progra1","sub_path":"Practica 6/6.3(Creación_promedioalturas).py","file_name":"6.3(Creación_promedioalturas).py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"21304164170","text":"import os\r\nimport logging\r\n\r\n# logging.disable(logging.WARNING)\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \r\n\r\nimport json\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport scipy\r\n\r\nimport tensorflow as tf\r\n# Hide GPU from visible devices\r\ntf.config.set_visible_devices([], 'GPU')\r\n\r\nimport logging\r\ntf.get_logger().setLevel(logging.ERROR)\r\n\r\nfrom Kratos_Simulators.structural_mechanics_kratos_simulator import StructuralMechanics_KratosSimulator\r\n\r\nfrom ArchitectureFactories.POD_factory import POD_Architecture_Factory\r\nfrom ArchitectureFactories.Quad_factory import Quad_Architecture_Factory\r\nfrom ArchitectureFactories.PODANN_factory import PODANN_Architecture_Factory\r\n\r\ntf.keras.backend.set_floatx('float64')\r\n \r\nclass Scipy_Solver():\r\n\r\n def __init__(self, working_path, model_path, best):\r\n self.working_path=working_path\r\n self.model_path=working_path+model_path\r\n self.results_path=self.model_path+'scipy_solver_results/'\r\n if best=='x':\r\n self.model_weights_path=self.model_path+'best/'\r\n self.model_weights_filename=self.get_last_best_filename(self.model_weights_path, 'weights_x_')\r\n self.best_name_part='_bestx_'\r\n elif best=='r':\r\n self.model_weights_path=self.model_path+'best/'\r\n self.model_weights_filename=self.get_last_best_filename(self.model_weights_path, 'weights_r_')\r\n self.best_name_part='_bestr_'\r\n elif best is None:\r\n self.model_weights_path=self.model_path\r\n self.model_weights_filename='model_weights.h5'\r\n self.best_name_part=''\r\n else:\r\n print('Value for --best argument is not recognized. 
Terminating')\r\n exit()\r\n\r\n os.makedirs(os.path.dirname(self.results_path), exist_ok=True)\r\n\r\n with open(self.model_path+\"train_config.npy\", \"rb\") as train_config_file:\r\n self.train_config = np.load(train_config_file,allow_pickle='TRUE').item()\r\n print(self.train_config)\r\n self.dataset_path=working_path+self.train_config['dataset_path']\r\n\r\n def get_last_best_filename(self, model_weights_path, prefix):\r\n matching_files = [file for file in os.listdir(model_weights_path) if file.startswith(prefix)]\r\n highest_filename = sorted(matching_files, key=lambda x: int(x[len(prefix):][:-len('.h5')]))[-1]\r\n return highest_filename\r\n\r\n def architecture_factory_selector(self, arch_config):\r\n arch_type = arch_config[\"name\"]\r\n if arch_type == 'PODANN':\r\n return PODANN_Architecture_Factory(self.working_path, arch_config)\r\n elif arch_type == 'Quad':\r\n return Quad_Architecture_Factory(self.working_path, arch_config)\r\n elif arch_type == 'POD': \r\n return POD_Architecture_Factory(self.working_path, arch_config)\r\n else:\r\n print('No valid architecture type was selected')\r\n return None\r\n \r\n def kratos_simulator_selector(self, sim_type):\r\n # if 'fluid' in sim_type:\r\n # return KratosSimulator_Fluid\r\n # else:\r\n return StructuralMechanics_KratosSimulator\r\n \r\n def prepare_reference_data(self):\r\n S_true=np.load(self.dataset_path+'FOM/FOM_equalForces_30steps.npy')\r\n F_true=np.load(self.dataset_path+'FOM/POINTLOADS_equalForces_30steps.npy')\r\n return S_true, F_true\r\n \r\n def optimisation_routine(self, q0, f_vectors, s_true):\r\n f_vectors=np.expand_dims(f_vectors, axis=0)\r\n # snapshot_true=np.expand_dims(s_true, axis=0)\r\n snapshot_true=s_true\r\n\r\n print()\r\n \r\n print(snapshot_true.shape)\r\n q_goal=self.encode(snapshot_true)\r\n print(q_goal.shape)\r\n S_goal_pred=self.decode(q_goal)\r\n\r\n r_vector_goal = self.kratos_simulation.get_r_forces_(S_goal_pred, f_vectors)[0]\r\n goal_r_norm = np.linalg.norm(r_vector_goal)\r\n print('Goal residual norm: ', goal_r_norm)\r\n\r\n def opt_function(x):\r\n if len(x.shape)==1:\r\n x=np.expand_dims(x,axis=0)\r\n snapshot = self.decode(x)\r\n r_vector = self.kratos_simulation.get_r_forces_(snapshot, f_vectors)[0]\r\n r_norm = np.linalg.norm(r_vector)\r\n return r_norm\r\n \r\n def opt_function_u_diff(x):\r\n if len(x.shape)==1:\r\n x=np.expand_dims(x,axis=1)\r\n snapshot = self.data_normalizer.process_input_to_raw_format(x)\r\n s_norm = np.linalg.norm(snapshot_true - snapshot)\r\n return s_norm\r\n \r\n # minimizer_kwargs = {\"method\":\"L-BFGS-B\", \"options\":{\"maxiter\":2}}\r\n # optim_result = scipy.optimize.basinhopping(opt_function, q0, niter=2, minimizer_kwargs=minimizer_kwargs)\r\n optim_result = scipy.optimize.basinhopping(opt_function, q0, stepsize=0.1)\r\n\r\n print(optim_result)\r\n\r\n if len(optim_result.x.shape)==1:\r\n q_final=np.expand_dims(optim_result.x,axis=0)\r\n else:\r\n q_final=optim_result.x\r\n \r\n snapshot_final = self.decode(q_final)\r\n snapshot_rel_error_to_FOM = np.linalg.norm(snapshot_final-snapshot_true)/np.linalg.norm(snapshot_true)\r\n \r\n r_vector_final = self.kratos_simulation.get_r_forces_(snapshot_final, f_vectors)\r\n final_r_norm = np.linalg.norm(r_vector_final)\r\n\r\n print('Final r norm: ', final_r_norm)\r\n print('Relative error on x: ', snapshot_rel_error_to_FOM)\r\n\r\n reactions = -1*self.kratos_simulation.get_r_forces_withDirich_(snapshot_final, f_vectors)\r\n\r\n iteration_errors_list = [snapshot_rel_error_to_FOM,final_r_norm,goal_r_norm]\r\n \r\n 
return optim_result, snapshot_final, reactions, iteration_errors_list\r\n\r\n def execute_solver(self):\r\n\r\n # Select the architecture to use\r\n arch_factory = self.architecture_factory_selector(self.train_config[\"architecture\"])\r\n\r\n # Create a fake Analysis stage to calculate the predicted residuals\r\n kratos_simulation_class=self.kratos_simulator_selector(self.train_config[\"sim_type\"])\r\n self.kratos_simulation = kratos_simulation_class(self.working_path, self.train_config)\r\n crop_mat_tf, crop_mat_scp = self.kratos_simulation.get_crop_matrix()\r\n\r\n # Select the type of preprocessing (normalisation)\r\n prepost_processor=arch_factory.prepost_processor_selector(self.working_path, self.train_config[\"dataset_path\"])\r\n\r\n S_FOM_orig = arch_factory.get_orig_fom_snapshots(self.train_config['dataset_path'])\r\n arch_factory.configure_prepost_processor(prepost_processor, S_FOM_orig, crop_mat_tf, crop_mat_scp)\r\n\r\n print('======= Instantiating TF Model =======')\r\n network, enc_network, dec_network = arch_factory.define_network(prepost_processor, self.kratos_simulation)\r\n \r\n print('======= Loading TF Model weights =======')\r\n network.load_weights(self.model_weights_path+self.model_weights_filename)\r\n\r\n print('======= Defining encoder and decoder routines =======')\r\n self.encode=arch_factory.NMROM_encoder(prepost_processor, enc_network)\r\n self.decode=arch_factory.NMROM_decoder(prepost_processor, dec_network)\r\n\r\n print('======= Preparing reference data =======')\r\n S_true, F_true = self.prepare_reference_data()\r\n print('S_true shape: ', S_true)\r\n print('F_true shape: ', F_true)\r\n\r\n print('======= Running solver routine =======')\r\n results_file_path = self.results_path+'Scipy_results_matrix.npy'\r\n snapshots_file_path = self.results_path+'Scipy_snapshots_matrix.npy'\r\n reactions_file_path = self.results_path+'Scipy_reactions_matrix.npy'\r\n\r\n np.save(results_file_path, np.array([]))\r\n np.save(snapshots_file_path, np.array([]))\r\n np.save(reactions_file_path, np.array([]))\r\n\r\n q0 = np.zeros(6)\r\n\r\n for i, forces in enumerate(F_true):\r\n\r\n optim_result, snapshot_final, reactions, iteration_errors_list = self.optimisation_routine(q0, forces, S_true[i])\r\n\r\n results_mat = list(np.load(results_file_path))\r\n results_mat.append(iteration_errors_list)\r\n np.save(results_file_path, np.array(results_mat))\r\n\r\n snapshots_mat = list(np.load(snapshots_file_path))\r\n snapshots_mat.append(snapshot_final)\r\n np.save(snapshots_file_path, np.array(snapshots_mat))\r\n\r\n reactions_mat = list(np.load(reactions_file_path))\r\n reactions_mat.append(reactions)\r\n np.save(reactions_file_path, np.array(reactions_mat))\r\n\r\n #q=optim_result.x\r\n # q0=q\r\n\r\n ","repo_name":"NicolasSR/NonlinearManifolds_KratosROM","sub_path":"scipy_solver.py","file_name":"scipy_solver.py","file_ext":"py","file_size_in_byte":8694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73386170758","text":"# Write a Python function to check whether a string is a pangram or not. 
Note : Pangrams are words or sentences containing every letter of the alphabet at least once.\n# For example : \"The quick brown fox jumps over the lazy dog\"\ndef check_pangram(sentence):\n\talphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\tfor letter in sentence:\n\n\t\tif letter in alphabet:\n\t\t\talphabet.remove(letter)\n\tprint(alphabet)\n\tif not alphabet:\n\t\tprint('This sentence is a pangram')\n\telse:\n\t\tprint('This sentence is not a pangram')\ncheck_pangram('The quick brown fox jumps over the lazy dog')\n\n# Write a Python program to print the even numbers from a given list.\n# Sample List : [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# Expected Result : [2, 4, 6, 8]\ndef get_even(l1):\n\teven = list()\n\tfor num in l1:\n\t\tif num != '' and num != ' ':\n\t\t\tif not int(num) % 2:\n\t\t\t\teven.append(num)\n\tprint('Expected Result:', even)\nl1 = list(input('Sample List: '))\nprint(l1)\nget_even(l1)\n\n\n# Write a Python function that takes a list and returns a new list with unique elements of the first list.\n# Sample List : [1,2,3,3,3,3,4,5]\n# Unique List : [1, 2, 3, 4, 5]\ndef del_duplicates(l1):\n\tresult = list()\n\tfor i in l1:\n\t\tif i not in result and i != '' and i != ' ':\n\t\t\tresult.append(i)\n\tprint('Unique List:',result)\nl1 = list(input('Sample list: '))\ndel_duplicates(l1)\n\n# Write a Python function that accepts a string and calculates the number of upper case letters and lower case letters.\ndef calculate_cases(sentence):\n\tlower = 0\n\tupper = 0\n\tfor i in sentence:\n\t\tif i.islower():\n\t\t\tlower += 1\n\t\telif i.isupper():\n\t\t\tupper += 1\n\tprint('No. of Upper case characters :',upper)\n\tprint('No. of Lower case characters :', lower)\nsentence = input('Sample string: ')\ncalculate_cases(sentence)\n\n# 1. A simple function\ndef favorite_movie(movie_name):\n\tprint('My favourite movie is', movie_name)\nfavorite_movie('Rembo')\n\n# 2. Creating a dictionary.\ndef make_country(name, capital):\n\tcountry = dict()\n\tcountry['name'] = name\n\tcountry['capital'] = capital\n\tprint(country)\nmake_country('Ukraine', 'Kyiv')\n\n# 3. 
A simple calculator\ndef make_operation(operation, *nums):\n\tresult = nums[0]\n\tprint(nums[0])\n\tif operation == '+':\n\t\tfor i, num in enumerate(nums):\n\t\t\tif i > 0:\n\t\t\t\tresult += num\n\t\t\t\tprint(result, num)\n\telif operation == '-':\n\t\tfor i, num in enumerate(nums):\n\t\t\tif i > 0:\n\t\t\t\tresult -= num\n\telif operation == '*':\n\t\tfor i, num in enumerate(nums):\n\t\t\tif i > 0:\n\t\t\t\tresult *= num\n\telif operation == '/':\n\t\t\n\t\tfor i,num in enumerate(nums):\n\t\t\tif i > 0:\n\t\t\t\tresult /= num\n\n\treturn result\nprint(make_operation('/', 12,6,3))\n\n","repo_name":"beetroot-academy-rivne/hw-bogdan-quiz","sub_path":"hw10.py","file_name":"hw10.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43082317927","text":"import json\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\nfrom logging import Logger, LogRecord, StreamHandler\nfrom typing import NoReturn, Union\n\nfrom pythonjsonlogger.jsonlogger import JsonFormatter\n\nfrom trafalgar_log.app import SETTINGS, DEFAULT_FIELDS_TO_SHAMBLE\nfrom trafalgar_log.core.enums import LogFields\n\nAPP: str = LogFields.APP.value\nFLOW: str = LogFields.FLOW.value\nCODE_LINE: str = LogFields.CODE_LINE.value\nCORRELATION_ID: str = LogFields.CORRELATION_ID.value\nDATE_TIME: str = LogFields.DATE_TIME.value\nDOMAIN: str = LogFields.DOMAIN.value\nINSTANCE_ID: str = LogFields.INSTANCE_ID.value\nLOG_CODE: str = LogFields.LOG_CODE.value\nLOG_MESSAGE: str = LogFields.LOG_MESSAGE.value\nPAYLOAD: str = LogFields.PAYLOAD.value\nSEVERITY: str = LogFields.SEVERITY.value\nTIMESTAMP: str = LogFields.TIMESTAMP.value\nSTACKTRACE: str = \"stacktrace\"\nALL_FIELDS_TO_SHAMBLE: list = DEFAULT_FIELDS_TO_SHAMBLE\nALL_FIELDS_TO_SHAMBLE.extend(SETTINGS.get(\"SHAMBLES\").split(\",\"))\nFIELDS_TO_SHAMBLE: list = [\n field.strip().lower() for field in ALL_FIELDS_TO_SHAMBLE\n]\nSHAMBLE_CHARACTER: str = \"*\"\nNOT_SET: str = \"NOT_SET\"\n\n\nclass TrafalgarLogFormatter(JsonFormatter):\n \"\"\"\n This is the class responsible for formatting the log record of the log\n event to the format of Trafalgar Log.\n This class is instantiated only one time at the boot of an application\n through the initialize_logger function.\n Its only function add_fields or any other functions that may be\n implemented on this class should never be called, since this is a class\n used automatically by the logging package.\n\n \"\"\"\n\n def add_fields(\n self,\n log_record: dict,\n record: LogRecord,\n message_dict: dict,\n ) -> NoReturn:\n \"\"\"\n The add_fields is the function responsible for the formatting\n process. 
This method should not be called in any circumstances,\n because it is called automatically each time a log event is created.\n\n :param log_record: dict: A dict containing information regarding the\n log event provided by the Logger._do_log function, such as\n log_code, payload and severity.\n :param record: LogRecord: The log record of the log event containing\n all automatically filled information provided by the logging\n package.\n :param message_dict: dict: A dict containing data that is not on the\n other two parameters.\n :returns: Nothing.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n from trafalgar_log.core.logger import Logger\n\n super(TrafalgarLogFormatter, self).add_fields(\n log_record, record, message_dict\n )\n\n log_record[APP] = SETTINGS.get(\"APP_NAME\")\n log_record[FLOW] = Logger.get_flow()\n log_record[CODE_LINE] = _get_code_line(record)\n log_record[CORRELATION_ID] = Logger.get_correlation_id()\n log_record[DATE_TIME] = _get_date_time(record)\n log_record[DOMAIN] = SETTINGS.get(DOMAIN)\n log_record[INSTANCE_ID] = Logger.get_instance_id()\n log_record[LOG_MESSAGE] = record.message\n log_record[TIMESTAMP] = _get_timestamp(record)\n\n _set_stacktrace(log_record)\n\n\ndef _get_os_paths() -> list:\n \"\"\"\n The _get_os_paths function returns a list of all the paths in sys.path\n that are not absolute (i.e., don't end with a slash). The returned list\n is sorted from longest to shortest path, to ensure that longer paths\n take precedence over shorter ones. For example,\n /opt/python_virtualenv/lib/python3.6/site-packages takes precedence over\n /opt/python_virtualenv/. Since _get_os_paths returns a copy of sys.path,\n this function can be called safely by other modules.\n\n :returns: A list of the OS paths.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return [\n \"\".join([path, os.sep])\n for path in sorted(\n map(os.path.abspath, sys.path), key=len, reverse=True\n )\n if not path.endswith(os.sep)\n ]\n\n\ndef _find_relative_path(record: LogRecord) -> str:\n \"\"\"\n The _find_relative_path function is used to find the relative path of a\n log event.\n\n The function is called by the _get_code_line function and takes in a\n record from the logging module. The record contains information about\n where the log was created, including what file it was created in and what\n line number it was on. The pathname attribute stores an absolute path to\n where this log message was created, while filename stores just its\n basename.\n\n :param record: LogRecord: The log record of the log event.\n :returns: The relative path of the log file from the os_paths.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n # ensure that the path separator is always the os separator\n pathname = record.pathname.replace(\"/\", os.sep)\n file_name = os.path.basename(record.filename)\n\n try:\n return next(\n os.path.relpath(pathname, path)\n for path in OS_PATHS\n if pathname.startswith(path)\n and file_name != os.path.relpath(pathname, path)\n )\n except StopIteration:\n return record.filename\n\n\ndef _get_code_line(record: LogRecord) -> str:\n \"\"\"\n The _get_code_line function is used by the\n TrafalgarLogFormatter.add_fields function to get the code\n line that a log record was created on. 
It does this by finding all the\n directories in between where this file (logging/__init__.py) is located\n and where the module containing `record` (the LogRecord object) is\n located, then joining them together with slashes (/).\n The result will be something like: mypackage/module.py - myfunc:10\n\n :param record: LogRecord: The log record of the log event.\n :returns: A string that contains the relative path to the file where the\n log record was created, followed by a dash, and then it returns a\n string containing the function name and line number of code where\n the log record was created.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n relativepath = _find_relative_path(record)\n\n return (\n f\"{relativepath.replace(os.sep, '/')} - \"\n f\"{record.funcName}:{record.lineno}\"\n )\n\n\ndef _get_date_time(record: LogRecord) -> str:\n \"\"\"\n The _get_date_time function returns a string representation of the log\n event date and time.\n\n :param record: LogRecord: The log record of the log event.\n :returns: A string of the date and time in ISO 8601 format\n (yyyy-MM-dd hh:mm:ss.SSS).\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return datetime.fromtimestamp(record.created, tz=None).isoformat(\n sep=\" \", timespec=\"milliseconds\"\n )\n\n\ndef _get_timestamp(record: LogRecord) -> int:\n \"\"\"\n The _get_timestamp function is a helper function that returns the\n timestamp of a log record in milliseconds.\n\n :param record: LogRecord: The log record of the log event.\n :returns: The time in milliseconds when the log record was created.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return int(record.created * 1000)\n\n\ndef _set_stacktrace(log_record: dict) -> NoReturn:\n \"\"\"\n The _set_stacktrace function is a helper function that is called by the\n logging.Formatter class to set the stacktrace field of each log record.\n\n :param log_record: dict: Set the stacktrace key in the log_record:dict\n parameter if the log record has \"exc_info\" key.\n :returns: Nothing.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n if log_record.get(\"exc_info\"):\n log_record[STACKTRACE] = log_record.pop(\"exc_info\").split(\"\\n\")\n\n\ndef _get_format() -> str:\n \"\"\"\n The _get_format function returns a string that can be used to format the\n log fields for output. The returned string is a concatenation of all the\n log field names, each preceded by % and enclosed in parentheses. This\n allows us to use Python's built-in logging module's formatting\n functionality.\n\n :returns: A string that is used as the format for a logging.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return \" \".join([f\"%({log_field.value})\" for log_field in LogFields])\n\n\ndef _get_formatter() -> TrafalgarLogFormatter:\n \"\"\"\n The _get_formatter function returns an instance of the\n TrafalgarLogFormatter class, which is then passed to the logging\n module's basicConfig function as the formatter keyword argument. 
This\n allows us to configure how our log messages are formatted before they are\n written out by setting attributes on this object.\n\n :returns: A TrafalgarLogFormatter object.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return TrafalgarLogFormatter(_get_format())\n\n\ndef _remove_handlers() -> NoReturn:\n \"\"\"\n The _remove_handlers function removes all handlers from the root logger.\n\n :returns: Nothing.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n root = logging.getLogger()\n\n if root.handlers:\n for handler in root.handlers:\n root.removeHandler(handler)\n\n\ndef _get_handler() -> StreamHandler:\n \"\"\"\n The _get_handler function creates a StreamHandler object and sets the\n formatter to the _get_formatter function.\n It then returns this handler.\n\n :returns: A StreamHandler object.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n log_handler = StreamHandler()\n log_handler.setFormatter(_get_formatter())\n return log_handler\n\n\ndef _shamble_list(value: list) -> list:\n \"\"\"\n The _shamble_list function takes a list and returns a new list with the\n same elements, but with each element replaced by SHAMBLE_CHARACTER. This\n is used to hide potentially sensitive information in the log event.\n\n :param value: list: Tell the function the list to shamble its values.\n :returns: The list of the parameter with its contents shambled.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return [SHAMBLE_CHARACTER for _ in value]\n\n\n# noinspection PyTypeChecker\ndef _is_iterable(obj: object) -> bool:\n \"\"\"\n The _is_iterable function is a helper function that is used to determine\n if an object is iterable.\n It does this by attempting to use the built-in Python iter() function on\n the object. If it succeeds, then _is_iterable returns True; otherwise it\n returns False.\n\n :param obj: object: The object to be tested if it is iterable.\n :returns: True if the object is iterable, False otherwise.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n\ndef _is_primitive(obj: object) -> bool:\n \"\"\"\n The _is_primitive function is a helper function for the\n _should_shamble_primitive_value function.\n It returns True if obj is a primitive type (str, int, float), and False o\n therwise.\n\n :param obj: object: The object that should be checked if it is primitive.\n :returns: True if the object is a primitive (str, int, float or bool).\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return isinstance(obj, str) or (\n not hasattr(obj, \"__dict__\") and not _is_iterable(obj)\n )\n\n\ndef _should_shamble_primitive_value(key: str, value: object) -> bool:\n \"\"\"\n The _should_shamble_primitive_value function is used to determine whether\n a primitive value should be shambled. 
Primitive values are those that are\n not complex objects, such as lists or dictionaries.\n The _should_shamble_primitive_value function returns True if the key of\n the given value is in the list of keys to shambler, and False otherwise.\n\n :param key: str: The key of the value to check if it should be shambled.\n :param value: object: The value that should be validated if it is\n primitive.\n :returns: True if the value is a primitive, and it's key is in\n FIELDS_TO_SHAMBLE.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return key.lower() in FIELDS_TO_SHAMBLE and _is_primitive(value)\n\n\ndef _shamble_fields(payload: object) -> Union[dict, object]:\n \"\"\"\n The _shamble_fields function is a helper function that is used to replace\n the values of the fields in the payload with random values. This is done\n by using a dictionary comprehension to iterate through each key and\n value pair in the payload. The result of this function will be passed\n into _dict_replace_value which will recursively call itself on any nested\n dictionaries or lists.\n\n :param payload: object: The payload to have its field replaced with a\n new value.\n :returns: A dictionary with the same keys and values as the payload\n argument, except that all the values have been replaced by a new\n value or the payload itself if it is not a dictionary.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n if isinstance(payload, dict):\n return _dict_replace_value(payload)\n\n return payload\n\n\ndef _dict_replace_value(payload: dict) -> dict:\n \"\"\"\n # refs.: https://stackoverflow.com/a/60776516/7973282\n The _dict_replace_value function replaces all values in a dictionary with\n the SHAMBLE_CHARACTER character. This is done to prevent sensitive\n information from being exposed in the log event.\n\n :param payload: dict: The payload to be have its field replaced with a\n new value.\n :returns: A new dictionary with all values that are dictionaries replaced\n by a shambled version of the value.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n new_payload = {}\n\n for key, value in payload.items():\n if isinstance(value, dict):\n value = _dict_replace_value(value)\n elif isinstance(value, list):\n value = _shamble_list(value)\n elif _should_shamble_primitive_value(key, value):\n value = SHAMBLE_CHARACTER\n new_payload[key] = value\n return new_payload\n\n\ndef _to_json(payload: object) -> dict:\n \"\"\"\n The _to_json function is a helper function that converts an object to JSON.\n The _to_json function will convert all attributes within an object to\n key-value pairs and return a dict.\n If there are any nested objects, they will also be converted to dicts and\n so on.\n The skipkeys attribute is set to True so that if there are any\n unserializable attributes (which would happen if using default=lambda),\n they can be skipped instead of causing an infity loop.\n\n :param payload: object: The payload to be converted to JSON object.\n :returns: The JSON object.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return json.loads(\n json.dumps(\n payload,\n skipkeys=True,\n default=lambda o: o.__dict__ if hasattr(o, \"__dict__\") else str(o),\n )\n )\n\n\ndef get_payload(payload: object) -> Union[object, dict]:\n \"\"\"\n The get_payload function is a helper function that takes in an object and\n returns a dictionary. The get_payload function is used to convert the\n payload from a Python object into a JSON-serializable dictionary. 
This\n allows the user to pass in any arbitrary Python object as the payload.\n\n :param payload: object: Pass in the object that is to be converted into\n a JSON object.\n :returns: A JSON serialized version of the payload or payload itself if\n it is a primitive type.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n return _shamble_fields(_to_json(payload))\n\n\ndef initialize_logger() -> Logger:\n \"\"\"\n The initialize_logger function creates a logger object that is used to\n log messages.\n It also adds a handler to the logger object, which allows for logging of\n messages.\n It removes handlers to avoid conflict and log duplication.\n refs.: https://stackoverflow.com/a/45624044/7973282\n\n :returns: A logger object.\n :doc-author: Trelent and this project contributors.\n \"\"\"\n\n _remove_handlers()\n\n logger = logging.getLogger(SETTINGS.get(\"APP_NAME\"))\n logger.addHandler(_get_handler())\n logger.setLevel(logging.getLevelName(SETTINGS.get(\"HAKI\").upper()))\n\n return logger\n\n\nOS_PATHS = _get_os_paths()\n","repo_name":"victoraugustofd/trafalgar-log","sub_path":"trafalgar_log/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16461,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"62"} +{"seq_id":"37307552445","text":"from seaborn import distplot\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport xgboost as xgb\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nnp.random.seed(24)\n\n\nos.chdir('/home/petrichor/DS/Analytics_Vidhya_24_projects/3_Bigmart_Sales/')\ntrain = pd.read_csv('Train_UWu5bXk.csv')\ntest = pd.read_csv('Test_u94Q5KV.csv')\nsubmission = pd.read_csv('SampleSubmission_TmnO39y.csv')\n\n# comibne train and test data\ntrain['source'] = 'train'\ntest['source'] = 'test'\ndata = pd.concat([train, test], ignore_index=True, sort=False)\n\n# MVT\ndata['Item_Weight'] = data.groupby(['Item_Identifier'])[\n 'Item_Weight'].transform(lambda x: x.fillna(x.mean()))\ndata['Outlet_Size'] = data['Outlet_Size'].fillna('Small')\n\n# MVT + FE\ndata['Item_Visibility'] = data['Item_Visibility'].replace(0, np.nan)\ndata['Item_Visibility'] = data.groupby(['Item_Identifier'])[\n 'Item_Visibility'].transform(lambda x: x.fillna(x.mean()))\n\n# FE\n# Get the first two characters of ID:\ndata['Item_Fat_Content'] = data['Item_Fat_Content'].replace(\n {\n 'low fat': 'Low Fat',\n 'LF': 'Low Fat',\n 'reg': 'Regular'})\n\ndata['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])\ndata.loc[data['Item_Type_Combined'] ==\n \"Non-Consumable\", 'Item_Fat_Content'] = \"Non-Edible\"\ndata['Outlet_Age'] = 2013 - data['Outlet_Establishment_Year']\ndata = data.drop(['Item_Type', 'Item_Identifier',\n 'Outlet_Establishment_Year'], 1)\n\n# EN\ndata = pd.get_dummies(data, columns=['Item_Fat_Content', 'Item_Type_Combined', 'Outlet_Identifier', 'Outlet_Size',\n 'Outlet_Location_Type', 'Outlet_Type'])\n\n# SCALE\n# for col in data.select_dtypes(include=[np.int]).columns:\n# data[col] = data[col].astype(float)\n\n# scaler = StandardScaler()\n# data[['Item_Weight','Item_Visibility','Item_MRP','Outlet_Age']] = 
scaler.fit_transform(data[['Item_Weight','Item_Visibility','Item_MRP','Outlet_Age']])\n\n# segregate training and test data\ndata_train = data.loc[data.source == 'train']\ndata_test = data.loc[data.source == 'test']\n\ntarget = 'Item_Outlet_Sales'\npredictors = [x for x in data_train.columns if x not in [target]+['source']]\n\n# Fit the model on the data\n\nX = data_train[predictors]\ny = data_train[target]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\ndtrain = xgb.DMatrix(X_train.values, label=y_train.values)\ndtest = xgb.DMatrix(X_test.values, label=y_test.values)\n\nparam = {'max_depth': 5, 'eta': 0.1, 'silent': 1, 'objective': 'reg:linear', 'colsample_bytree': 0.8,\n         'min_child_weight': 1, 'gamma': 0, 'subsample': 0.8, 'scale_pos_weight': 1}\n\n# specify validation set to watch performance\nwatchlist = [(dtest, 'eval'), (dtrain, 'train')]\nnbr = 1000\nesr = 50\nmodel = xgb.train(param, dtrain, nbr,\n                  early_stopping_rounds=esr, evals=watchlist)\n# WE COULD HAVE INCLUDED THE PARAMETERS (param) INSIDE THE XGB.TRAIN() CALL INSTEAD OF DEFINING THEM SEPARATELY, AS FOLLOWS\n# model = xgb.train({'max_depth': 5, 'eta': 0.1, 'silent': 1, 'objective': 'reg:linear', 'colsample_bytree': 0.8, 'min_child_weight': 1, 'gamma': 0, 'subsample': 0.8, 'scale_pos_weight': 1}, dtrain, nbr, early_stopping_rounds=esr, evals=watchlist)\nxgb.plot_importance(model)\nplt.show()\n","repo_name":"lavajiit/big_mart_eda_modeling","sub_path":"r_9.py","file_name":"r_9.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"20212438265","text":"from time import sleep\n\n#timer\ndef timer(x):\n    #split to h, min and sec\n    try:\n        s = x.split(':')\n        sec = int(s[2])\n        min = int(s[1])\n        h = int(s[0])\n    except (ValueError, IndexError):\n        print('')\n        print('INVALID INPUT (input example: 0:0:10)')\n        exit()\n\n    #too big values fix\n    if sec >= 60 or min >= 60: print('value too big (minutes and seconds must be below 60)')\n    else:\n        #time goes down\n        while True:\n            if sec > 0:\n                sec = sec - 1\n            elif sec <= 0:\n                if min > 0:\n                    min = min - 1\n                    sec = 59\n                elif min <= 0:\n                    if h > 0:\n                        h = h - 1\n                        min = 59\n                        sec = 59\n                    else:\n                        break\n            sleep(1)\n\n            #visual representation\n            if sec < 10: tsec = '0' + str(sec)\n            else: tsec = str(sec)\n            if min < 10: tmin = '0' + str(min)\n            else: tmin = str(min)\n            print(str(h) + ':' + tmin + ':' + tsec)\n\n    print('')\n    print('[ ' + x + ' just passed' + ' ]')\n\n#user interaction\n\nprint('[ timer.py ]')\nprint('set timer. (ex. 
0:0:10, hours:minutes:seconds)')\nprint('')\ntimer(input('>>'))\n","repo_name":"cyanLambda/pythonTimeProject","sub_path":"Timer.py","file_name":"Timer.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74366432196","text":"from typing import List, Set\n\n\ndef take_input(filepath):\n with open(filepath, 'r') as f:\n lines = f.readlines()\n answers = []\n current_answers = []\n for line in lines:\n line = line.strip()\n if not line:\n answers.append(current_answers)\n current_answers = []\n continue\n current_answers.append(line)\n answers.append(current_answers)\n return answers\n\n\ndef puzzle1(filepath):\n answers = take_input(filepath)\n count = 0\n for answer in answers:\n count += len(get_group_answers1(answer))\n return count\n\n\ndef puzzle2(filepath):\n answers = take_input(filepath)\n count = 0\n for answer in answers:\n count += len(get_group_answers2(answer))\n return count\n\n\ndef get_group_answers1(answers: List[str]) -> Set[str]:\n result = set()\n for answer in answers:\n for let in answer:\n result.add(let)\n return result\n\n\ndef get_group_answers2(answers: List[str]) -> Set[str]:\n qs = 'qwertyuiopasdfghjklzxcvbnm'\n result = set()\n for q in qs:\n yes = True\n for answer in answers:\n if q not in answer:\n yes = False\n break\n if yes:\n result.add(q)\n return result\n\n\nfilepath = '/Users/hkoklu/personal/advent_of_code/2020/day06.txt'\nprint(puzzle1(filepath))\nprint(puzzle2(filepath))\n","repo_name":"yabanci/advent-of-code","sub_path":"advent_of_code/2020/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38829960633","text":"#!/usr/bin/env python3\nimport pygsti\nfrom load import load_3q\nfrom pygsti.algorithms import gaugeopt_to_target\nfrom pygsti.tools import timed_block\n\n\ndef main():\n gs_target = pygsti.construction.build_gateset(\n [8], [('Q0','Q1','Q2')],['Gx1','Gy1','Gx2','Gy2','Gx3','Gy3','Gcnot12','Gcnot23'],\n [ \"X(pi/2,Q0):I(Q1):I(Q2)\", \"Y(pi/2,Q0):I(Q1):I(Q2)\", \"I(Q0):X(pi/2,Q1):I(Q2)\", \"I(Q0):Y(pi/2,Q1):I(Q2)\",\n \"I(Q0):I(Q1):X(pi/2,Q2)\", \"I(Q0):I(Q1):Y(pi/2,Q2)\", \"CX(pi,Q0,Q1):I(Q2)\", \"I(Q0):CX(pi,Q1,Q2)\"],\n prep_labels=['rho0'], prep_expressions=[\"0\"],\n effect_labels=['E0','E1','E2','E3','E4','E5','E6'], effect_expressions=[\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"],\n spamdefs={'upupup': ('rho0','E0'), 'upupdn': ('rho0','E1'), 'updnup': ('rho0','E2'), 'updndn': ('rho0','E3'),\n 'dnupup': ('rho0','E4'), 'dnupdn': ('rho0','E5'), 'dndnup': ('rho0','E6'), 'dndndn': ('rho0','remainder')},\n basis=\"pp\")\n gs = load_3q()\n\n with timed_block('Basic gauge opt (3Q)'):\n gs_gaugeopt = gaugeopt_to_target(gs, gs_target, \n item_weights={'spam' : 0.0001, 'gates':1.0},\n spam_metric='frobenius',\n gates_metric='frobenius')\n\nif __name__ == '__main__':\n main()\n","repo_name":"pyGSTio/pyGSTi","sub_path":"scripts/profiling/gaugeopt/3q_basic.py","file_name":"3q_basic.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"62"} +{"seq_id":"17438101387","text":"import torch\r\nfrom torch import nn\r\nfrom Preparation import Get_Data,utils\r\nfrom torch.utils.data import DataLoader\r\n\r\ndef train(parameter):\r\n model_name = parameter.model_name\r\n if model_name == 'cycle_gan':\r\n from Model.cycle_gan import 
cycle_gan\r\n        model = cycle_gan.model(parameter)  # this model holds all the networks, optimizers, loss functions, etc., plus a step method that updates the parameters\r\n\r\n    save_and_load = utils.save_and_load(model_name=model_name)   # used to load and store the epoch after each training run\r\n\r\n    dataset = Get_Data.dataset(file=parameter.data_name)\r\n    dataloader = DataLoader(dataset, batch_size=parameter.batchSize, shuffle=True, num_workers=0)\r\n    batch_nums = len(dataloader)   # count how many batches there are in total\r\n\r\n    Display = utils.Display(parameter.n_epochs, batch_nums)    # displays losses and images\r\n\r\n\r\n    start_epoch = save_and_load.load_epoch() if parameter.continue_epoch==1 else parameter.epoch\r\n    # whether to resume training from the last saved epoch\r\n\r\n    for epoch in range(start_epoch, parameter.n_epochs):\r\n        for batch, data in enumerate(dataloader):   # run the forward pass for each batch below\r\n            real_A = data['A']\r\n            real_B = data['B']\r\n            losses, images = model.step(real_A, real_B)  # train one batch and return the losses and generated images\r\n            Display.display(epoch=epoch + 1, batch=batch + 1, losses=losses, images=images,\r\n                            display_batch=parameter.display_batch)\r\n        model.lr_step()  # update the learning rate\r\n        save_and_load.save_epoch(epoch)  # store the epoch and delete the value saved for the previous epoch\r\n        model.save_models()  # save the models as a dict and delete the previous epoch's models\r\n        # print(model.optimizer_D_A.state_dict()['param_groups'][0]['lr'])  # check whether the learning rate is actually updated\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Mr-Guowang/Paper-Reproduction","sub_path":"Simple_cycle_gan/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"13067424354","text":"import numpy as np\r\nimport argparse\r\nimport importlib\r\nimport random\r\nimport os, time\r\nimport tensorflow as tf\r\nfrom flearn.utils.model_utils import read_data\r\n\r\n# GLOBAL PARAMETERS\r\nOPTIMIZERS = ['fedavg', 'fedprox', 'feddr', 'fedpd']\r\nDATASETS = ['FEMNIST', 'synthetic_iid', 'synthetic_0_0', 'synthetic_0.5_0.5', 'synthetic_1_1']\r\nREG_TYPE = ['none','l1_norm','l2_norm_squared','l2_norm','linf_norm']\r\n\r\nMODEL_PARAMS = {\r\n    'FEMNIST.ann': (26,), # num_classes\r\n    'synthetic.ann': (10, ) # num_classes\r\n}\r\n\r\n\r\ndef read_options():\r\n    ''' Parse command line arguments or load defaults '''\r\n    parser = argparse.ArgumentParser()\r\n\r\n    parser.add_argument('--optimizer',\r\n                        help='name of optimizer;',\r\n                        type=str,\r\n                        choices=OPTIMIZERS,\r\n                        default='fedavg')\r\n    parser.add_argument('--dataset',\r\n                        help='name of dataset;',\r\n                        type=str,\r\n                        choices=DATASETS,\r\n                        default='nist')\r\n    parser.add_argument('--model',\r\n                        help='name of model;',\r\n                        type=str,\r\n                        default='stacked_lstm.py')\r\n    parser.add_argument('--num_rounds',\r\n                        help='number of rounds to simulate;',\r\n                        type=int,\r\n                        default=-1)\r\n    parser.add_argument('--eval_every',\r\n                        help='evaluate every ____ rounds;',\r\n                        type=int,\r\n                        default=-1)\r\n    parser.add_argument('--clients_per_round',\r\n                        help='number of clients trained per round;',\r\n                        type=int,\r\n                        default=-1)\r\n    parser.add_argument('--batch_size',\r\n                        help='batch size when clients train on data;',\r\n                        type=int,\r\n                        default=10)\r\n    parser.add_argument('--num_epochs', \r\n                        help='number of epochs when clients train on data;',\r\n                        type=int,\r\n                        default=1)\r\n    parser.add_argument('--num_iters',\r\n                        help='number of iterations when clients train on data;',\r\n                        type=int,\r\n                        default=1)\r\n    parser.add_argument('--learning_rate',\r\n                        help='learning rate for inner solver;',\r\n                        type=float,\r\n                        default=0.003)\r\n    parser.add_argument('--mu',\r\n                        help='constant for prox;',\r\n                        type=float,\r\n                        default=0)\r\n    parser.add_argument('--eta',\r\n                        help='constant for feddr;',\r\n                        type=float,\r\n                        
default=1.0)\r\n parser.add_argument('--alpha',\r\n help='constant for feddr;',\r\n type=float,\r\n default=0.9)\r\n parser.add_argument('--seed',\r\n help='seed for randomness;',\r\n type=int,\r\n default=0)\r\n parser.add_argument('--drop_percent',\r\n help='percentage of slow devices',\r\n type=float,\r\n default=0.1)\r\n parser.add_argument('--reg_type',\r\n help='type of regularizer',\r\n type=str,\r\n choices=REG_TYPE,\r\n default='none')\r\n parser.add_argument('--reg_coeff',\r\n help='regularization parameter',\r\n type=float,\r\n default=0.01)\r\n parser.add_argument('--exp_id',\r\n help='experiment ID',\r\n type=str,\r\n default='')\r\n parser.add_argument('--log_suffix',\r\n help='string to append to file name',\r\n type=str,\r\n default='')\r\n\r\n try: parsed = vars(parser.parse_args())\r\n except IOError as msg: parser.error(str(msg))\r\n\r\n # Set seeds\r\n random.seed(1 + parsed['seed'])\r\n np.random.seed(12 + parsed['seed'])\r\n tf.set_random_seed(123 + parsed['seed'])\r\n\r\n # load selected model\r\n if parsed['dataset'].startswith(\"synthetic\"): # all synthetic datasets use the same model\r\n model_path = '%s.%s.%s.%s' % ('flearn', 'models', 'synthetic', parsed['model'])\r\n else:\r\n model_path = '%s.%s.%s.%s' % ('flearn', 'models', parsed['dataset'], parsed['model'])\r\n\r\n mod = importlib.import_module(model_path)\r\n learner = getattr(mod, 'Model')\r\n\r\n # load selected trainer\r\n opt_path = 'flearn.trainers.%s' % parsed['optimizer']\r\n mod = importlib.import_module(opt_path)\r\n optimizer = getattr(mod, 'Server')\r\n\r\n # add selected model parameter\r\n parsed['model_params'] = MODEL_PARAMS['.'.join(model_path.split('.')[2:])]\r\n\r\n # print and return\r\n maxLen = max([len(ii) for ii in parsed.keys()]);\r\n fmtString = '\\t%' + str(maxLen) + 's : %s';\r\n print('Arguments:')\r\n for keyPair in sorted(parsed.items()): print(fmtString % keyPair)\r\n\r\n return parsed, learner, optimizer\r\n\r\ndef main():\r\n # suppress tf warnings\r\n tf.logging.set_verbosity(tf.logging.ERROR)\r\n \r\n # parse command line arguments\r\n options, learner, optimizer = read_options()\r\n\r\n # read data\r\n train_path = os.path.join('data', options['dataset'], 'data', 'train')\r\n test_path = os.path.join('data', options['dataset'], 'data', 'test')\r\n dataset = read_data(train_path, test_path)\r\n\r\n users, groups, train_data, test_data = dataset\r\n\r\n # call appropriate trainer\r\n t = optimizer(options, learner, dataset)\r\n start = time.time()\r\n history = t.train()\r\n end= time.time()\r\n print('Total Training Time: {:.2f} s'.format(end - start))\r\n\r\n alg_name = options['optimizer']\r\n if len(options['log_suffix']) > 0:\r\n name_list = [ alg_name,options['dataset'],options['log_suffix']]\r\n else:\r\n name_list = [ alg_name,options['dataset']]\r\n \r\n file_name = '_'.join(name_list)\r\n log_folder = 'logs'\r\n if options['exp_id'] is None or len(options['exp_id']) < 1:\r\n exp_id = 'test_' + options['dataset']\r\n log_folder = os.path.join(log_folder,exp_id)\r\n else:\r\n log_folder = os.path.join(log_folder,options['exp_id'])\r\n\r\n save_df(history, log_folder, alg_name, file_name)\r\n\r\ndef save_df(df, log_folder, alg_name, file_name):\r\n \r\n\r\n if not os.path.isdir(log_folder):\r\n os.mkdir(log_folder)\r\n \r\n df.to_csv(os.path.join(log_folder, file_name +'.csv'), index=False) \r\n \r\nif __name__ == '__main__':\r\n 
main()\r\n","repo_name":"wenh06/fl_seminar","sub_path":"benchmarks/FedDR/FedDR/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"141977214","text":"from flask import Flask, render_template, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired, URL\nfrom flask_ckeditor import CKEditor, CKEditorField\nfrom datetime import date\n\n\n# Delete this code:\n# import requests\n# posts = requests.get(\"https://api.npoint.io/43644ec4f0013682fc0d\").json()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nckeditor = CKEditor(app)\nBootstrap(app)\n\n# CONNECT TO DB\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n# CONFIGURE TABLE\n\n\nclass BlogPost(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(250), unique=True, nullable=False)\n    subtitle = db.Column(db.String(250), nullable=False)\n    date = db.Column(db.String(250), nullable=False)\n    body = db.Column(db.Text, nullable=False)\n    author = db.Column(db.String(250), nullable=False)\n    img_url = db.Column(db.String(250), nullable=False)\n\n\n# WTForm\nclass CreatePostForm(FlaskForm):\n    title = StringField(\"Blog Post Title\", validators=[DataRequired()])\n    subtitle = StringField(\"Subtitle\", validators=[DataRequired()])\n    author = StringField(\"Your Name\", validators=[DataRequired()])\n    img_url = StringField(\"Blog Image URL\", validators=[DataRequired(), URL()])\n    body = CKEditorField(\"Blog Content\", validators=[DataRequired()])\n    submit = SubmitField(\"Submit Post\")\n\n\n@app.route('/')\ndef get_all_posts():\n    posts = BlogPost.query.all()\n    return render_template(\"index.html\", all_posts=posts)\n\n\n@app.route('/new-post', methods=['GET', 'POST'])\ndef new_post():\n    form = CreatePostForm()\n\n    if form.validate_on_submit():\n        post = BlogPost(title=form.data['title'], subtitle=form.data['subtitle'], date=date.today().strftime(\n            '%B %d, %Y'), body=form.data['body'], author=form.data['author'], img_url=form.data['img_url'])\n        db.session.add(post)\n        db.session.commit()\n        return redirect(\"/\")\n\n    return render_template(\"make-post.html\", form=form)\n\n\n@app.route(\"/post/<int:index>\")\ndef show_post(index):\n    requested_post = None\n    posts = BlogPost.query.all()\n    for blog_post in posts:\n        if blog_post.id == index:\n            requested_post = blog_post\n    return render_template(\"post.html\", post=requested_post)\n\n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about.html\")\n\n\n@app.route(\"/contact\")\ndef contact():\n    return render_template(\"contact.html\")\n\n\n@app.route(\"/edit-post/<int:post_id>\", methods=['GET', 'POST'])\ndef edit_post(post_id):\n    post = BlogPost.query.get(post_id)\n    form = CreatePostForm(title=post.title, subtitle=post.subtitle,\n                          author=post.author, img_url=post.img_url, body=post.body)\n\n    if form.validate_on_submit():\n        post.title = form.data['title']\n        post.subtitle = form.data['subtitle']\n        post.date = date.today().strftime('%B %d, %Y')\n        post.body = form.data['body']\n        post.author = form.data['author']\n        post.img_url = form.data['img_url']\n        db.session.commit()\n        return redirect(\"/\")\n\n    return render_template(\"make-post.html\", 
form=form)\n\n\n@app.route(\"/delete-post/<int:post_id>\")\ndef delete_post(post_id):\n    post = BlogPost.query.get(post_id)\n    db.session.delete(post)\n    db.session.commit()\n    return redirect(\"/\")\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"ioneone/100-days-of-python","sub_path":"day061-070/day067/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"4879758469","text":"import torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nfrom .multimodel.transformer import TransformerEncoder\r\n\r\nclass MULTModel(nn.Module):\r\n    def __init__(self):\r\n        \"\"\"\r\n        Construct a MulT model.\r\n        \"\"\"\r\n        super(MULTModel, self).__init__()\r\n        self.orig_d_a, self.orig_d_v = 128, 768\r\n        self.d_a, self.d_v = 128, 128\r\n        self.vonly = True\r\n        self.num_heads = 4\r\n        self.layers = 5\r\n        self.attn_dropout = 0.1\r\n        self.attn_dropout_a = 0.0\r\n        self.attn_dropout_v = 0.0\r\n        self.relu_dropout = 0.1\r\n        self.res_dropout = 0.1\r\n        self.out_dropout = 0.0\r\n        self.embed_dropout = 0.25\r\n        self.attn_mask = False\r\n\r\n        combined_dim = self.d_v # assuming d_l == d_a == d_v\r\n\r\n        output_dim = 64 # This is actually not a hyperparameter :-)\r\n\r\n        # 1. Temporal convolutional layers\r\n        self.proj_a = nn.Conv1d(self.orig_d_a, self.d_a, kernel_size=1, padding=0, bias=False)\r\n        self.proj_v = nn.Conv1d(self.orig_d_v, self.d_v, kernel_size=1, padding=0, bias=False)\r\n\r\n        # 2. Crossmodal Attentions\r\n        if self.vonly:\r\n            self.trans_v_with_a = self.get_network(self_type='va')\r\n\r\n        # 3. Self Attentions (Could be replaced by LSTMs, GRUs, etc.)\r\n        #    [e.g., self.trans_x_mem = nn.LSTM(self.d_x, self.d_x, 1)\r\n        # self.trans_a_mem = self.get_network(self_type='a_mem', layers=3)\r\n        self.trans_v_mem = self.get_network(self_type='av', layers=3)\r\n\r\n        # Projection layers\r\n        self.proj1 = nn.Linear(combined_dim, combined_dim)\r\n        self.proj2 = nn.Linear(combined_dim, combined_dim)\r\n        self.out_layer = nn.Linear(combined_dim, output_dim)\r\n\r\n    def get_network(self, self_type='l', layers=-1):\r\n        if self_type in ['l', 'al', 'vl']:\r\n            embed_dim, attn_dropout = self.d_l, self.attn_dropout\r\n        elif self_type in ['a', 'la', 'va']:\r\n            embed_dim, attn_dropout = self.d_a, self.attn_dropout_a\r\n        elif self_type in ['v', 'lv', 'av']:\r\n            embed_dim, attn_dropout = self.d_v, self.attn_dropout_v\r\n        elif self_type == 'l_mem':\r\n            embed_dim, attn_dropout = 2 * self.d_l, self.attn_dropout\r\n        elif self_type == 'a_mem':\r\n            embed_dim, attn_dropout = 2 * self.d_a, self.attn_dropout\r\n        elif self_type == 'v_mem':\r\n            embed_dim, attn_dropout = 2 * self.d_v, self.attn_dropout\r\n        else:\r\n            raise ValueError(\"Unknown network type\")\r\n\r\n        return TransformerEncoder(embed_dim=embed_dim,\r\n                                  num_heads=self.num_heads,\r\n                                  layers=max(self.layers, layers),\r\n                                  attn_dropout=attn_dropout,\r\n                                  relu_dropout=self.relu_dropout,\r\n                                  res_dropout=self.res_dropout,\r\n                                  embed_dropout=self.embed_dropout,\r\n                                  attn_mask=self.attn_mask)\r\n\r\n    def forward(self, x_v, x_a):\r\n        \"\"\"\r\n        text, audio, and vision should have dimension [batch_size, seq_len, n_features]\r\n        \"\"\"\r\n        # B N C\r\n        x_a = x_a.transpose(1, 2)\r\n        x_v = x_v.transpose(1, 2)\r\n\r\n        # Project the textual/visual/audio features\r\n        proj_x_a = x_a if self.orig_d_a == self.d_a else self.proj_a(x_a)\r\n        proj_x_v = x_v if self.orig_d_v == self.d_v else self.proj_v(x_v)\r\n        proj_x_a = proj_x_a.permute(2, 0, 
1)\r\n proj_x_v = proj_x_v.permute(2, 0, 1)\r\n # N B C\r\n if self.vonly:\r\n # (L,A) --> V\r\n h_v_with_as = self.trans_v_with_a(proj_x_v, proj_x_a, proj_x_a)\r\n h_vs = h_v_with_as\r\n h_vs = self.trans_v_mem(h_vs)\r\n if type(h_vs) == tuple:\r\n h_vs = h_vs[0]\r\n last_hs = h_vs\r\n\r\n # last_hs = torch.cat([proj_x_v, last_h_v], dim=1)\r\n\r\n # A residual block\r\n last_hs_proj = self.proj2(F.dropout(F.relu(self.proj1(last_hs)), p=self.out_dropout, training=self.training))\r\n last_hs_proj += last_hs\r\n\r\n output = self.out_layer(last_hs_proj)\r\n return output\r\n\r\n","repo_name":"TongXu-05/REGNN-Multiple-Appropriate-Facial-Reaction-Generation","sub_path":"REGNN/models/MULTModel.py","file_name":"MULTModel.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"469385110","text":"\n\"\"\"\n\n\"\"\"\nimport os\nimport time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom app.Common.basepage import BasePage\nfrom web.Common.my_logger import logger\nfrom web.Common.path_handler import screenshots_dir\nfrom web.TestData import global_data\n\n\nclass WebPage(BasePage):\n\n def switch_window(self, name=\"new\"):\n \"\"\"\n many windows used during test, switch between them.\n :param name: which window switch to, default is the last open one\n :return: None\n \"\"\"\n # need time to open the new window\n logger.info(\"switch window begin.\")\n time.sleep(1)\n # get all window handlers\n wins = self.driver.window_handles\n logger.info(f\"all windows handlers: {wins}\")\n # new, switch to the last open window\n if name == \"new\":\n # switch_to window/frame/\n logger.info(f\"Change window to: {wins[-1]}\")\n self.driver.switch_to.window(wins[-1])\n\n # use js to set readonly values\n def input_value_to_readonly_element(self, locator, page_operation, value):\n # get element\n logger.info(\"Input values to readonly element.\")\n ele = self.get_element(locator, page_operation)\n # arguments[0] [1], parameters for js\n js_code = 'arguments[0].removeAttribute(\"readonly\");' \\\n 'arguments[0].value = arguments[1];'\n\n # js, values from outside to js code\n self.driver.execute_script(js_code, ele, value)\n\n # forward to homepage\n def page_go_to_homepage(self):\n logger.info(\"set driver go to home page.\")\n self.driver.forward()\n\n # back to last page\n def page_back_to_last(self):\n logger.info(\"set driver back to the last page.\")\n self.driver.back()\n\n # go to forward page\n def page_forward_to(self):\n logger.info(\"set driver forward to page.\")\n self.driver.forward()\n\n # refresh current page\n def page_refresh(self):\n logger.info(\"set driver refresh current page.\")\n self.driver.refresh()\n\n\nif __name__ == '__main__':\n from selenium import webdriver\n from selenium.webdriver.chrome.service import Service\n driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\n\n base = BasePage(driver)\n driver.get(\"http://www.alex-info.ca:8000/login/\")\n\n loc = (By.NAME, \"username\")\n # base.wait_element_visible(loc, \"Login_Username\")\n\n # get element\n # ele = base.get_element(loc, \"Login_Username\", wait=\"x\")\n base.input_text(loc, \"Login_Username\", \"aaaaaa\")\n base.click_element((By.CLASS_NAME, 
\"input_submit\"), \"Login_Username\")\n\n print(base.get_text((By.XPATH, '//div[@id=\"error_info\"]'),\"Login_Submit-clicked\"))\n print(base.get_attribute((By.XPATH, '//div[@id=\"error_info\"]'), \"Login_Submit-clicked\", \"id\"))\n driver.close()\n","repo_name":"NWFT/autotest","sub_path":"app/Common/basepage_web.py","file_name":"basepage_web.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1438253173","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom .views import (\n LanguageViewSet,\n CategoryViewSet,\n SubcategoryViewSet,\n AssessmentViewSet,\n QuestionViewSet,\n ChoiceViewSet,\n AssessmentDifficultyRatingViewSet,\n FollowAssessmentViewSet,\n)\n\n\napp_name = \"assessments\"\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"languages\", LanguageViewSet, basename=\"languages\")\nrouter.register(r\"categories\", CategoryViewSet, basename=\"categories\")\nrouter.register(r\"subcategories\", SubcategoryViewSet, basename=\"subcategories\")\nrouter.register(r\"assessments\", AssessmentViewSet, basename=\"assessments\")\nrouter.register(r\"questions\", QuestionViewSet, basename=\"questions\")\nrouter.register(r\"choices\", ChoiceViewSet, basename=\"choice\")\nrouter.register(r\"assessments-difficulty\", AssessmentDifficultyRatingViewSet, basename=\"assessmentsdifficulty\")\nrouter.register(r\"follow-assessments\", FollowAssessmentViewSet, basename=\"followassessments\")\n\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n","repo_name":"juanrios15/soma-server","sub_path":"apps/assessments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13285120842","text":"#!/usr/bin/env python\n\nimport csv\nimport sys\n\nclass TweaksList:\n\n\n def __init__(self):\n self.tweaks = []\n\n def read_inputs(self, file_name):\n with open(file_name, 'rb') as inputs:\n reader = csv.reader(inputs, delimiter='\\t', quotechar='|')\n for line in reader:\n self.tweaks.append(line)\n\n def get_tweak_parameters(self, sctg):\n for line in self.tweaks:\n if line[0] == sctg:\n return line\n return []\n\n\nclass AgpBuffer:\n\n\n def __init__(self, tweaks_list):\n self.tweaks_list = tweaks_list\n self.first_line = []\n self.second_line = []\n self.third_line = []\n self.ready_to_write = []\n\n def update(self, newline):\n self.ready_to_write[:] = []\n self.ready_to_write.append(self.first_line)\n self.first_line = self.second_line\n self.second_line = self.third_line\n self.third_line = newline\n\n def tweak_away(self):\n if len(self.second_line) == 0:\n return \n elif self.second_line[4] == \"W\":\n sctg = self.second_line[5]\n tweak = self.tweaks_list.get_tweak_parameters(sctg)\n if len(tweak) != 0:\n if tweak[1] == \"beginning\":\n self.tweak_begin(int(tweak[2]))\n else:\n self.tweak_end(int(tweak[2]))\n else:\n return \n\n def tweak_end(self, n):\n # Adjust column 3 of sctg\n old_sctg_end = int(self.second_line[2])\n new_sctg_end = str(old_sctg_end - n)\n self.second_line[2] = new_sctg_end\n # Adjust column 8 of sctg\n old_end = int(self.second_line[7])\n new_end = str(old_end - n)\n self.second_line[7] = new_end\n \n if self.third_line[4] == 'N':\n # Adjust column 2 of fragment\n old_frag_begin = int(self.third_line[1])\n new_frag_begin = str(old_frag_begin - n)\n self.third_line[1] = new_frag_begin\n # Adjust column 6 of 
fragment\n            old_length = int(self.third_line[5])\n            new_length = str(old_length + n)\n            self.third_line[5] = new_length\n\n    def tweak_begin(self, n):\n        # Adjust column 2 of sctg\n        old_sctg_begin = int(self.second_line[1])\n        new_sctg_begin = str(old_sctg_begin + n)\n        self.second_line[1] = new_sctg_begin\n        # Adjust column 8 of sctg\n        old_end = int(self.second_line[7])\n        new_end = str(old_end - n)\n        self.second_line[7] = new_end\n\n        if self.first_line[4] == 'N':\n            # Adjust column 3 of frag\n            old_frag_end = int(self.first_line[2])\n            new_frag_end = str(old_frag_end + n)\n            self.first_line[2] = new_frag_end\n            # Adjust column 6 of frag\n            old_length = int(self.first_line[5])\n            new_length = str(old_length + n)\n            self.first_line[5] = new_length\n\n    \n############################\nif __name__ == '__main__':\n    \n    # Check inputs ...\n    if len(sys.argv) != 3:\n        print(\"usage: python agp_tweaker.py <stuff-to-tweak file> <.agp file>\")\n        print(\"\\nstuff-to-tweak file is tab-delimited in the format:\")\n        print(\"component_id end/beginning number of bases to delete\")\n        sys.exit()\n\n    # Create TweaksList, AgpBuffer\n    tl = TweaksList()\n    tl.read_inputs(sys.argv[1])\n    buff = AgpBuffer(tl)\n\n    # Open .agp file and read first two lines into buffer\n    with open(sys.argv[2]) as agp_file:\n        reader = csv.reader(agp_file, delimiter='\\t', quotechar='|')\n        writer = csv.writer(sys.stdout, delimiter='\\t', quoting=csv.QUOTE_NONE)\n        for row in reader:\n            buff.update(row)\n            buff.tweak_away()\n            for line in buff.ready_to_write:\n                if len(buff.ready_to_write[0]) > 0:\n                    # (Avoids writing blank lines at beginning)\n                    writer.writerow(line) \n    \n    # Reached end of .agp, but still have rows in buffer.\n    buff.update([])\n    buff.tweak_away()\n    for line in buff.ready_to_write:\n        writer.writerow(line)\n    buff.update([])\n    buff.tweak_away()\n    for line in buff.ready_to_write:\n        writer.writerow(line)\n    writer.writerow(buff.first_line)\n\n","repo_name":"genomeannotation/agp_tweaker","sub_path":"agp_tweaker.py","file_name":"agp_tweaker.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1478476743","text":"\"\"\"The module.\n\"\"\"\nfrom typing import List, Callable, Any\nfrom needle.autograd import Tensor\nfrom needle import ops\nimport needle.init as init\nimport numpy as np\n\n\nclass Parameter(Tensor):\n    \"\"\"A special kind of tensor that represents parameters.\"\"\"\n\n\ndef _unpack_params(value: object) -> List[Tensor]:\n    if isinstance(value, Parameter):\n        return [value]\n    elif isinstance(value, Module):\n        return value.parameters()\n    elif isinstance(value, dict):\n        params = []\n        for k, v in value.items():\n            params += _unpack_params(v)\n        return params\n    elif isinstance(value, (list, tuple)):\n        params = []\n        for v in value:\n            params += _unpack_params(v)\n        return params\n    else:\n        return []\n\n\ndef _child_modules(value: object) -> List[\"Module\"]:\n    if isinstance(value, Module):\n        modules = [value]\n        modules.extend(_child_modules(value.__dict__))\n        return modules\n    if isinstance(value, dict):\n        modules = []\n        for k, v in value.items():\n            modules += _child_modules(v)\n        return modules\n    elif isinstance(value, (list, tuple)):\n        modules = []\n        for v in value:\n            modules += _child_modules(v)\n        return modules\n    else:\n        return []\n\n\nclass Module:\n    def __init__(self):\n        self.training = True\n\n    def parameters(self) -> List[Tensor]:\n        \"\"\"Return the list of parameters in the module.\"\"\"\n        return _unpack_params(self.__dict__)\n\n    def _children(self) -> 
List[\"Module\"]:\n return _child_modules(self.__dict__)\n\n def eval(self):\n self.training = False\n for m in self._children():\n m.training = False\n\n def train(self):\n self.training = True\n for m in self._children():\n m.training = True\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n\nclass Identity(Module):\n def forward(self, x):\n return x\n\n\nclass Linear(Module):\n def __init__(self, in_features, out_features, bias=True, device=None, dtype=\"float32\"):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.has_bias = bias\n init_weight = init.kaiming_uniform(in_features, out_features, device=device, dtype=dtype)\n init_bias = init.kaiming_uniform(out_features, 1, device=device, dtype=dtype)\n self.weight = Parameter(init_weight)\n self.bias = Parameter(ops.transpose(init_bias))\n\n def forward(self, X: Tensor) -> Tensor:\n y = ops.matmul(X, self.weight)\n if self.has_bias:\n y = ops.add(y, ops.broadcast_to(self.bias, y.shape))\n return y\n\n\nclass Flatten(Module):\n def forward(self, X):\n return ops.reshape(X, (X.shape[0], -1))\n\n\nclass ReLU(Module):\n def forward(self, x: Tensor) -> Tensor:\n return ops.relu(x)\n\n\nclass Sequential(Module):\n def __init__(self, *modules):\n super().__init__()\n self.modules = modules\n\n def forward(self, x: Tensor) -> Tensor:\n for layer in self.modules:\n x = layer(x)\n return x\n\n\nclass SoftmaxLoss(Module):\n def forward(self, logits: Tensor, y: Tensor):\n lse = ops.logsumexp(logits, 1)\n y_one_hot = init.one_hot(logits.shape[-1], y)\n zy = ops.summation(logits * y_one_hot, 1)\n return ops.summation(lse - zy) / logits.shape[0] \n\n\nclass BatchNorm1d(Module):\n def __init__(self, dim, eps=1e-5, momentum=0.1, device=None, dtype=\"float32\"):\n super().__init__()\n self.dim = dim\n self.eps = eps\n self.momentum = momentum\n self.weight = Parameter(init.ones(dim))\n self.bias = Parameter(init.zeros(dim))\n self.running_mean = init.zeros(dim)\n self.running_var = init.ones(dim)\n\n def forward(self, x: Tensor) -> Tensor:\n ex, dx = self.norm_train(x) if self.training else self.norm_test(x)\n w = ops.broadcast_to(ops.reshape(self.weight, (1, self.dim)), x.shape)\n b = ops.broadcast_to(ops.reshape(self.bias, (1, self.dim)), x.shape)\n return w * (x - ex) / (dx + self.eps) ** 0.5 + b\n\n def norm_train(self, x: Tensor) -> Tensor:\n ex = ops.summation(x, 0) / x.shape[0]\n self.running_mean = ((1 - self.momentum) * self.running_mean + self.momentum * ex).detach()\n ex = ops.broadcast_to(ops.reshape(ex, (1, self.dim)), x.shape)\n dx = ops.summation((x - ex) ** 2, 0) / x.shape[0]\n self.running_var = ((1 - self.momentum) * self.running_var + self.momentum * dx).detach()\n dx = ops.broadcast_to(ops.reshape(dx, (1, self.dim)), x.shape)\n return ex, dx\n\n def norm_test(self, x: Tensor) -> Tensor:\n ex = ops.broadcast_to(ops.reshape(self.running_mean, (1, self.dim)), x.shape)\n dx = ops.broadcast_to(ops.reshape(self.running_var, (1, self.dim)), x.shape)\n return ex, dx\n\n\nclass LayerNorm1d(Module):\n def __init__(self, dim, eps=1e-5, device=None, dtype=\"float32\"):\n super().__init__()\n self.dim = dim\n self.eps = eps\n self.weight = Parameter(init.ones(dim))\n self.bias = Parameter(init.zeros(dim))\n\n def forward(self, x: Tensor) -> Tensor:\n ex = ops.summation(x, 1) / x.shape[1]\n ex = ops.broadcast_to(ops.reshape(ex, (x.shape[0], 1)), x.shape)\n dx = ops.summation((x - ex) ** 2, 1) / x.shape[1]\n dx = ops.broadcast_to(ops.reshape(dx, (x.shape[0], 1)), 
x.shape)\n w = ops.broadcast_to(ops.reshape(self.weight, (1, self.dim)), x.shape)\n b = ops.broadcast_to(ops.reshape(self.bias, (1, self.dim)), x.shape)\n return w * (x - ex) / (dx + self.eps) ** 0.5 + b\n\n\nclass Dropout(Module):\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n self.flatten = Flatten()\n\n def forward(self, x: Tensor) -> Tensor:\n if self.training:\n y = init.randb(*self.flatten(x).shape, p=1 - self.p)\n x = x * ops.reshape(y, x.shape) / (1 - self.p)\n return x\n\n\nclass Residual(Module):\n def __init__(self, fn: Module):\n super().__init__()\n self.fn = fn\n\n def forward(self, x: Tensor) -> Tensor:\n return self.fn(x) + x\n","repo_name":"MuteApo/dlsys_hw2","sub_path":"python/needle/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"71225851078","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-np.pi, np.pi)\nthreshold = 0\ny = []\nthreshold_high = 1\nthreshold_low = 0\nfor v in x: # v as value\n if v >= threshold_high:\n y.append(1)\n elif v <= threshold_low:\n y.append(0)\n else:\n y.append(v)\nplt.figure()\nplt.scatter(x, y, marker='o')\nplt.show()\n","repo_name":"yc97463/amPyLab","sub_path":"thresholdGraph/2-high-low-threshold.py","file_name":"2-high-low-threshold.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15961549622","text":"from Model import CNN_Model\nfrom Model.Layer_obj import Layer_obj\nfrom Transformations import CNN_transformation\nfrom Training import CNN_Training\nimport numpy as np\nimport random\nimport copy\n\n\n'''\nrandomly pick Net2DeeperNet or Net2WiderNet \nThen randomly pick a layer to apply it on \nSkip the Pooling layers\nreturn an instruction for a new layer configuration \n'''\n\ndef random_sample(architecture, num_actions, fc_limit):\n architecture = copy.deepcopy(architecture) \n max_len = len(architecture) - 1 \n Wider = []\n Deeper = []\n\n num_deeper = num_actions[0] \n num_wider = num_actions[1]\n\n '''\n apply deeperNet first because it changes number of layers\n '''\n for i in range(num_deeper):\n (convIdx,fcIdx) = find_layers(architecture)\n '''\n if fc layers haven't reached its limit (as set by fc_limit)\n find from a list of all conv and fc index\n else: only sample from convIdx\n '''\n if (len(fcIdx) >= fc_limit):\n layer_num = random.choice(convIdx)\n else:\n layer_num = random.choice(fcIdx + convIdx)\n Deeper.append(layer_num)\n architecture.insert(layer_num+1, architecture[layer_num])\n\n for i in range(num_wider):\n (convIdx,fcIdx) = find_layers(architecture)\n layer_num = random.choice(fcIdx + convIdx)\n Wider.append(layer_num)\n\n return {'Wider':Wider,'Deeper':Deeper}\n\n'''\ngenerate a list of transformation instructions \nmax_actions indicates what are the maximum number of transformation actions allowed\nmax_duplicates indicates after how many duplicates are sampled, the algorithm will stop caring about duplicates (this is possible in early steps) \n'''\n\ndef random_search(architecture, sample_size, wider_max_actions, deeper_max_actions, max_duplicate, fc_limit):\n print (\"generate \" + str(sample_size) + \" of random configurations\")\n instructions = []\n counter = 0 \n for i in range(sample_size):\n equal = True \n sample = None\n while(equal):\n equal = False\n '''\n Randomly sample number of Net2WiderNet action and Net2DeeperNet action\n 
'''\n            # note: random_sample unpacks num_actions as [num_deeper, num_wider]\n            num_actions = [np.random.randint(deeper_max_actions) + 1, np.random.randint(wider_max_actions) + 1]\n            sample = random_sample(architecture, num_actions, fc_limit)\n            for instruction in instructions:\n                if(instruction == sample):\n                    equal = True\n                    counter = counter + 1\n                    if(counter>max_duplicate):\n                        equal = False\n        instructions.append(sample)\n    \n    return instructions\n\n'''\nsimply a helper function that finds conv and fc layers in the architecture (these are expandable)\n'''\ndef find_layers(architecture):\n    fcIdx = []\n    convIdx = []\n    for i in range(0,len(architecture)):\n        if(architecture[i].Ltype == 'conv'):\n            convIdx.append(i)\n\n        if(architecture[i].Ltype == 'fc'):\n            fcIdx.append(i)\n    return (convIdx, fcIdx)\n\n","repo_name":"shenyangHuang/CapacitySaturation","sub_path":"Search_Controller/Random_Search/CNN_RAS.py","file_name":"CNN_RAS.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"35816315979","text":"import os.path\nfrom tasks import export, normalize, extract, summarize\nfrom data.call_record import CallRecord\nfrom data.call_summary import CallSummary\nfrom tests.test_helpers import records_from_csv, transformed\n\n_DATA_SOURCE = \"./tests/test-sample-data.csv\"\n_DESTINATION = \"./tests/exported.ndjson\"\n\n\ndef test_raw_data_loader_generates_dictionary_from_csv():\n    actual = extract.get_data(_DATA_SOURCE)\n    expected = records_from_csv\n    assert expected == actual\n\n\ndef test_transform_record_returns_transformed_record():\n    actual = normalize._transform_record(records_from_csv[0])\n    expected = {\n        \"id\": \"84ef8-bfede-7972af\",\n        \"call_id\": \"971-605-3989x98841\",\n        \"start_time\": \"08/02/2020 9:12:26 AM\",\n        \"end_time\": \"08/02/2020 9:23:25 AM\",\n        \"customer_name\": \"Lisa Rosales\",\n        \"agent_name\": \"Scott Lopez\",\n        \"duration\": 659.0,\n        \"metadata\": {\n            \"user_agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3 rv:3.0; ar-IL) AppleWebKit/531.8.1 (KHTML, like Gecko) Version/4.0.1 Safari/531.8.1\",\n            \"addr\": \"20.128.243.133\",\n            \"acw\": \"7\",\n        },\n    }\n    assert expected == actual\n\n\ndef test_call_record_constructor_receives_normalized_raw_data():\n    actual = CallRecord(**normalize._transform_record(records_from_csv[0]))\n    expected = CallRecord(\n        id=\"84ef8-bfede-7972af\",\n        call_id=\"971-605-3989x98841\",\n        start_time=\"08/02/2020 9:12:26 AM\",\n        end_time=\"08/02/2020 9:23:25 AM\",\n        customer_name=\"Lisa Rosales\",\n        agent_name=\"Scott Lopez\",\n        duration=659.0,\n        metadata={\n            \"user_agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3 rv:3.0; ar-IL) AppleWebKit/531.8.1 (KHTML, like Gecko) Version/4.0.1 Safari/531.8.1\",\n            \"addr\": \"20.128.243.133\",\n            \"acw\": \"7\",\n        },\n    )\n    assert expected == actual\n\n\ndef test_normalize_records_record_returns_all_records_normalized():\n    actual = normalize.normalize(records_from_csv)\n    expected = [CallRecord(**t) for t in transformed]\n\n    assert expected == actual\n\n\ndef test_summarize_creates_summary_dictionary():\n    actual = summarize.summarize([CallRecord(**t) for t in transformed])\n    expected = {\n        \"total_duration\": 2121,\n        \"agent_durations\": {\n            \"Mr. Kyle Miller\": 842,\n            \"Paul Duncan\": 620,\n            \"Scott Lopez\": 659,\n        },\n    }\n\n    assert expected == actual\n\n\ndef test_summarize_creates_call_summary():\n    actual = summarize.summarize([CallRecord(**t) for t in transformed])\n    expected = CallSummary(\n        total_duration=2121,\n        agent_durations={\n            \"Mr. 
Kyle Miller\": 842,\n \"Paul Duncan\": 620,\n \"Scott Lopez\": 659,\n },\n )\n\n assert expected == actual\n\n\ndef test_export_ndjson_to_file_exports_file():\n # set up: delete the file, if it's already present\n if os.path.isfile(_DESTINATION):\n os.remove(_DESTINATION)\n\n export.export_to_ndjson([CallRecord(**t) for t in transformed], _DESTINATION)\n assert os.path.isfile(_DESTINATION)\n","repo_name":"Bradleywboggs/call-data-test","sub_path":"tests/takehome_test_test.py","file_name":"takehome_test_test.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21565648240","text":"##################################\n# Data Ingestion\n# This code belongs to Umberto Griffo and is adjusted and containerized by Group 10.\n# Original code: https://github.com/umbertogriffo/Predictive-Maintenance-using-LSTM\n##################################\nimport json\n\nimport pandas as pd\nfrom flask import Flask, Response\nfrom pandas import DataFrame\n\nfrom resources.db_util import DBUtil\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\ndb_util = DBUtil()\n\n\n@app.route('/data/', methods=['POST'])\ndef create_table(table_name: str):\n # Select the correct data\n if table_name == 'train':\n df = pd.read_csv('Dataset/PM_train.txt', sep=\" \", header=None)\n df = prepare_train_or_test_df(df)\n elif table_name == 'test':\n df = pd.read_csv('Dataset/PM_test.txt', sep=\" \", header=None)\n df = prepare_train_or_test_df(df)\n elif table_name == 'truth':\n df = pd.read_csv('Dataset/PM_truth.txt', sep=\" \", header=None)\n df.drop(df.columns[1], axis=1, inplace=True)\n df.columns = ['value']\n else:\n return\n # Create the table\n db_util.create_tb(table_name=table_name, column_names=df.columns)\n json_df = df.to_json(orient='records')\n json_df = json.loads(json_df)\n db_util.add_data_records(table_name=table_name, records=json_df)\n # Report success\n return json.dumps({'message': f'the {table_name} table was created at /data/{table_name}'}, indent=4), 200\n\n\ndef prepare_train_or_test_df(df: DataFrame) -> DataFrame:\n df.drop(df.columns[[26, 27]], axis=1, inplace=True)\n df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',\n 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',\n 's15', 's16', 's17', 's18', 's19', 's20', 's21']\n return df.sort_values(['id', 'cycle'])\n\n\n@app.route('/data/', methods=['GET'])\ndef read_data(table_name):\n df = db_util.read_data_records(table_name)\n resp = Response(df.to_json(orient='records'), status=200, mimetype='application/json')\n return resp\n\n\napp.run(host='0.0.0.0', port=7270)\n","repo_name":"reneetheunissen/data-engineering-group-10","sub_path":"src/data_ingestion/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34591690699","text":"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom django.views.generic import ListView\nfrom django.views.generic.base import View\n\nfrom fisher.books.models import Book\nfrom fisher.gift.models import Gift\nfrom fisher.libs.template_mail import send_html_mail\nfrom fisher.taskapp.celery import send_asyn_html_mail\nfrom fisher.wish.models import Wish\n\n\nclass 
save_to_wish(LoginRequiredMixin,View):\n\n    def get(self, request, isbn):\n        user = request.user\n        if user.can_save_to_list(isbn):\n            wish = Wish()\n            wish.user = user\n            wish.isbn = isbn\n            wish.save()\n        else:\n            messages.success(self.request, 'This book has already been added to your gift list or already exists in your wish list, please do not add it again')\n        # return reverse('books:book_detail',kwargs = {\"isbn\":isbn})\n        return redirect(reverse('books:book_detail',kwargs = {\"isbn\":isbn}))\n\nclass user_wish_view(LoginRequiredMixin,ListView):\n\n    template_name = 'my_wish.html'\n\n    def get_queryset(self):\n        queryset = Wish.objects.filter(user=self.request.user)\n        return queryset\n\n\nclass redraw_wish_view(LoginRequiredMixin,View):\n\n    def get(self,request,wid):\n        wish = Wish.objects.get(id = wid,user = request.user)\n        wish.delete()\n        return redirect(reverse('wish:user'))\n\nclass satisfy_wish_view(LoginRequiredMixin,View):\n\n    def get(self,request,wid):\n        user = request.user\n        wish = Wish.objects.get(id=wid)\n        gift = Gift.objects.get(user = user,isbn=wish.isbn,launched=False)\n        book = Book.objects.get(isbn=wish.isbn)\n        if not gift:\n            messages.error(self.request,'You have not uploaded this book yet. Please click \"Add to gift list\"; before adding it, make sure you can actually give this book away')\n            return redirect(reverse('books:book_detail',kwargs={'isbn':wish.isbn}))\n        content = {\n            'wish':wish,\n            'gift':gift,\n            'book':book,\n        }\n        html_content = loader.render_to_string(\n            'email/satisify_wish.html', # the HTML template to render\n            content\n        )\n\n        send_asyn_html_mail.delay('Someone wants to give you a book',html_content,[wish.user.email])\n\n        messages.success(self.request, f'An email has been sent to {wish.user.username}')\n        return redirect(reverse('books:book_detail', kwargs={'isbn': wish.isbn}))\n","repo_name":"simida0755/fisher","sub_path":"fisher/wish/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"30398542904","text":"from MediaListDJ.ml.models import *\nfrom django.contrib import admin\nfrom django import forms\n\nclass DifferentlySizedTextarea(forms.Textarea):\n    def __init__(self, *args, **kwargs):\n        attrs = kwargs.setdefault('attrs', {})\n        attrs.setdefault('cols', 200)\n        attrs.setdefault('rows', 1)\n        super(DifferentlySizedTextarea, self).__init__(*args, **kwargs)\n\nclass StatusInline(admin.TabularInline):\n    model = Status\n    extra = 5\n\nclass SourceMediaInline(admin.TabularInline):\n    model = SourceMedia\n    extra = 1\n\nclass MediaInfoInline(admin.StackedInline):\n    model = MediaInfo\n\n\nclass MediaNotificationAdmin(admin.ModelAdmin):\n    list_display = ['date','media','user','message','url','host']\n    list_display_links = ['message']\n\nclass MediaAdmin(admin.ModelAdmin):\n    list_display = ['id', 'name']\n    list_display_links = ['id', 'name']\n    list_filter = ['type']\n    raw_id_fields = ['parent']\n    search_fields = ['name']\n    ordering = ['name']\n    inlines = [MediaInfoInline,SourceMediaInline]\n\nclass MediaTypeAdmin(admin.ModelAdmin):\n    list_display = ('name','parent')\n    search_fields = ['name']\n    inlines = [StatusInline]\n    \nclass SourceAdmin(admin.ModelAdmin):\n    list_display = ('name', 'importClass','domain')\n    search_fields = ['name']\n    formfield_overrides = { models.TextField: {'widget': DifferentlySizedTextarea}}\n\nclass BaseStatusAdmin(admin.ModelAdmin):\n    #list_display = ('name',)\n    search_fields = ['name']\n    inlines = [StatusInline]\n    \n\nadmin.site.register(Media, MediaAdmin)\nadmin.site.register(MediaType, MediaTypeAdmin)\nadmin.site.register(BaseStatus, BaseStatusAdmin)\nadmin.site.register(Source, SourceAdmin)\nadmin.site.register(MediaNotification, 
MediaNotificationAdmin)\n\n","repo_name":"rheide/Rankkeeper","sub_path":"src/MediaListDJ/ml/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19279137391","text":"import sys\nimport numpy as np\nfrom sklearn.manifold import MDS\nfrom scipy.stats import entropy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef main():\n cancer_type_list = load_types(args[1])\n cancer_type_num = []\n for x in cancer_type_list:\n File = open(\"data/data\" + args[1] + \"_\" + x + \".txt\", \"r\")\n cancer_type_num.append(int(File.readline().split()[0]))\n File.close()\n doc_num = sum(cancer_type_num)\n doc_arrange, alpha_list, Average_arrange = load_result(doc_num, cancer_type_list,\n cancer_type_num)\n markers = [\"o\", \"^\", \"s\"]\n c_palette = sns.color_palette(\"hls\", len(cancer_type_list))\n clf = MDS(n_components=2, dissimilarity=\"precomputed\", n_init=1, max_iter=100)\n JS_dis = np.zeros([len(doc_arrange), len(doc_arrange)])\n for i in range(len(doc_arrange)):\n for j in range(len(doc_arrange)):\n m = list()\n for k in range(K):\n m.append((doc_arrange[i][k] + doc_arrange[j][k])/2)\n JS_dis[i,j] = (entropy(doc_arrange[i], m)+ entropy(doc_arrange[j], m))/2\n transformed = clf.fit_transform(JS_dis)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n index = 0\n for i,x in enumerate(cancer_type_list):\n temp_first_components = []\n temp_second_components = []\n for j in range(cancer_type_num[i]):\n temp_first_components.append(transformed[index][0])\n temp_second_components.append(transformed[index][1])\n index += 1\n ax.scatter(temp_first_components, temp_second_components, label=x, \\\n c=c_palette[i], marker= markers[i%3], s=5)\n ax.legend(loc=\"upper left\", bbox_to_anchor=(1.0, 1), borderaxespad=0)\n fig.subplots_adjust(right=0.65)\n plt.title(\"MDS of activities (JS divergence)\")\n name = \"result/data\" + args[1] + \"/figure/\" + args[2]\\\n + \"_arrangement/mds_JS.png\"\n plt.savefig(name, dpi=300)\n plt.close(1)\n\ndef load_types(data_type):\n File = open('data/PL_data' + data_type + '_list.txt', 'r')\n File.readline()\n cancer_type_list = []\n for line in File.readlines():\n cancer_type_list.append(line[:-1])\n File.close()\n return cancer_type_list\n\ndef load_result(doc_num, cancer_type_list, cancer_type_num):\n doc_arrange = []\n alpha_list = []\n if(K <= 9):\n topic = '0' + args[2]\n else:\n topic = args[2]\n File = open('result/data' + args[1] + '/result_k' + topic + '.txt', 'r')\n File.readline(); File.readline();\n for i in range(K):\n File.readline()\n for i in range(doc_num):\n doc_arrange.append([])\n temp_list = File.readline().split()\n for j in range(K):\n doc_arrange[i].append(float(temp_list[j]))\n for i in range(len(cancer_type_list)):\n alpha_list.append([])\n temp_list = File.readline().split()\n for j in range(K):\n alpha_list[i].append(float(temp_list[j]))\n File.close()\n Average_arrange = []\n index = 0\n for i in range(len(cancer_type_list)):\n Average_arrange.append([0 for k in range(K)])\n for j in range(cancer_type_num[i]):\n for k in range(K):\n Average_arrange[i][k] += doc_arrange[index][k]\n index += 1\n for k in range(K):\n Average_arrange[i][k] /= cancer_type_num[i]\n return doc_arrange, alpha_list, Average_arrange\n\nif __name__ == \"__main__\":\n args = sys.argv ## args[1]: data_type, [2] : num_topic\n K = int(args[2])\n 
main()\r\n","repo_name":"qkirikigaku/Parallelized_LDA","sub_path":"Clustering/MDS.py","file_name":"MDS.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16529957476","text":"from collections import defaultdict\n\nimport numpy as np\n\n\nLOG_TYPES = (int, float, bool, np.float32, np.int64, np.ndarray)\n\n\nclass Info(object):\n    def __init__(self, info=None):\n        if info is None:\n            info = defaultdict(list)\n        self._info = info.copy()\n\n    def add(self, info):\n        if isinstance(info, Info):\n            for k, v in info._info.items():\n                self._info[k].extend(v)\n        elif isinstance(info, dict):\n            for k, v in info.items():\n                if isinstance(v, list):\n                    self._info[k].extend(v)\n                else:\n                    self._info[k].append(v)\n        else:\n            raise ValueError(\"info should be dict or Info (%s)\" % info)\n\n    def clear(self):\n        self._info = defaultdict(list)\n\n    def get_dict(self, reduction=\"mean\", only_scalar=False):\n        ret = {}\n        for k, v in self._info.items():\n            if np.isscalar(v):\n                ret[k] = v\n            elif isinstance(v[0], LOG_TYPES):\n                if \"_mean\" in k or reduction == \"mean\":\n                    ret[k] = np.mean(v)\n                elif reduction == \"sum\":\n                    ret[k] = np.sum(v)\n            elif not only_scalar:\n                ret[k] = v\n        self.clear()\n        return ret\n\n    def __getitem__(self, key):\n        return self._info[key]\n\n    def __setitem__(self, key, value):\n        self._info[key] = value\n\n    def keys(self):\n        return self._info.keys()\n\n    def items(self):\n        return self._info.items()\n","repo_name":"clvrai/furniture","sub_path":"furniture/util/info_dict.py","file_name":"info_dict.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":457,"dataset":"github-code","pt":"62"} +{"seq_id":"35075030059","text":"import os\n\nfrom .default import DefaultUtil\n\n\nclass FileUtil:\n\n    @staticmethod\n    def get_file_path(*path):\n        file_path = os.path.join(*list(map(str, path)))\n\n        if file_path.startswith('/') or file_path.startswith('~'):\n            return file_path\n\n        dir_path = os.path.join(DefaultUtil.OUT_PATH, os.path.dirname(file_path))\n\n        if not os.path.exists(dir_path):\n            os.makedirs(dir_path)\n\n        return os.path.join(DefaultUtil.OUT_PATH, file_path)\n\n    @staticmethod\n    def exists(path, dir=False, raise_exception=True):\n        exists = os.path.exists(path) and (not os.path.isdir(path) if dir is False else os.path.isdir(path))\n\n        if exists is False and raise_exception is True:\n            raise IOError('%s not found: %s' % ('Directory' if dir is True else 'File', path))\n\n        return exists\n\n    @staticmethod\n    def read_lines(file_path):\n        with open(file_path, encoding='utf-8', errors='ignore') as f:\n            return [x.strip() for x in f.readlines() if len(x.strip()) > 0]\n","repo_name":"AnjaniGourisaria/IRIS","sub_path":"iris/util/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"22739690664","text":"# Imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom holisticai.bias.metrics import classification_bias_metrics, regression_bias_metrics\n\n# Regression Plots\n# Recommender Plots\n# Multiclass Plots\n# Exploratory Plots\n# Classification Plots\n# Report Plots\nfrom holisticai.bias.plots import (\n    abroca_plot,\n    accuracy_bar_plot,\n    bias_metrics_report,\n    correlation_matrix_plot,\n    disparate_impact_curve,\n    disparate_impact_plot,\n    distribution_plot,\n    exposure_diff_plot,\n    exposure_ratio_plot,\n    
frequency_matrix_plot,\n frequency_plot,\n group_pie_plot,\n histogram_plot,\n long_tail_plot,\n mae_bar_plot,\n rmse_bar_plot,\n statistical_parity_curve,\n statistical_parity_plot,\n success_rate_curve,\n success_rate_curves,\n)\n\n# Adult df\nfrom holisticai.datasets import load_adult\n\ndf = load_adult()[\"frame\"]\n\n# Last fm df\nfrom holisticai.datasets import load_last_fm\nfrom holisticai.utils import recommender_formatter\n\ndf_2 = load_last_fm()[\"frame\"]\ndf_2[\"score\"] = np.ones(len(df_2))\ndf_pivot, p_attr = recommender_formatter(\n df_2, users_col=\"user\", groups_col=\"sex\", items_col=\"artist\", scores_col=\"score\"\n)\n\n\ndef test_abroca_plot(monkeypatch):\n \"\"\"test_abroca_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n y_true = df[\"class\"] == \">50K\"\n y_score = np.random.random(len(y_true))\n group_a = df[\"sex\"] == \"Male\"\n group_b = df[\"sex\"] == \"Female\"\n abroca_plot(group_a, group_b, y_score, y_true)\n assert True\n\n\ndef test_abroca_plot_aux(monkeypatch):\n \"\"\"test_abroca_plot auxiliary\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n y_true = df[\"class\"] == \">50K\"\n y_score = np.random.random(len(y_true))\n group_a = df[\"sex\"] == \"Male\"\n group_b = df[\"sex\"] == \"Female\"\n abroca_plot(group_a, group_b, y_score, y_true, ax=None, size=(5, 5), title=\"TITLE\")\n assert True\n\n\ndef test_distribution_plot(monkeypatch):\n \"\"\"test_distribution_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n distribution_plot(\n df[\"age\"], df[\"education-num\"], ax=None, size=(20, 10), title=\"blabla\"\n )\n assert True\n\n\ndef test_group_pie_plot(monkeypatch):\n \"\"\"test_group_pie_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n group_pie_plot(np.array(df[\"occupation\"]), ax=ax, title=\"occupation\", size=(10, 10))\n assert True\n\n\ndef test_histogram_plot(monkeypatch):\n \"\"\"test_histogram_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n histogram_plot(\n df[\"education\"], p_attr=df[\"sex\"], ax=ax, size=(10, 7), title=\"BLUBH\"\n )\n assert True\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_correlation_plot_non_numerical_data(monkeypatch):\n \"\"\"test_correlation_plot: This test should fail because the data is not numerical\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n correlation_matrix_plot(\n df,\n target_feature=\"class\",\n n_features=10,\n cmap=\"YlGnBu\",\n ax=ax,\n size=None,\n title=None,\n )\n\n\ndef test_correlation_plot_numerical_data(monkeypatch):\n \"\"\"test_correlation_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n # ensure dataframes are numerical\n df_ = df.copy()\n df_clean = df_.iloc[\n :, [i for i, n in enumerate(df_.isna().sum(axis=0).T.values) if n < 100]\n ]\n df_clean.drop(\n columns=[\"sex\", \"race\", \"education\", \"marital-status\", \"relationship\"],\n inplace=True,\n )\n df_clean[\"class\"].replace({\">50K\": 1, \"<=50K\": 0}, inplace=True)\n correlation_matrix_plot(\n df_clean,\n target_feature=\"class\",\n n_features=5,\n cmap=\"YlGnBu\",\n ax=ax,\n size=None,\n title=None,\n )\n assert True\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_correlation_plot_numerical_data_no_feature(monkeypatch):\n \"\"\"test_correlation_plot: This test should fail because the feature is not in the dataframe\"\"\"\n from sklearn.datasets import load_diabetes\n\n dataset = load_diabetes() # numerical dataset\n X = 
dataset.data\n feature_names = dataset.feature_names\n X = pd.DataFrame(X, columns=feature_names)\n\n correlation_matrix_plot(X, target_feature=\"ages\", size=(12, 7))\n\n\ndef test_frequency_plot(monkeypatch):\n \"\"\"test_frequency_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n frequency_plot(p_attr=df[\"sex\"], y_pred=df[\"class\"])\n assert True\n\n\ndef test_frequency_matrix_plot(monkeypatch):\n \"\"\"test_frequency_matrix_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n frequency_matrix_plot(p_attr=df[\"sex\"], y_pred=df[\"class\"], ax=ax)\n assert True\n\n\ndef test_statistical_parity_plot(monkeypatch):\n \"\"\"test_statistical_parity_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n statistical_parity_plot(p_attr=df[\"sex\"], y_pred=df[\"class\"] == \">50K\")\n assert True\n\n\ndef test_disparate_impact_plot(monkeypatch):\n \"\"\"test_disparate_impact_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n disparate_impact_plot(p_attr=df[\"sex\"], y_pred=df[\"class\"] == \">50K\", ax=ax)\n assert True\n\n\ndef test_accuracy_bar_plot(monkeypatch):\n \"\"\"test_accuracy_bar_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n _, ax = plt.subplots()\n accuracy_bar_plot(\n p_attr=df[\"sex\"],\n y_pred=df[\"class\"] == \">50K\",\n y_true=df[\"class\"] == \">50K\",\n ax=ax,\n )\n assert True\n\n\ndef test_success_rate_curve(monkeypatch):\n \"\"\"test_success_rate_curve\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n y_pred = df[\"age\"]\n success_rate_curve(np.array(group_a), np.array(group_b), np.array(y_pred))\n assert True\n\n\ndef test_statistical_parity_curve(monkeypatch):\n \"\"\"test_statistical_parity_curve\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n y_pred = df[\"age\"]\n statistical_parity_curve(np.array(group_a), np.array(group_b), np.array(y_pred))\n assert True\n\n\ndef test_statistical_parity_curve_aux(monkeypatch):\n \"\"\"test_statistical_parity_curve aux\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n y_pred = df[\"age\"]\n statistical_parity_curve(\n np.array(group_a), np.array(group_b), np.array(y_pred), x_axis=\"quantile\"\n )\n assert True\n\n\ndef test_disparate_impact_curve(monkeypatch):\n \"\"\"test_disparate_impact_curve\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n y_pred = df[\"age\"]\n disparate_impact_curve(np.array(group_a), np.array(group_b), np.array(y_pred))\n assert True\n\n\ndef test_success_rate_curves(monkeypatch):\n \"\"\"test_success_rate_curves\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n success_rate_curves(df[\"education\"], np.array(df[\"age\"]), size=(10, 10))\n assert True\n\n\ndef test_rmse_bar_plot(monkeypatch):\n \"\"\"test_rmse_bar_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n y_pred = df[\"class\"] == \"50K\"\n y_true = 1 - y_pred\n rmse_bar_plot(df[\"education\"], y_pred, y_true, ax=None, size=None, title=None)\n assert True\n\n\ndef test_mae_bar_plot(monkeypatch):\n \"\"\"test_mae_bar_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n y_pred = df[\"class\"] == \"50K\"\n y_true = 1 - y_pred\n mae_bar_plot(df[\"education\"], y_pred, y_true, 
ax=None, size=None, title=None)\n assert True\n\n\ndef test_long_tail_plot(monkeypatch):\n \"\"\"test_long_tail_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n mat_pred = df_pivot.to_numpy()\n long_tail_plot(mat_pred, top=None, thresh=0.5, normalize=False)\n assert True\n\n\ndef test_long_tail_plot_aux(monkeypatch):\n \"\"\"test_long_tail_plot auxiliary\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n mat_pred = df_pivot.to_numpy()\n long_tail_plot(mat_pred, top=100, thresh=0.5, normalize=False)\n assert True\n\n\ndef test_exposure_diff_plot(monkeypatch):\n \"\"\"test_exposure_diff_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = p_attr == \"f\"\n group_b = p_attr == \"m\"\n mat_pred = df_pivot.to_numpy()\n exposure_diff_plot(\n group_a,\n group_b,\n mat_pred,\n top=None,\n thresh=0.5,\n normalize=False,\n ax=None,\n size=None,\n title=None,\n )\n assert True\n\n\ndef test_exposure_ratio_plot(monkeypatch):\n \"\"\"test_exposure_ratio_plot\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = p_attr == \"f\"\n group_b = p_attr == \"m\"\n mat_pred = df_pivot.to_numpy()\n exposure_ratio_plot(\n group_a,\n group_b,\n mat_pred,\n top=None,\n thresh=0.5,\n normalize=False,\n ax=None,\n size=None,\n title=None,\n )\n assert True\n\n\ndef test_bias_report_regression(monkeypatch):\n \"\"\"test_bias_report_regression\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n y_true = df[\"age\"]\n y_pred = np.random.random(y_true.shape)\n metrics = regression_bias_metrics(\n group_a, group_b, y_pred, y_true, metric_type=\"both\"\n )\n mitigated = regression_bias_metrics(\n group_a, group_b, y_pred, y_true, metric_type=\"both\"\n )\n bias_metrics_report(\"regression\", metrics)\n bias_metrics_report(\"regression\", metrics, mitigated)\n assert True\n\n\ndef test_bias_report_classification(monkeypatch):\n \"\"\"test_bias_report_classification\"\"\"\n monkeypatch.setattr(plt, \"show\", lambda: None)\n group_a = df[\"sex\"] == \"Female\"\n group_b = df[\"sex\"] == \"Male\"\n df[\"class\"] = df[\"class\"].apply(lambda x: 1 if x == \">50K\" else 0)\n y_true = df[\"class\"]\n y_pred = np.random.randint(2, size=y_true.shape)\n metrics = classification_bias_metrics(\n group_a, group_b, y_pred, y_true, metric_type=\"both\"\n )\n mitigated = classification_bias_metrics(\n group_a, group_b, y_pred, y_true, metric_type=\"both\"\n )\n bias_metrics_report(\"binary_classification\", metrics)\n bias_metrics_report(\"binary_classification\", metrics, mitigated)\n assert True\n","repo_name":"holistic-ai/holisticai","sub_path":"tests/bias/plots/test_all_plots.py","file_name":"test_all_plots.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"62"} +{"seq_id":"15701988834","text":"\"\"\"\r\nmodule with names that should not be changed during the execution of the system.\r\n\r\nSome are constants (INTRA, INTER) others control system behaviour\r\n\r\n\"\"\"\r\nimport sys\r\n\r\n# NO CHANGES\r\n# to index array in node object: ugly but I found no other way in python\r\nINTRA = 0\r\nINTER = 1\r\n\r\n# in case we want the max degree of a node in a community\r\n# < community_size *( max degree in the largest community / largest community size)\r\n# set ADJUSTED_MAX_DEGREE = True\r\nADJUSTED_MAX_DEGREE = False\r\n\r\n# maximum number of cycles to try to link a community or a network before giving 
up\r\nMAX_CYCLES = 100\r\n\r\n# do not allow disjoint communities (can happen if minimum degree < community size - 1)\r\nALLOW_DISJOINT_COMMUNITIES = False\r\n\r\n# joint distribution of node degrees, auxiliary variables\r\nALFA_MAX = 5 # Strongest disassortative (used for beta_assortativity distribution as \r\n# alfa_assortativity and beta_assortativity parameters)\r\nBETA_MAX = 5 # Strongest assortative\r\n\r\nEVENT_DICT = {'N': 'Begin',\r\n              'R': 'Regenerate',\r\n              'G': 'Grow',\r\n              'C': 'Contract',\r\n              'P': 'Preserve',\r\n              'O': 'Replace',\r\n              'S': 'Split',\r\n              'M': 'Merge',\r\n              'F': 'Vanish',\r\n              'A': 'Absorb',\r\n              'B': 'Resurge'}\r\n\r\nTYPE_DICT = {'S': 'Start', # current state\r\n             'E': 'End', # what happened at the end of the transition\r\n             'T': 'To',\r\n             'F': 'From'}\r\n\r\n\r\n# Create a global timestamp for dynamic time management\r\nclass Ts:\r\n    timestamp = 0\r\n\r\n\r\nsys.setrecursionlimit(10000) # needed for scanning the network for disjoint communities\r\n","repo_name":"ramadap/Community-Lifecycle","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"2115527916","text":"class Solution(object):\n    def isValid(self, s):\n        if len(s)%2 == 1:\n            return False\n        arr = []\n        braces = {\"{\":\"}\",\"[\":\"]\",\"(\":\")\"}\n        \n        for c in s:\n            if c in braces:\n                arr.append(c)\n            elif len(arr) != 0 and c == braces[arr[-1]]:\n                arr.pop()\n            else:\n                return False\n        \n        return len(arr) == 0\n    ","repo_name":"DCtron-lab/DSA","sub_path":"Leetcode/20_Valid_Parentheses.py","file_name":"20_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16432705815","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\ndef best_record_company():\n    df = pd.read_csv('src/UK-top40-1964-1-2.tsv', sep=\"\\t\")\n    best_of = df.groupby(\"Publisher\").sum()['WoC'].idxmax()\n    return df[df['Publisher'] == best_of]\n\ndef main():\n    df = best_record_company()\n    print(df)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"nopomi/hy-data-analysis-python-2019","sub_path":"hy-data-analysis-with-python-2020/part05-e05_best_record_company/src/best_record_company.py","file_name":"best_record_company.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"70189338439","text":"\nimport os\n\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\nimport numpy as np\nimport openslide\nfrom torchvision.transforms import functional as F\nimport nrrd\nimport torch\nfrom torch import nn, Tensor\nfrom torch.utils.data import Dataset, DataLoader\nfrom typing import Tuple, Dict, Optional\nfrom pytorch_lightning import LightningDataModule\nfrom sklearn.model_selection import train_test_split\nfrom skimage import morphology as morph\n\ndef get_bbox_from_mask(mask):\n    pos = np.where(mask==255)\n    if pos[0].shape[0] == 0:\n        return np.zeros((0, 4))\n    else:\n        xmin = np.min(pos[1])\n        xmax = np.max(pos[1])\n        ymin = np.min(pos[0])\n        ymax = np.max(pos[0])\n        return [xmin, ymin, xmax, ymax]\n\n\nclass Compose(object):\n    def __init__(self, transforms):\n        self.transforms = transforms\n\n    def __call__(self, image, target):\n        for t in self.transforms:\n            image, target = t(image, target)\n        return image, target\n\n\nclass ToTensor(nn.Module):\n    def forward(self, image: Tensor,\n                target: 
Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n image = F.to_tensor(image)\n return image, target\n\n\ntransform = Compose([ToTensor(),\n ])\n\n\nclass MFDataset(Dataset):\n\n def __init__(self,\n df,\n data_source='svs_files',\n wsi_folder=None,\n mask_folder=None,\n nrrd_path=None,\n augmentation=None,\n normalization=None,\n inference=False,\n ):\n\n self.df = df\n self.wsi_folder = wsi_folder\n self.mask_folder = mask_folder\n self.nrrd_path = nrrd_path\n self.data_source = data_source\n self.transform = Compose([ToTensor(),\n ])\n\n self.augmentation = augmentation\n self.normalization = normalization\n self.inference = inference\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self, i):\n\n vis_level = 0\n dim = (256, 256)\n if self.data_source == 'svs_files':\n\n index = self.df['index'][i]\n filename = self.df['SVS_ID'][i]\n top_left = (self.df['coords_x'][i], self.df['coords_y'][i])\n wsi_object = openslide.open_slide(self.wsi_folder + '{}.svs'.format(filename))\n img = np.array(wsi_object.read_region(top_left, vis_level, dim).convert(\"RGB\"))\n\n num_objs = 1\n label = self.df['num_objs'][i]\n mask = np.load(self.mask_folder + '{}_masks.npy'.format(filename))[index]\n\n elif self.data_source == 'nrrd_files':\n custom_field_map = {\n 'SVS_ID': 'string',\n 'top_left': 'int list',\n 'center': 'int list',\n 'dim': 'int list',\n 'vis_level': 'int',\n 'diagnosis': 'string',\n 'annotation_label': 'string',\n 'mask': 'double matrix'}\n\n data, header = nrrd.read(os.path.join(self.nrrd_path, self.df['nrrd_file'][i]), custom_field_map)\n img = data[256:, 256:, :]\n mask = header['mask'][256:, 256:].astype('bool')\n #mask = morph.remove_small_objects(mask, min_size=300)\n mask = np.array(255 * mask)\n\n num_objs = 1\n\n if self.df['ann_label'][i] == 'yes':\n label = 1\n elif self.df['ann_label'][i] == 'no':\n label = 0\n\n if self.augmentation is not None:\n transformed = self.augmentation(image=img, mask=mask)\n img = transformed[\"image\"]\n mask = transformed[\"mask\"]\n\n masks = mask[np.newaxis, :, :]\n boxes = []\n area = []\n labels = []\n\n for n in range(num_objs):\n box = get_bbox_from_mask(masks[n])\n boxes.append(box)\n area.append((box[2] - box[0]) * (box[3] - box[1]))\n labels.append(label)\n\n obj_ids = np.array([255])\n masks = mask == obj_ids[:, None, None]\n\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n labels = torch.as_tensor(labels, dtype=torch.int64)\n area = torch.as_tensor(area, dtype=torch.float32)\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n image_id = torch.tensor([i])\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target['area'] = area\n target[\"iscrowd\"] = iscrowd\n target[\"masks\"] = masks\n\n if self.transform is not None:\n img, target = self.transform(img, target)\n\n if self.normalization is not None:\n img = self.normalization(img)\n\n if self.inference:\n return img\n else:\n return img, target\n\nclass MixDataset(Dataset):\n\n def __init__(self,\n df,\n\n wsi_folder,\n mask_folder=None,\n masked_input=True,\n data_source='nrrd_files',\n dim=(64, 64),\n vis_level=0,\n channels=3,\n nrrd_path=None,\n transform=None,\n extract_feature=False,\n feature_setting=None,\n inference=False):\n\n self.df = df\n self.wsi_folder = wsi_folder\n self.mask_folder = mask_folder\n self.masked_input = masked_input\n self.nrrd_path = nrrd_path\n self.data_source = data_source\n 
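# dim is the (width, height) in pixels of the region read from the slide at openslide pyramid level vis_level\n        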
self.dim = dim\n self.vis_level = vis_level\n self.channels = channels\n self.extract_feature = extract_feature\n self.feature_setting = feature_setting\n self.transform = transform\n self.inference = inference\n\n def __getitem__(self, i):\n\n if self.data_source == 'svs_files':\n\n SVS_ID = self.df['SVS_ID'][i]\n top_left = (self.df['coords_x'][i], self.df['coords_y'][i])\n wsi_object = openslide.open_slide(self.wsi_folder + '{}.svs'.format(SVS_ID))\n\n if self.masked_input:\n index = self.df['index'][i]\n data = np.array(wsi_object.read_region(top_left, self.vis_level, (256, 256)).convert(\"RGB\"))\n mask = np.load(self.mask_folder + '{}_detected_masks.npy'.format(SVS_ID))[index]\n else:\n x = self.df['cell_x'][i]\n y = self.df['cell_y'][i]\n img = np.array(wsi_object.read_region((int(x), int(y)), self.vis_level, self.dim).convert(\"RGB\"))\n\n elif self.data_source == 'nrrd_files':\n custom_field_map = {\n 'SVS_ID': 'string',\n 'top_left': 'int list',\n 'center': 'int list',\n 'dim': 'int list',\n 'vis_level': 'int',\n 'diagnosis': 'string',\n 'annotation_label': 'string',\n 'mask': 'double matrix'}\n\n data, header = nrrd.read(os.path.join(self.nrrd_path, self.df['nrrd_file'][i]), custom_field_map)\n img = data[256:, 256:, :]\n mask = header['mask'][256:, 256:].astype('bool')\n mask = morph.remove_small_objects(mask, min_size=300)\n mask_3d = np.stack((mask, mask, mask), axis=-1)\n box = get_bbox_from_mask(np.array(255 * mask))\n center = ([(box[1]+box[3])/2, (box[0]+box[2])/2])\n\n if self.masked_input:\n masked_img = (img * mask_3d)[int(center[0] - 32):int(center[0] + 32), int(center[1] - 32):int(center[1] + 32), :]\n img = np.array(masked_img)\n else:\n img = img[int(center[0] - 32):int(center[0] + 32), int(center[1] - 32):int(center[1] + 32), :]\n \n if self.transform: img = self.transform(img)\n\n if self.extract_feature:\n feature_vector = extract_features(np.moveaxis(img.cpu().detach().numpy(), 0, -1),\n numPoints=self.feature_setting['numPoints'],\n radius=self.feature_setting['radius'],\n color_space=self.feature_setting['color_space'],\n spatial_size=self.feature_setting['spatial_size'],\n hist_bins=self.feature_setting['hist_bins'],\n orient=self.feature_setting['orient'],\n pix_per_cell=self.feature_setting['pix_per_cell'],\n cell_per_block=self.feature_setting['cell_per_block'],\n hog_channel=self.feature_setting['hog_channel'],\n stats_feat=self.feature_setting['stats_feat'],\n lbp_feat=self.feature_setting['lbp_feat'],\n spatial_feat=self.feature_setting['spatial_feat'],\n hist_feat=self.feature_setting['hist_feat'],\n hog_feat=self.feature_setting['hog_feat'])\n data = [img, feature_vector]\n else:\n data = img\n\n if self.inference:\n return data\n else:\n if self.data_source == 'svs_files':\n label = torch.as_tensor(self.df['gt_label'][i], dtype=torch.int64)\n elif self.data_source == 'nrrd_files':\n label = torch.as_tensor(self.df['ann_label'][i], dtype=torch.int64)\n\n return data, label\n\n def __len__(self):\n return self.df.shape[0]\n\n\nclass MFDataModule(LightningDataModule):\n def __init__(self,\n df_train,\n df_val,\n df_test,\n DataType='MFDataset',\n batch_size=2,\n num_of_worker=0,\n augmentation=None,\n normalization=None,\n train_transform=None,\n val_transform=None,\n inference=False,\n collate_fn=None,\n **kwargs):\n\n super().__init__()\n self.batch_size = batch_size\n self.num_of_worker = num_of_worker\n self.DataType = DataType\n self.collate_fn = collate_fn\n\n if self.DataType == 'MFDataset':\n self.train_data = MFDataset(df_train, 
augmentation=augmentation, normalization=normalization, inference=inference, **kwargs)\n\n            self.val_data = MFDataset(df_val, augmentation=augmentation, normalization=normalization, inference=inference, **kwargs)\n            self.test_data = MFDataset(df_test, augmentation=augmentation, normalization=normalization, inference=inference, **kwargs)\n\n\n        elif self.DataType == 'MixDataset':\n            self.train_data = MixDataset(df_train, transform=train_transform, **kwargs)\n            self.val_data = MixDataset(df_val, transform=val_transform, **kwargs)\n            self.test_data = MixDataset(df_test, transform=val_transform, **kwargs)\n\n    def train_dataloader(self):\n        return DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_of_worker, collate_fn=self.collate_fn)\n\n    def val_dataloader(self):\n        return DataLoader(self.val_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_of_worker, collate_fn=self.collate_fn)\n\n    def test_dataloader(self):\n        return DataLoader(self.test_data, batch_size=1, shuffle=False, num_workers=self.num_of_worker, collate_fn=self.collate_fn)\n","repo_name":"tmmschmit/DigitalPathologyAI","sub_path":"Dataloader/ObjectDetection.py","file_name":"ObjectDetection.py","file_ext":"py","file_size_in_byte":11783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41577020915","text":"\nimport streamlit as st\nfrom PIL import Image\nfrom helper_functions import *\nfrom text_eda.text_data import *\n\nfrom text_eda.text_data import text_data_app\n\n# app setup \ntry:\n\n    # create ss object\n    if 'data' not in st.session_state:\n        st.session_state.data = None\n\n    # app design\n    # app_meta('📚')\n    # set_bg_hack('dqw_background.png')\n\n    # set logo in sidebar using PIL\n    logo = Image.open('logo.png')\n    st.sidebar.image(logo, \n                     use_column_width=True)\n    \n    # hide warning for st.pyplot() deprecation\n    st.set_option('deprecation.showPyplotGlobalUse', False)\n    \n    # Main panel setup\n    display_app_header(main_txt='Text Analyzer',\n                       sub_txt='')\n\n    st.markdown(\"\"\"---\"\"\")\n    # provide options to the user to navigate to other dqw apps\n    \n    text_data_app()\n\nexcept KeyError:\n    st.error(\"Please select a key value from the dropdown to continue.\")\n    \nexcept ValueError:\n    st.error(\"Oops, something went wrong. Please check previous steps for inconsistent input.\")\n    \nexcept TypeError:\n    st.error(\"Oops, something went wrong. 
Please check previous steps for inconsistent input.\")\n","repo_name":"varmadeepak/textAnalyzer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6968694185","text":"from typing import List\nimport os\nimport config\n\ndef ReadTXT(path: str) -> str:\n    with open(path, 'r', encoding='utf-8') as f:\n        return f.read()\n\ndef WriteTXT(path: str, content: str) -> None:\n    with open(path, 'w', encoding='utf-8') as f:\n        f.write(content)\n\ndef ProcessTXT2Lines(content: str) -> List[str]:\n    originList = content.splitlines()\n    resultList = []\n    for elem in originList:\n        if len(elem) != 0 and elem[0] != '<':\n            resultList.append(elem)\n    return resultList\n\ndef ImportSentenceData() -> List[str]:\n    resultList = []\n    for root, dirs, files in os.walk(config.SEGMENTED_DATA_PATH):\n        for f in files:\n            path = os.path.join(root, f)\n            content = ReadTXT(path)\n            resultList.extend(ProcessTXT2Lines(content))\n    return resultList\n\ndef ImportSentenceData2() -> List[str]:\n    resultList = []\n    path = os.path.join(config.PROCESSED_DATA_PATH, config.SENTENCE_FILENAME)\n    content = ReadTXT(path)\n    resultList.extend(ProcessTXT2Lines(content))\n    return resultList\n\ndef WriteSentenceData() -> None:\n    resultList = ImportSentenceData()\n    path = os.path.join(config.PROCESSED_DATA_PATH, config.SENTENCE_FILENAME)\n    with open(path, 'w', encoding='utf-8') as f:\n        for elem in resultList:\n            f.write(elem)\n            f.write('\\n')","repo_name":"bjrjk/NLP-Exp","sub_path":"IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23732012842","text":"# __new__ : called while the object is being created\n# __init__: called after the object has been created, to add instance attributes and initialize them\n\n# Creating an object automatically calls both methods: __new__ first (the object is created), then __init__ to initialize it\nclass Student(object):\n    # the parameters of __new__ must be compatible with those of __init__\n    def __new__(cls, *args, **kwargs):\n        print(\"creating object\")\n        print(args, kwargs)\n        return object.__new__(cls)\n\n    # the object has been created; this adds instance attributes to it\n    def __init__(self, name, age):\n        self.name = name\n        self.age = age\n        print(\"initializing\")\n\n\nstu = Student(\"李四\", 20)","repo_name":"EvaOEva/python1","sub_path":"15-魔法方法new方法.py","file_name":"15-魔法方法new方法.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39864835917","text":"import json\nimport typing\n\nimport requests\n\n\nclass DataReading(typing.NamedTuple):\n    \"\"\"A data reading from a meter, with a quantity and a unit.\"\"\"\n    quantity: float\n    unit: str\n\n\nclass UnsuccessfulRequest(Exception):\n    \"\"\"Raised when a request sent to the server is unsuccessful.\"\"\"\n    pass\n\n\nclass NoResponseList(UnsuccessfulRequest):\n    \"\"\"Raised when server returns no response list.\"\"\"\n    def __init__(self, bulk_request):\n        super().__init__(\n            f\"The server returned no response list for {type(bulk_request)} bulk request\"\n        )\n\n\nclass NoDataReading(UnsuccessfulRequest):\n    \"\"\"Raised when server returns no data reading.\"\"\"\n    def __init__(self, facility, instance):\n        super().__init__(\n            f\"The server returned no data reading for \\\"{facility}\\\" \\\"{instance}\\\"\"\n        )\n\n\ndef get_value(facility, instance, live=False) -> DataReading:\n    \"\"\"\n    Get the reading of a meter through Building Energy Gateway.\n\n    Although mostly compatible with building_data_requests' function of the same name,\n    note that if the server 
returns no data,\n datareadingrequests' version will raise an exception.\n\n :param facility: The facility in which the meter is located.\n :param instance: The meter's number.\n :param live: (optional) Whether to get a live reading.\n By default, the function will get a cached reading.\n :return: a DataReading namedtuple consisting of (quantity, unit).\n \"\"\"\n args = {\n \"facility\": facility,\n \"instance\": instance,\n }\n if live:\n args[\"live\"] = True\n\n response = send_get_request(args)\n try:\n instance_response = response.json()[\"instance_response\"]\n except TypeError:\n raise NoDataReading(facility, instance)\n if not instance_response[\"success\"]:\n raise NoDataReading(facility, instance)\n\n data = instance_response[\"data\"]\n return DataReading(data[\"presentValue\"], data[\"units\"])\n\n\ndef get_bulk(bulk_request: typing.Iterable[typing.Dict]) -> typing.Dict:\n \"\"\"\n Get readings in bulk from Building Energy Gateway.\n\n Although mostly compatible with building_data_requests' function of the same name,\n note that if the server returns no data for any specific instance,\n datareadingrequests' version will raise an exception.\n\n :param bulk_request: An iterable, with each item specifying one instance.\n Each item should be a dictionary with keys \"facility\", \"instance\", and \"label\".\n \"label\" is optional.\n :return: A dictionary representing the server's JSON response.\n Inside this dictionary is [\"rsp_list\"], a list of the readings.\n [\"rsp_list\"] can be used to create a Pandas DataFrame.\n \"\"\"\n response = send_get_request({\"bulk\": json.dumps(bulk_request)})\n try:\n response_dict = response.json()\n except json.JSONDecodeError:\n raise NoResponseList(bulk_request)\n\n if len(response_dict[\"rsp_list\"]) == 0:\n raise NoResponseList(bulk_request)\n\n for r in response_dict[\"rsp_list\"]:\n if not r[\"success\"]:\n raise NoDataReading(r[\"facility\"], r[\"instance\"])\n\n return response_dict\n\n\ndef send_get_request(args):\n \"\"\"\n Send an HTTP GET request to Building Energy Gateway.\n\n This is the datareadingrequests equivalent of\n building_data_requests' post_request().\n Note that it does not retry requests without SSL.\n\n :param args: Arguments for the request.\n :return: Response object.\n \"\"\"\n return requests.get(\"https://energize.andoverma.us\", params=args)\n","repo_name":"seasonedfish/datareadingrequests","sub_path":"datareadingrequests.py","file_name":"datareadingrequests.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"74252479558","text":"from ..api import api as API\n\n\nclass AutoJSONRPCTest(API.testCase):\n\n def test_base(self):\n self.assertTrue('autojsonrpc' in self.api.resources)\n\n uri = self.reverse('autojsonrpc')\n self.assertEqual(uri, '/pirates/rpc')\n\n response = self.get_resource('autojsonrpc')\n self.assertContains(response, 'Invalid RPC Call.')\n\n# lint_ignore=C0110\n","repo_name":"klen/adrest","sub_path":"tests/core/tests/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"62"} +{"seq_id":"41562306523","text":"import asyncio\r\nimport time\r\nimport aiohttp\r\nimport async_timeout\r\nimport requests\r\n\r\nfrom pages.books_page import BooksPage\r\n\r\n\r\n# Task - async function that we are defining below\r\nasync def fetch_page(session, url):\r\n start = time.time()\r\n async with 
async_timeout.timeout(30):\r\n        # Note that instead of : __enter__ and __exit__\r\n        # The methods are __aenter__ and __aexit__\r\n        async with session.get(url) as response:\r\n            print(f\"Page took {time.time() - start}\")\r\n            return await response.text()\r\n\r\n\r\n# Passing in event loop as an arg for safeguarding.\r\n# Ensures that new loop isn't created every time\r\nasync def get_multiple_pages(loop, *urls):\r\n    tasks = []\r\n    async with aiohttp.ClientSession(loop=loop) as session:\r\n        for url in urls:\r\n            tasks.append(fetch_page(session, url))\r\n        # gather groups the tasks so they run concurrently under one awaitable\r\n        grouped_tasks = asyncio.gather(*tasks) # completes once every task is DONE, i.e. all pages gathered\r\n        return await grouped_tasks # suspends here until all pages have been collected\r\n\r\n\r\nURL = 'http://books.toscrape.com/catalogue/page-1.html'\r\ncontent = requests.get(URL).content\r\npage = BooksPage(content)\r\n\r\nloop = asyncio.get_event_loop()\r\nbooks = page.books\r\n\r\nurls = [f'http://books.toscrape.com/catalogue/page-{page_num+1}.html' for page_num in range(page.page_count)]\r\n\r\nstart = time.time()\r\npages = loop.run_until_complete(get_multiple_pages(loop, *urls))\r\nprint(f'total page requests took: {time.time()-start}')\r\n\r\nfor page_content in pages:\r\n    page = BooksPage(page_content)\r\n    books.extend(page.books)\r\n\r\n'''\r\nfor page_num in range(1, page.page_count):\r\n    print(page_num)\r\n    url = f'http://books.toscrape.com/catalogue/page-{page_num+1}.html'\r\n    page_content = requests.get(url).content\r\n    #logger.debug(\"Creating BooksPage from page content\")\r\n    page = BooksPage(page_content)\r\n    books.extend(page.books)\r\n'''\r\n","repo_name":"caleanunoah/async_web_scraper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2642568768","text":"import sys\nfrom os.path import join, isfile\n\nfrom SCons.Script import DefaultEnvironment, SConscript\n\nenv = DefaultEnvironment()\nmcu = env.BoardConfig().get(\"build.mcu\")\ncore = env.BoardConfig().get(\"build.core\", \"\")\n\nif core == \"maple\":\n    build_script = join(\n        env.PioPlatform().get_package_dir(\"framework-arduinoststm32-maple\"),\n        \"tools\", \"platformio-build-%s.py\" % mcu[0:7])\nelif core == \"stm32l0\":\n    build_script = join(\n        env.PioPlatform().get_package_dir(\"framework-arduinoststm32l0\"),\n        \"tools\", \"platformio-build.py\")\nelse:\n    build_script = join(env.PioPlatform().get_package_dir(\n        \"framework-arduinoststm32\"), \"tools\", \"platformio\", \"platformio-build.py\")\n\nif not isfile(build_script):\n    sys.stderr.write(\"Error: Missing PlatformIO build script %s!\\n\" % build_script)\n    env.Exit(1)\n\nSConscript(build_script)\n","repo_name":"platformio/platform-ststm32","sub_path":"builder/frameworks/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"62"} +{"seq_id":"4989053076","text":"import json\n\nfrom utils.crypto import hash_sha256\nfrom utils.file import File\n\n\ndef node_from_json(node_json):\n    d = json.loads(node_json)\n    return TreeNode.from_dictionary(d)\n\n\ndef node_to_json(node):\n    return json.dumps(node.flatten())\n\n\ndef get_root_hash(structure_json, file, replace_hash):\n    \"\"\"\n    Calculates the root hash of the provided structure with the provided\n    file included.\n    :param structure_json: The structure of a minimal tree in JSON.\n    :param file: 
The file whose hash is to be used.\n :param replace_hash: If the hash of the provided file should be used instead of the already present hash.\n :return: The root hash of the merkle tree.\n \"\"\"\n structure = node_from_json(structure_json)\n\n tree = MerkleTree(16, False)\n tree.root_node = structure\n validation_node = tree.get_structure_with_file(file, replace_hash)\n validation_node.fix_hash()\n return validation_node.node_hash\n\n\nclass MerkleTree(object):\n root_node = None\n foundation = []\n\n def __init__(self, width=16, create_initial_nodes=True):\n self.width = width\n if create_initial_nodes:\n self.foundation = list([TreeNode() for _ in range(width)])\n\n def insert_file(self, file: 'File'):\n \"\"\"\n Inserts a file hash to the tree's foundation.\n The file_id decides the position of the leaf node.\n :param file: A File-object with a valid ID.\n \"\"\"\n try:\n node = TreeNode(None, None, bytes(file.data, encoding='utf-8'))\n self.foundation[file.file_id] = node\n self.update(file)\n return True\n except IndexError:\n return False\n\n def get_structure_with_file(self, file, replace_file_hash=False):\n \"\"\"\n Creates a tree structure with only the nodes that are required for the provided file to be\n verifiable.\n :param file: The file whose hash has to be included in the tree.\n :param replace_file_hash: If the hash in the structure should be replaced with the hash of the provided file.\n :return: The root node of the newly created tree.\n \"\"\"\n left_margin = 0\n width = self.width\n real_node = self.root_node\n node = TreeNode()\n root_node = node\n while width > 1:\n # Because the tree is binary and the file ID corresponds to its leaf node's position\n # it can be decided whether the leaf node is to the left or right by comparing\n # the file ID with the remaining tree width.\n if file.file_id >= left_margin + width / 2:\n node.left_child = TreeNode()\n node.left_child.node_hash = real_node.left_child.node_hash\n node.right_child = TreeNode()\n node = node.right_child\n real_node = real_node.right_child\n # If the leaf node is to the right then half of the tree downwards is to the left,\n # which has to be accounted for during the above comparison.\n left_margin += width / 2\n else:\n node.right_child = TreeNode()\n node.right_child.node_hash = real_node.right_child.node_hash\n node.left_child = TreeNode()\n node = node.left_child\n real_node = real_node.left_child\n\n # Each step splits the tree in half\n width /= 2\n\n if replace_file_hash:\n node.node_hash = hash_sha256(bytes(file.data, encoding='utf-8'))\n else:\n node.node_hash = real_node.node_hash\n\n return root_node\n\n def update(self, file: 'File'):\n \"\"\"\n Traverses the tree and updates the hashes connected to the provided file, including the leaf node itself.\n :param file: A File-object with a valid ID.\n \"\"\"\n left_margin = 0\n width = self.width\n node = self.root_node\n while width > 1:\n if file.file_id >= left_margin + width / 2:\n node = node.right_child\n node.node_hash = None\n left_margin += width / 2\n else:\n node = node.left_child\n node.node_hash = None\n width /= 2\n\n node.node_hash = hash_sha256(bytes(file.data, encoding='utf-8'))\n self.root_node.node_hash = None\n self.root_node.fix_hash()\n\n def build(self):\n \"\"\"\n Builds the tree from the bottom up.\n \"\"\"\n nodes = self.foundation.copy()\n next_level = []\n\n while len(nodes) > 1:\n for i in range(0, len(nodes), 2):\n if i == len(nodes) - 1:\n next_level.append(nodes[i])\n break\n\n left = nodes[i]\n right = nodes[i + 
1]\n node = TreeNode(left, right)\n next_level.append(node)\n nodes = next_level.copy()\n next_level = []\n\n self.root_node = nodes[0] if len(nodes) > 0 else None\n\n\nclass TreeNode(object):\n node_hash = None\n\n def __init__(self, left_child: 'TreeNode' = None, right_child: 'TreeNode' = None, node_data: bytes = None):\n super().__init__()\n self.left_child = left_child\n self.right_child = right_child\n if node_data:\n self.node_hash = hash_sha256(node_data)\n else:\n self.fix_hash()\n\n def fix_hash(self):\n \"\"\"\n Calculates this node's hash based on its children's hashes.\n Does nothing if the node's hash is not empty.\n \"\"\"\n if not self.is_empty():\n return\n\n combined_hash = b''\n if self.left_child:\n self.left_child.fix_hash()\n if not self.left_child.is_empty():\n combined_hash += self.left_child.node_hash\n if self.right_child:\n self.right_child.fix_hash()\n if not self.right_child.is_empty():\n combined_hash += self.right_child.node_hash\n\n if combined_hash != b'':\n self.node_hash = hash_sha256(combined_hash)\n\n def is_empty(self):\n return self.node_hash is None\n\n def flatten(self):\n return {\n 'node_hash': self.node_hash.decode('utf-8').replace(\"'\", '\"') if self.node_hash else None,\n 'left_child': self.left_child.flatten() if self.left_child else None,\n 'right_child': self.right_child.flatten() if self.right_child else None\n }\n\n def __str__(self):\n return str(self.node_hash)\n\n def __eq__(self, other):\n if other and isinstance(other, TreeNode):\n if self.left_child != other.left_child:\n return False\n if self.right_child != other.right_child:\n return False\n return self.node_hash == other.node_hash\n return super().__eq__(other)\n\n @classmethod\n def from_dictionary(cls, dictionary):\n obj = cls()\n obj.node_hash = bytes(dictionary['node_hash'], encoding='utf-8') if dictionary['node_hash'] else None\n\n if 'left_child' in dictionary and dictionary['left_child'] is not None:\n obj.left_child = cls.from_dictionary(dictionary['left_child'])\n\n if 'right_child' in dictionary and dictionary['right_child'] is not None:\n obj.right_child = cls.from_dictionary(dictionary['right_child'])\n\n return obj\n","repo_name":"inda18plusplus/ggerholm-crypto","sub_path":"utils/merkle.py","file_name":"merkle.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6658911088","text":"# -*- coding: utf-8 -*-\n#######################################################################\n# License: MIT License #\n# Homepage: https://github.com/tasooshi/torboost/ #\n# Version: 0.9.5 #\n#######################################################################\n\nimport setuptools\n\n\nwith open('README.md') as f:\n long_description = f.read()\n\n\nsetuptools.setup(\n name='torboost',\n version='0.9.5',\n author='tasooshi',\n author_email='tasooshi@pm.me',\n description='Download utility for Tor',\n license='MIT License',\n keywords=[\n 'Tor',\n 'onion',\n 'download',\n ],\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/tasooshi/torboost/',\n packages=setuptools.find_packages(),\n install_requires=(\n 'requests[socks]==2.27.1',\n 'stem==1.8.0',\n ),\n entry_points={\n 'console_scripts': (\n 'torboost=torboost.torboost:entry_point',\n ),\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.9',\n 'Programming 
Language :: Python :: 3.10',\n ]\n)\n","repo_name":"tasooshi/torboost","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"62"} +{"seq_id":"29015086758","text":"import torch\n\nimport timesformer.utils.lr_policy as lr_policy\n\n\ndef construct_optimizer(model, cfg):\n \"\"\"\n Construct a stochastic gradient descent or ADAM optimizer with momentum.\n Details can be found in:\n Herbert Robbins, and Sutton Monro. \"A stochastic approximation method.\"\n and\n Diederik P.Kingma, and Jimmy Ba.\n \"Adam: A Method for Stochastic Optimization.\"\n\n Args:\n model (model): model to perform stochastic gradient descent\n optimization or ADAM optimization.\n cfg (config): configs of hyper-parameters of SGD or ADAM, includes base\n learning rate, momentum, weight_decay, dampening, and etc.\n \"\"\"\n # Batchnorm parameters.\n bn_params = []\n # Non-batchnorm parameters.\n non_bn_parameters = []\n for name, p in model.named_parameters():\n if \"bn\" in name:\n bn_params.append(p)\n else:\n non_bn_parameters.append(p)\n # Apply different weight decay to Batchnorm and non-batchnorm parameters.\n # In Caffe2 classification codebase the weight decay for batchnorm is 0.0.\n # Having a different weight decay on batchnorm might cause a performance\n # drop.\n optim_params = [\n {\"params\": bn_params, \"weight_decay\": cfg.BN.WEIGHT_DECAY},\n {\"params\": non_bn_parameters, \"weight_decay\": cfg.SOLVER.WEIGHT_DECAY},\n ]\n # Check all parameters will be passed into optimizer.\n assert len(list(model.parameters())) == len(non_bn_parameters) + len(\n bn_params\n ), \"parameter size does not match: {} + {} != {}\".format(\n len(non_bn_parameters), len(bn_params), len(list(model.parameters()))\n )\n\n if cfg.SOLVER.OPTIMIZING_METHOD == \"sgd\":\n return torch.optim.SGD(\n optim_params,\n lr=cfg.SOLVER.BASE_LR,\n momentum=cfg.SOLVER.MOMENTUM,\n weight_decay=cfg.SOLVER.WEIGHT_DECAY,\n dampening=cfg.SOLVER.DAMPENING,\n nesterov=cfg.SOLVER.NESTEROV,\n )\n elif cfg.SOLVER.OPTIMIZING_METHOD == \"adam\":\n return torch.optim.Adam(\n optim_params,\n lr=cfg.SOLVER.BASE_LR,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=cfg.SOLVER.WEIGHT_DECAY,\n )\n elif cfg.SOLVER.OPTIMIZING_METHOD == \"adamw\":\n return torch.optim.AdamW(\n optim_params,\n lr=cfg.SOLVER.BASE_LR,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=cfg.SOLVER.WEIGHT_DECAY,\n )\n else:\n raise NotImplementedError(\n \"Does not support {} optimizer\".format(cfg.SOLVER.OPTIMIZING_METHOD)\n )\n\n\ndef get_epoch_lr(cur_epoch, cfg):\n \"\"\"\n Retrieves the lr for the given epoch (as specified by the lr policy).\n Args:\n cfg (config): configs of hyper-parameters of ADAM, includes base\n learning rate, betas, and weight decays.\n cur_epoch (float): the number of epoch of the current training stage.\n \"\"\"\n return lr_policy.get_lr_at_epoch(cfg, cur_epoch)\n\n\ndef set_lr(optimizer, new_lr):\n \"\"\"\n Sets the optimizer lr to the specified value.\n Args:\n optimizer (optim): the optimizer using to optimize the current network.\n new_lr (float): the new learning rate to set.\n \"\"\"\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = new_lr\n","repo_name":"facebookresearch/TimeSformer","sub_path":"timesformer/models/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":1280,"dataset":"github-code","pt":"62"} 
+{"seq_id":"69865701318","text":"import pyvista as pv\n\ndef getDamagedElements(vtkFile, threshold):\n data = pv.read(vtkFile)\n data = data[0][0]\n data.set_active_scalars('d')\n data = data.point_data_to_cell_data()\n elementalDamageList = data.cell_data['d']\n damagedElements = []\n for i in range(len(elementalDamageList)):\n if elementalDamageList[i] > threshold:\n damagedElements.append(i)\n return damagedElements\n\n","repo_name":"andrembcosta/crack-tip-tracking","sub_path":"damagedElements.py","file_name":"damagedElements.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19045484081","text":"import ast\nimport os\nimport collections\n\nfrom nltk import pos_tag\n\n# rename !\ndef merge_to_flat_list(_list):\n \"\"\" [(1,2), (3,4)] -> [1, 2, 3, 4]\"\"\"\n return sum([list(item) for item in _list], [])\n\n\n### see to Rasswet / otus-web / dz01 ...\ndef is_verb(word):\n if not word:\n return False\n \n tagged_word = pos_tag([word])\n word, tag = tagged_word[0]\n standard_tag_for_verb = 'VB'\n return tag == standard_tag_for_verb\n\n\ndef get_file_names_from_directories(path):\n filenames = []\n for dirname, dirs, files in os.walk(path, topdown=True):\n for file in files:\n if file.endswith('.py'):\n filenames.append(os.path.join(dirname, file))\n if len(filenames) == 100:\n break\n #print('total %s files' % len(filenames))\n return filenames\n\n\ndef generate_trees(_path, with_filenames=False, with_file_content=False):\n filenames = []\n trees = []\n filenames = get_file_names_from_directories(_path)\n \n for filename in filenames:\n with open(filename, 'r', encoding='utf-8') as attempt_handler:\n main_file_content = attempt_handler.read()\n try:\n tree = ast.parse(main_file_content)\n except SyntaxError as e:\n tree = None\n if with_filenames:\n if with_file_content:\n trees.append((filename, main_file_content, tree))\n else:\n trees.append((filename, tree))\n else:\n trees.append(tree)\n \n filtered_trees = [tree for tree in trees if tree] \n return filtered_trees\n\n\ndef get_verbs_from_function_name(function_name):\n verbs = [word for word in function_name.split('_') if is_verb(word)]\n return verbs\n\n\n\n###### next refactoring :\n## see Rasswet / otus-web\n## def is_system_name():\n## def extract functions_from_trees\n\n\ndef is_system_name(func):\n return func.startswith('__') and func.endswith('__')\n\ndef get_function_names_from_tree(tree):\n return [node.name.lower() for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)]\n\ndef get_not_system_function_names(path):\n trees = generate_trees(path) \n \n list_of_all_function_names = [get_function_names_from_tree(tree) for tree in trees]\n #print(list_of_all_functions)\n flat_list_of_function_names = merge_to_flat_list(list_of_all_function_names)\n #print(flat_list_of_function_names)\n not_system_function_names = [func for func in flat_list_of_function_names if not is_system_name(func)]\n return not_system_function_names\n\n\ndef get_top_verbs_in_path(path, top_size=10): \n function_names = get_not_system_function_names(path)\n \n list_of_verbs = [get_verbs_from_function_name(function_name) for function_name in function_names]\n \n flat_list_of_verbs = merge_to_flat_list(list_of_verbs)\n return collections.Counter(flat_list_of_verbs).most_common(top_size)\n\n\ndef collect_verbs():\n wds = []\n projects = [\n 'django',\n 'flask',\n #'pyramid',\n #'reddit',\n #'requests',\n #'sqlalchemy',\n ]\n for project in projects:\n 
path = os.path.join('.', project)\n wds += get_top_verbs_in_path(path)\n \n return wds\n\n'''\ntop_size = 200\nprint('total %s words, %s unique' % (len(wds), len(set(wds))))\nfor word, occurence in collections.Counter(wds).most_common(top_size):\n print(word, occurence)\n'''\n\ndef print_amount_of_verbs(verbs_wds):\n print('total %s words, %s unique' % (len(verbs_wds), len(set(verbs_wds))))\n pass\n\n\ndef print_word_occurence_paires(verbs_wds, _top_size):\n word_occurance_pairs = collections.Counter(verbs_wds).most_common(_top_size)\n for word, occurence in word_occurance_pairs:\n print(word, occurence)\n pass \n\n\nif __name__ == '__main__':\n top_size = 200\n verbs = collect_verbs()\n print_amount_of_verbs(verbs)\n print_word_occurence_paires(verbs,top_size)","repo_name":"DmitrySevostianov/otus_home_work_01","sub_path":"otus_home_work_01.py","file_name":"otus_home_work_01.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38559946581","text":"__all__ = [\"Resizer\"]\n\n\nfrom sklearn.base import TransformerMixin\n\nfrom ...base_transform import BaseTransform\nfrom ...internal.core.feature_extraction.image.resizer import Resizer as core\nfrom ...internal.utils.utils import trace\n\n\nclass Resizer(core, BaseTransform, TransformerMixin):\n \"\"\"\n\n Resizes an image to a specified dimension using a specified\n resizing method.\n\n .. remarks::\n ``Resizer`` resizes an image to the specified height and width\n using a specified resizing method. The input variables to this\n transforms must\n be images, typically the result of the ``Loader`` transform.\n\n :param columns: A dictionary of key-value pairs, where key is the output\n column name and value is the input column name.\n\n * Multiple key-value pairs are allowed.\n * Input column type: :ref:`Picture`.\n * Output column type: :ref:`Picture`.\n * If the output column names are same as the input column names, then\n simply specify ``columns`` as a list of strings.\n\n The << operator can be used to set this value (see\n `Column Operator `_)\n\n For example\n * Resizer(columns={'out1':'input1', 'out2':'input2'})\n * Resizer() << {'out1':'input1', 'out2':'input2'}\n\n For more details see `Columns `_.\n\n :param image_width: Specifies the width of the scaled image in pixels.\n The default value is 224.\n\n :param image_height: Specifies the height of the scaled image in pixels.\n The default value is 224.\n\n :param resizing: Specified the resizing method to use. Note that all\n methods\n are using bilinear interpolation. The options are:\n\n * ``\"IsoPad\"``: The image is resizerd such that the aspect ratio is\n preserved.\n If needed, the image is padded with black to fit the new width or\n height.\n * ``\"IsoCrop\"``: The image is resizerd such that the aspect ratio is\n preserved.\n If needed, the image is cropped to fit the new width or height.\n * ``\"Aniso\"``: The image is stretched to the new width and height,\n without\n preserving the aspect ratio.\n\n The default value is ``\"IsoCrop\"``.\n\n :param crop_anchor: Anchor for cropping.\n\n :param params: Additional arguments sent to compute engine.\n\n .. seealso::\n :py:class:`Loader `,\n :py:class:`PixelExtractor\n `.\n\n .. index:: transform, image\n\n Example:\n .. 
literalinclude:: /../nimbusml/examples/Image.py\n        :language: python\n    \"\"\"\n\n    @trace\n    def __init__(\n        self,\n        image_width=0,\n        image_height=0,\n        resizing='IsoCrop',\n        crop_anchor='Center',\n        columns=None,\n        **params):\n\n        if columns:\n            params['columns'] = columns\n        BaseTransform.__init__(self, **params)\n        core.__init__(\n            self,\n            image_width=image_width,\n            image_height=image_height,\n            resizing=resizing,\n            crop_anchor=crop_anchor,\n            **params)\n        self._columns = columns\n\n    def get_params(self, deep=False):\n        \"\"\"\n        Get the parameters for this operator.\n        \"\"\"\n        return core.get_params(self)\n","repo_name":"microsoft/NimbusML","sub_path":"src/python/nimbusml/feature_extraction/image/resizer.py","file_name":"resizer.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"62"} +{"seq_id":"34971879252","text":"import numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import PrecisionRecallDisplay\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import roc_auc_score, precision_score, recall_score, f1_score, classification_report, auc\nfrom sklearn.metrics import roc_curve as sk_roc_curve\nfrom itertools import cycle\nimport sklearn.metrics\n\ndef plot_cm(labels, predictions, p=0.5):\n    cf_matrix = confusion_matrix(labels, predictions > p)\n    group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']\n    group_counts = [\"{0:0.0f}\".format(value) for value in cf_matrix.flatten()]\n    group_percentages_pos = [\"({0:.2%})\".format(value) for value in cf_matrix.flatten()[0:2]/np.sum(cf_matrix.flatten()[0:2])]\n    group_percentages_neg = [\"({0:.2%})\".format(value) for value in cf_matrix.flatten()[2:4]/np.sum(cf_matrix.flatten()[2:4])]\n    group_percentages = group_percentages_pos + group_percentages_neg\n    categories = ['Not-STEMI', 'STEMI']\n    labels = [f\"{v1}\\n{v2}\\n{v3}\" for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]\n    labels = np.asarray(labels).reshape(2,2)\n    \n    #plt.figure(figsize=(5,5))\n    sns.heatmap(cf_matrix, annot=labels, fmt='', cmap='Blues',xticklabels=categories,yticklabels=categories, annot_kws={\"size\": 12})\n    plt.title('Confusion Matrix (cut off = {:.2f})'.format(p), fontsize= 14)\n    plt.ylabel('Actual label', fontsize= 12)\n    plt.xlabel('Predicted label', fontsize= 12)\n\n    print('True Negatives: ', cf_matrix[0][0])\n    print('False Positives: ', cf_matrix[0][1])\n    print('False Negatives: ', cf_matrix[1][0])\n    print('True Positives: ', cf_matrix[1][1])\n    print('Total STEMI: ', np.sum(cf_matrix[1]))\n\ndef roc_curve(y_true, y_pred, y_proba):\n    conf_mat = confusion_matrix(y_true.argmax(axis=1), y_pred.argmax(axis=1))\n    TP = conf_mat[1][1]\n    TN = conf_mat[0][0]\n    FN = conf_mat[1][0]\n    FP = conf_mat[0][1]\n\n    acc = round((TP+TN)/(TP+TN+FP+FN), 4)\n    sens = round(TP/(TP+FN), 4)\n    spec = round(TN/(TN+FP), 4)\n    auroc = round(roc_auc_score(y_true, y_proba), 4)\n    precision = round(precision_score(y_true.argmax(axis=1), y_pred.argmax(axis=1)), 4)\n    recall = round(recall_score(y_true.argmax(axis=1), y_pred.argmax(axis=1)), 4)\n    f1score = round(f1_score(y_true.argmax(axis=1), y_pred.argmax(axis=1)), 4)\n\n    print(conf_mat, \"\\n\")\n    print(\"Acc\", acc)\n    print(\"Sensitivity\", sens)\n    print(\"Specificity\", spec)\n    print(\"AUROC\", auroc)\n    print('Precision', precision)\n    print(\"Recall\", recall)\n    print(\"F1\", f1score)\n    print(classification_report(y_true, y_pred))\n    \n    # ROC & AUC\n    fpr = dict()\n    tpr = dict()\n    roc_auc = dict()\n    for i in range(y_true.shape[1]):\n        fpr[i], tpr[i], _ = sk_roc_curve(y_true[:,i], y_proba[:, i]) # sklearn's roc_curve; the bare name would recurse into this same-named function\n        roc_auc[i] = auc(fpr[i], tpr[i])\n    colors = cycle(['blue', 'red'])\n    
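# zip stops at the shorter iterable, so the endless cycle() supplies one color per class; class 0 is skipped in the loop below\n    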
for i, color in zip(range(y_true.shape[1]), colors):\n        if i == 0:\n            pass\n        else:\n            plt.plot(fpr[i], tpr[i], color=color, lw=1.5, label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[i]))\n    \n    plt.plot([0, 1], [0, 1], lw=1.5, color='black', linestyle='dotted', label = 'baseline')\n    plt.xlim([-0.05, 1.0])\n    plt.ylim([0.0, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.legend(loc=\"lower right\")\n    plt.show()\n\nlabel = {'0': 'Not-STEMI', '1':'STEMI'}\n\n\ndef pr_curve(y_true, y_pred, y_proba):\n    # PR & AUC\n    prec = dict()\n    rec = dict()\n    pr_auc = dict()\n    for i in range(y_true.shape[1]):\n        prec[i], rec[i], _ = sklearn.metrics.precision_recall_curve(y_true[:,i], y_proba[:, i])\n        pr_display = sklearn.metrics.PrecisionRecallDisplay(precision=prec[i], recall=rec[i])\n        pr_display.plot(label = 'Precision-Recall curve (area = {0:0.2f})' ''.format(average_precision_score(y_true, y_pred)))\n    plt.show()\n\n\nlabel = {'0': 'Non-STEMI', '1':'STEMI'}\n","repo_name":"kyulee-jeon/ecg_python","sub_path":"confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"34549102783","text":"#coding: utf-8\n\ndef InterValue(Growth, EarningsPerShare, DiscountRate, PriceEarningsRatio): # intrinsic value calculation for a growth stock\n    # InterValue: intrinsic value, this function's return value\n    # Growth: growth rate\n    # EarningsPerShare: earnings per share\n    # DiscountRate: discount rate\n    # PriceEarningsRatio: benchmark price-earnings ratio\n    # V1, V2, V3, V4, V: present values of growth for the different segments\n    \n    # present value of growth, computed per segment\n    if Growth > 2: # growth rate above 200%\n        V1 = (1 + 0.5) ** 2 / (1 + DiscountRate) ** 2\n        V2 = (1 + 0.5) ** 1.5 / (1 + DiscountRate) ** 1.5\n        V3 = (1 + 1) / (1 + DiscountRate)\n        V4 = (1 + Growth - 2) ** 0.5 / (1 + DiscountRate) ** 0.5\n    elif Growth > 1 : # growth rate above 100% but at most 200%\n        V1 = (1 + 0.5) ** 2 / (1 + DiscountRate) ** 2\n        V2 = (1 + 0.5) ** 1.5 / (1 + DiscountRate) ** 1.5\n        V3 = (1 + Growth - 1) / (1 + DiscountRate)\n        V4 = 0\n    elif Growth > 0.5 : # growth rate above 50% but at most 100%\n        V1 = (1 + 0.5) ** 2 / (1 + DiscountRate) ** 2\n        V2 = (1 + Growth - 0.5) ** 1.5 / (1 + DiscountRate) ** 1.5\n        V3 = 0\n        V4 = 0\n    elif Growth >=0: # growth rate between 0% and 50% inclusive\n        V1 = (1 + Growth) ** 2 / (1 + DiscountRate) ** 2\n        V2 = 0\n        V3 = 0\n        V4 = 0\n    else:\n        V1 = (1 + Growth) / (1 + DiscountRate) **2\n        V2 = 0\n        V3 = 0\n        V4 = 0\n\n    V = V1 + V2 + V3 + V4 # total present value of growth across the segments\n    InterValue = EarningsPerShare * V * PriceEarningsRatio # intrinsic value calculation\n    return InterValue\n\nif __name__=='__main__':\n    Growth=float(input('Enter the forecast growth rate: '))\n    EarningsPerShare=float(input('Enter earnings per share: '))\n    DiscountRate=0.07\n    PriceEarningsRatio=15\n    print(InterValue(Growth, EarningsPerShare, DiscountRate, PriceEarningsRatio))\n    \n","repo_name":"prana70/stock","sub_path":"intervalue.py","file_name":"intervalue.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"37752349794","text":"# This file is part of LibreOsteo.\n#\n# LibreOsteo is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# LibreOsteo is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with LibreOsteo. 
If not, see .\n# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nfrom libreosteoweb.api import file_integrator\ntry:\n from unittest.mock import mock_open\n from unittest.mock import patch\n from unittest.mock import MagicMock\n from unittest.mock import Mock\nexcept ImportError:\n from mock import mock_open\n from mock import patch\n from mock import MagicMock\n from mock import Mock\n\n\nclass TestFileIntegrator(TestCase):\n def setUp(self):\n self.patcher = patch('libreosteoweb.api.file_integrator.open',\n mock_open(),\n create=True)\n self.patcher.start()\n\n def test_filecontentkey(self):\n f = 'file'\n t = 'test'\n key1 = file_integrator.FileContentKey(f, None)\n key2 = file_integrator.FileContentKey(f, None)\n\n key3 = file_integrator.FileContentKey(t, None)\n self.assertTrue(key1 == key2)\n self.assertFalse(key1 == key3)\n\n def test_file_content_proxy(self):\n f = MagicMock()\n f.read.return_value = 'Nom;Prenom;Nom de Famille;'\n proxy1 = file_integrator.FileContentProxy()\n proxy2 = file_integrator.FileContentProxy()\n\n c1 = proxy1.get_content(f)\n c2 = proxy2.get_content(f)\n\n self.assertTrue(c1 == c2)\n\n f.close()\n\n def test_file_content_proxy_not_the_same(self):\n f = MagicMock()\n f.read.return_value = 'Nom;Prenom;Nom de Famille;'\n f.__iter__.return_value = [\n 'Nom;Prenom;Nom de Famille;',\n ]\n\n f2 = MagicMock()\n f2.read.return_value = 'Nom de famille;Prenom'\n f2.__iter__.return_value = [\n 'Nom de famille;Prenom',\n ]\n\n proxy1 = file_integrator.FileContentProxy()\n proxy2 = file_integrator.FileContentProxy()\n\n c1 = proxy1.get_content(f)\n c2 = proxy2.get_content(f2)\n\n self.assertTrue(c1 != c2)\n\n def test_analyzertype(self):\n content = {}\n content['header'] = ['nom de famille', 'prenom', 'date de naissance']\n content['nb_row'] = 1\n content['content'] = ['test', 'test', 'test']\n\n a = file_integrator.AnalyzerPatientFile(content)\n self.assertTrue(a.is_instance())\n\n def test_analyze_handler(self):\n handler = file_integrator.AnalyzerHandler()\n header = u'Numero;Nom de Famille;Nom de jeune fille ou jeune homme;Prenom;Date de naissance (JJ MM AAAA);Sex (M F);Rue;Complement dadresse;code postal;ville;email;Telephone;Mobile;Profession;Loisirs;Fumeur (O/N);Lateralite;Informations importantes;Traitement en cours;Antecedents chirurgicaux;Antecedents medicaux;Antecedents familiaux;Antecedents traumatiques;CR medicaux'\n f = MagicMock()\n f.read.return_value = header\n f.__iter__.return_value = [\n header,\n ]\n report = handler.analyze(f)\n self.assertTrue(report.is_empty)\n self.assertTrue(report.is_valid)\n self.assertEquals(file_integrator.FileCsvType.PATIENT, report.type)\n\n def test_analyze_handler_not_empty(self):\n handler = file_integrator.AnalyzerHandler()\n\n header = u'Numero;Nom de Famille;Nom de jeune fille/ou jeune homme;Prenom;Date de naissance (JJ/MM/AAAA);Sex (M/F);Rue;Complement dadresse;code postal;ville;email;Telephone;Mobile;Profession;Loisirs;Fumeur (O/N);Lateralite;Informations importantes;Traitement en cours;Antecedents chirurgicaux;Antecedents medicaux;Antecedents familiaux;Antecedents traumatiques;CR medicaux'\n value = u'Test;Test;Test'\n\n f = MagicMock()\n f.read.return_value = header\n f.__iter__.return_value = [header, value]\n\n report = handler.analyze(f)\n #self.assertFalse(report.is_empty)\n self.assertTrue(report.is_valid)\n self.assertEquals(file_integrator.FileCsvType.PATIENT, report.type)\n\n def test_file_content_adapter(self):\n header = 'Nom;Prenom;Nom de Famille'\n\n f = MagicMock()\n f.read.return_value = 
header\n f.__iter__.return_value = (header, )\n\n adapter = file_integrator.FileContentAdapter(f)\n result = adapter.get_content()\n self.assertEquals(1, result['nb_row'])\n self.assertEquals(['Nom', 'Prenom', 'Nom de Famille'],\n result['header'])\n self.assertEquals([], result['content'])\n\n def tearDown(self):\n self.patcher.stop()\n","repo_name":"libreosteo/LibreOsteo","sub_path":"libreosteoweb/tests/test_file_integrator.py","file_name":"test_file_integrator.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"37306124696","text":"import numpy as np\nfrom Bio import Phylo, SeqIO\n# from cStringIO import StringIO\nfrom io import StringIO\nfrom ete3 import Tree, TreeNode\nimport copy\nfrom collections import defaultdict, OrderedDict\nfrom bitarray import bitarray\nimport warnings\nimport torch\nimport matplotlib.pyplot as plt\nwarnings.simplefilter('always', UserWarning)\n\ndef mcmc_treeprob(filename, data_type):\n mcmc_samp_tree_stats = Phylo.parse(filename, data_type)\n mcmc_samp_tree_dict = {}\n num_hp_tree = 0\n for tree in mcmc_samp_tree_stats:\n handle = StringIO()\n Phylo.write(tree, handle, 'newick')\n mcmc_samp_tree_dict[Tree(handle.getvalue().strip())] = tree.weight\n\n handle.close()\n num_hp_tree += 1\n\n return mcmc_samp_tree_dict\n\ndef generate(taxa):\n if len(taxa) == 3:\n return [Tree('(' + ','.join(taxa) + ');')]\n else:\n res = []\n sister = Tree('(' + taxa[-1] + ');')\n for tree in generate(taxa[:-1]):\n \n for node in tree.traverse('preorder'):\n if not node.is_root():\n node.up.add_child(sister)\n node.detach()\n sister.add_child(node)\n res.append(copy.deepcopy(tree))\n node.detach()\n sister.up.add_child(node)\n sister.detach()\n\n return res\n\ndef namenum(tree, taxon, nodetosplitMap=None):\n taxon2idx = {}\n j = len(taxon)\n if nodetosplitMap:\n idx2split = ['']*(2*j-3)\n for i, name in enumerate(taxon):\n taxon2idx[name] = i\n for node in tree.traverse(\"postorder\"):\n if node.is_leaf():\n if not isinstance(node.name, str):\n warnings.warn(\"The taxon names are not strings, please check if they are already integers!\")\n else:\n node.name = taxon2idx[node.name]\n if nodetosplitMap:\n idx2split[node.name] = nodetosplitMap[node]\n else:\n node.name, j = j, j+1\n if nodetosplitMap and not node.is_root():\n idx2split[node.name] = nodetosplitMap[node]\n \n if nodetosplitMap:\n return idx2split\n\ndef renamenum(tree, level, target=None, return_rel_pos=True):\n '''name the subtree. DO NOT change the names of leaves'''\n j = level\n r_target = None\n rel_pos = np.arange(stop=2*level-4)\n for node in tree.traverse(\"postorder\"):\n flag = True if (target != None and node.name == target) else False\n if node.is_leaf():\n pass\n else:\n if node.name != '':\n rel_pos[node.name] = j\n node.name, j = j, j+1\n if flag:\n r_target = node.name\n if return_rel_pos:\n return r_target, rel_pos\n else:\n return r_target\n\ndef renamenum_backward(tree, level, target=None, return_rel_pos=None):\n '''name the subtree. 
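# --- Aside (sketch, not from this repo): `generate` above enumerates all unrooted
# binary topologies, so len(generate(taxa)) should match the classic (2n-5)!! count
# for n = len(taxa).
def num_unrooted_topologies(n):
    total = 1
    for k in range(3, 2 * n - 4, 2):  # 1 * 3 * 5 * ... * (2n-5)
        total *= k
    return total

assert num_unrooted_topologies(3) == 1
assert num_unrooted_topologies(4) == 3
assert num_unrooted_topologies(5) == 15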
DO NOT change the names of leaves'''\n j = level\n r_target = None\n rel_pos = list(range(2*level-2))\n for node in tree.traverse(\"postorder\"):\n flag = True if (target != None and node.name == target) else False\n if node.is_leaf():\n pass\n else:\n rel_pos[j] = copy.copy(node.name)\n node.name, j = j, j+1\n if flag:\n r_target = copy.copy(node.name)\n if return_rel_pos:\n return r_target, rel_pos\n else:\n return r_target\n\ndef node_embedding(tree, ntips):\n leaf_features = torch.eye(ntips)\n for node in tree.traverse('postorder'):\n if node.is_leaf():\n node.c = 0\n node.d = leaf_features[node.name]\n else:\n child_c, child_d = 0., 0.\n for child in node.children:\n child_c += child.c\n child_d += child.d\n node.c = 1./(3. - child_c) \n node.d = node.c * child_d\n \n node_features, node_idx_list, edge_index = [], [], [] \n for node in tree.traverse('preorder'):\n neigh_idx_list = []\n if not node.is_root():\n node.d = node.c * node.up.d + node.d\n # parent_idx_list.append(node.up.name)\n neigh_idx_list.append(node.up.name)\n \n if not node.is_leaf():\n neigh_idx_list.extend([child.name for child in node.children])\n else:\n neigh_idx_list.extend([-1, -1]) \n else:\n neigh_idx_list.extend([child.name for child in node.children])\n \n edge_index.append(neigh_idx_list) \n node_features.append(node.d)\n node_idx_list.append(node.name)\n \n branch_idx_map = torch.sort(torch.tensor(node_idx_list).long(), dim=0, descending=False)[1]\n edge_index = torch.tensor(edge_index).long()\n \n\n return torch.index_select(torch.stack(node_features), 0, branch_idx_map), edge_index[branch_idx_map]\n\ndef mp_node_embedding(args):\n tree, ntips, level = args\n leaf_features = torch.eye(ntips)\n name_dict = {}\n rel_pos = np.arange(max(4,2*level-4))\n j = level\n\n for node in tree.traverse('postorder'):\n if node.is_leaf():\n node.c = 0\n node.d = leaf_features[node.name]\n else:\n child_c, child_d = 0., 0.\n for child in node.children:\n child_c += child.c\n child_d += child.d\n node.c = 1./(3. - child_c) \n node.d = node.c * child_d\n if node.name != '':\n rel_pos[node.name] = j\n node.name, j = j, j+1\n name_dict[node.name] = node\n \n node_features, node_idx_list, edge_index = [], [], [] \n for node in tree.traverse('preorder'):\n neigh_idx_list = []\n if not node.is_root():\n node.d = node.c * node.up.d + node.d\n neigh_idx_list.append(node.up.name)\n \n if not node.is_leaf(): \n neigh_idx_list.extend([child.name for child in node.children])\n else:\n neigh_idx_list.extend([-1, -1]) \n else:\n neigh_idx_list.extend([child.name for child in node.children])\n \n edge_index.append(neigh_idx_list) \n node_features.append(node.d)\n node_idx_list.append(node.name)\n \n branch_idx_map = torch.sort(torch.tensor(node_idx_list).long(), dim=0, descending=False)[1]\n edge_index = torch.tensor(edge_index).long() \n \n return tree, torch.index_select(torch.stack(node_features), 0, branch_idx_map), edge_index[branch_idx_map], torch.from_numpy(rel_pos), name_dict\n\ndef mp_add(args):\n tree, name, pos = args\n assert isinstance(tree, TreeNode)\n node_to_add = TreeNode(name=name)\n anchor_node = pos\n parent = anchor_node.up \n\n parent.remove_child(anchor_node)\n newparent = TreeNode()\n newparent.add_child(anchor_node)\n newparent.add_child(node_to_add)\n parent.add_child(newparent)\n return tree\n\ndef mp_renamenum(args):\n tree, level = args\n '''name the subtree. 
DO NOT change the names of leaves'''\n j = level\n for node in tree.traverse(\"postorder\"):\n if node.is_leaf():\n pass\n else:\n node.name, j = j, j+1\n return tree\n\n\ndef remove(tree, name, return_pos=False):\n assert isinstance(tree, TreeNode)\n node_to_remove = tree.search_nodes(name=name)[0]\n parent = node_to_remove.up\n\n if parent.is_root():\n sisters = node_to_remove.get_sisters()\n if not sisters[0].is_leaf():\n newtree, sister = sisters\n elif not sisters[1].is_leaf():\n sister, newtree = sisters\n else:\n raise RuntimeError('This tree has less than 3 leaves!')\n newtree.up, sister.up = None, None\n newtree.add_child(sister)\n if return_pos:\n return newtree, sister.name\n else:\n return newtree\n else: \n grandparent = parent.up\n sister = node_to_remove.get_sisters()[0]\n grandparent.add_child(sister)\n grandparent.remove_child(parent)\n if return_pos:\n return tree, sister.name\n else:\n return tree\n\ndef add(tree, name, pos):\n assert isinstance(tree, TreeNode)\n node_to_add = TreeNode(name=name)\n anchor_node = pos\n parent = anchor_node.up \n\n parent.remove_child(anchor_node)\n newparent = TreeNode()\n newparent.add_child(anchor_node)\n newparent.add_child(node_to_add)\n parent.add_child(newparent)\n\ndef loadData(filename,data_type):\n data = []\n id_seq = []\n for seq_record in SeqIO.parse(filename,data_type):\n id_seq.append(seq_record.id)\n data.append(list(seq_record.seq.upper()))\n\n return data, id_seq\n\ndef init(tree, branch=None, name='all', scale=0.1, display=False, return_map=False):\n if return_map: idx2node = {}\n i, j = 0, len(tree)\n for node in tree.traverse(\"postorder\"):\n if node.is_leaf():\n if name != 'interior':\n node.name, i = i, i+1\n else:\n node.name = int(node.name)\n else:\n node.name, j = j, j+1\n if not node.is_root():\n if isinstance(branch, str) and branch =='random':\n node.dist = np.random.exponential(scale)\n elif branch is not None:\n node.dist = branch[node.name]\n else:\n node.dist = 0.0\n \n if return_map: idx2node[node.name] = node\n if display:\n print(node.name, node.dist)\n \n if return_map: return idx2node\n\ndef emp_mcmc_treeprob(filename, data_type, truncate=None, taxon=None):\n mcmc_samp_tree_stats = Phylo.parse(filename, data_type)\n mcmc_samp_tree_dict = OrderedDict()\n mcmc_samp_tree_name = []\n mcmc_samp_tree_wts = []\n num_hp_tree = 0\n if taxon:\n taxon2idx = {taxon: i for i, taxon in enumerate(taxon)}\n \n for tree in mcmc_samp_tree_stats:\n handle = StringIO()\n Phylo.write(tree, handle,'newick')\n mcmc_samp_tree_dict[tree.name] = Tree(handle.getvalue().strip())\n if taxon:\n if taxon != 'keep':\n namenum(mcmc_samp_tree_dict[tree.name],taxon)\n else:\n init(mcmc_samp_tree_dict[tree.name],name='interior')\n \n handle.close()\n mcmc_samp_tree_name.append(tree.name)\n mcmc_samp_tree_wts.append(tree.weight)\n num_hp_tree += 1\n \n if truncate and num_hp_tree >= truncate:\n break\n \n return mcmc_samp_tree_dict, mcmc_samp_tree_name, mcmc_samp_tree_wts\n\ndef summary(dataset, file_path, samp_size=750001):\n tree_dict_total = OrderedDict()\n tree_dict_map_total = defaultdict(float)\n tree_names_total = []\n tree_wts_total = []\n n_samp_tree = 0\n for i in range(1,11):\n tree_dict_rep, tree_name_rep, tree_wts_rep = emp_mcmc_treeprob(file_path + dataset + '/rep_{}/'.format(i) + dataset + '.trprobs', 'nexus', taxon='keep')\n tree_wts_rep = np.round(np.array(tree_wts_rep)*samp_size)\n \n for i, name in enumerate(tree_name_rep):\n tree_id = tree_dict_rep[name].get_topology_id()\n if tree_id not in tree_dict_map_total:\n 
n_samp_tree += 1\n tree_names_total.append('tree_{}'.format(n_samp_tree))\n tree_dict_total[tree_names_total[-1]] = tree_dict_rep[name]\n\n tree_dict_map_total[tree_id] += tree_wts_rep[i]\n \n for key in tree_dict_map_total:\n tree_dict_map_total[key] /= 10*samp_size\n\n for name in tree_names_total:\n tree_wts_total.append(tree_dict_map_total[tree_dict_total[name].get_topology_id()]) \n \n return tree_dict_total, tree_names_total, tree_wts_total\n\n\ndef get_tree_list_raw(filename, burnin=0, truncate=None, hpd=0.95):\n tree_dict = {}\n tree_wts_dict = defaultdict(float)\n tree_names = []\n i, num_trees = 0, 0\n with open(filename, 'r') as input_file:\n while True:\n line = input_file.readline()\n if line == \"\":\n break\n num_trees += 1\n if num_trees < burnin:\n continue\n tree = Tree(line.strip())\n tree_id = tree.get_topology_id()\n if tree_id not in tree_wts_dict:\n tree_name = 'tree_{}'.format(i)\n tree_dict[tree_name] = tree\n tree_names.append(tree_name)\n i += 1 \n tree_wts_dict[tree_id] += 1.0\n \n if truncate and num_trees == truncate + burnin:\n break\n tree_wts = [tree_wts_dict[tree_dict[tree_name].get_topology_id()]/(num_trees-burnin) for tree_name in tree_names]\n if hpd < 1.0:\n ordered_wts_idx = np.argsort(tree_wts)[::-1]\n cum_wts_arr = np.cumsum([tree_wts[k] for k in ordered_wts_idx])\n cut_at = next(x[0] for x in enumerate(cum_wts_arr) if x[1] > hpd)\n tree_wts = [tree_wts[k] for k in ordered_wts_idx[:cut_at]]\n tree_names = [tree_names[k] for k in ordered_wts_idx[:cut_at]]\n \n return tree_dict, tree_names, tree_wts\n\ndef summary_raw(dataset, file_path, truncate=None, hpd=0.95, n_rep=10):\n tree_dict_total = {}\n tree_id_set_total = set()\n tree_names_total = []\n n_samp_tree = 0\n \n for i in range(1, n_rep+1):\n tree_dict_rep, tree_names_rep, tree_wts_rep = get_tree_list_raw(file_path + dataset + '/' + dataset + '_ufboot_rep_{}'.format(i), truncate=truncate, hpd=hpd)\n for j, name in enumerate(tree_names_rep):\n tree_id = tree_dict_rep[name].get_topology_id()\n if tree_id not in tree_id_set_total:\n n_samp_tree += 1\n tree_names_total.append('tree_{}'.format(n_samp_tree))\n tree_dict_total[tree_names_total[-1]] = tree_dict_rep[name]\n tree_id_set_total.add(tree_id)\n \n return tree_dict_total, tree_names_total\n \ndef get_support_from_mcmc(taxa, tree_dict_total, tree_names_total, tree_wts_total=None):\n rootsplit_supp_dict = OrderedDict()\n subsplit_supp_dict = OrderedDict()\n toBitArr = BitArray(taxa)\n for i, tree_name in enumerate(tree_names_total):\n tree = tree_dict_total[tree_name]\n wts = tree_wts_total[i] if tree_wts_total else 1.0\n nodetobitMap = {node:toBitArr.from_clade(node.get_leaf_names()) for node in tree.traverse('postorder') if not node.is_root()}\n for node in tree.traverse('levelorder'):\n if not node.is_root():\n rootsplit = toBitArr.minor(nodetobitMap[node]).to01()\n # rootsplit_supp_dict[rootsplit] += wts\n if rootsplit not in rootsplit_supp_dict:\n rootsplit_supp_dict[rootsplit] = 0.0\n rootsplit_supp_dict[rootsplit] += wts\n if not node.is_leaf():\n child_subsplit = min([nodetobitMap[child] for child in node.children]).to01()\n for sister in node.get_sisters():\n parent_subsplit = (nodetobitMap[sister] + nodetobitMap[node]).to01()\n if parent_subsplit not in subsplit_supp_dict:\n subsplit_supp_dict[parent_subsplit] = OrderedDict()\n if child_subsplit not in subsplit_supp_dict[parent_subsplit]:\n subsplit_supp_dict[parent_subsplit][child_subsplit] = 0.0\n subsplit_supp_dict[parent_subsplit][child_subsplit] += wts\n if not 
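# --- Aside (illustrative sketch): the hpd filter in get_tree_list_raw above keeps the
# smallest prefix of topologies, ordered by weight, whose cumulative mass exceeds the
# threshold. The same logic in miniature:
import numpy as np
wts = [0.5, 0.3, 0.15, 0.05]
order = np.argsort(wts)[::-1]                # heaviest topology first
cum = np.cumsum([wts[k] for k in order])     # 0.5, 0.8, 0.95, 1.0
cut_at = next(i for i, c in enumerate(cum) if c > 0.9)
assert list(order[:cut_at]) == [0, 1]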
node.up.is_root():\n parent_subsplit = (~nodetobitMap[node.up] + nodetobitMap[node]).to01()\n if parent_subsplit not in subsplit_supp_dict:\n subsplit_supp_dict[parent_subsplit] = OrderedDict()\n if child_subsplit not in subsplit_supp_dict[parent_subsplit]:\n subsplit_supp_dict[parent_subsplit][child_subsplit] = 0.0 \n subsplit_supp_dict[parent_subsplit][child_subsplit] += wts\n \n parent_subsplit = (~nodetobitMap[node] + nodetobitMap[node]).to01()\n if parent_subsplit not in subsplit_supp_dict:\n subsplit_supp_dict[parent_subsplit] = OrderedDict()\n if child_subsplit not in subsplit_supp_dict[parent_subsplit]:\n subsplit_supp_dict[parent_subsplit][child_subsplit] = 0.0\n subsplit_supp_dict[parent_subsplit][child_subsplit] += wts\n \n if not node.up.is_root():\n bipart_bitarr = min([nodetobitMap[sister] for sister in node.get_sisters()] + [~nodetobitMap[node.up]])\n else:\n bipart_bitarr = min([nodetobitMap[sister] for sister in node.get_sisters()])\n child_subsplit = bipart_bitarr.to01()\n if not node.is_leaf():\n for child in node.children:\n parent_subsplit = (nodetobitMap[child] + ~nodetobitMap[node]).to01()\n if parent_subsplit not in subsplit_supp_dict:\n subsplit_supp_dict[parent_subsplit] = OrderedDict()\n if child_subsplit not in subsplit_supp_dict[parent_subsplit]:\n subsplit_supp_dict[parent_subsplit][child_subsplit] = 0.0\n subsplit_supp_dict[parent_subsplit][child_subsplit] += wts\n \n parent_subsplit = (nodetobitMap[node] + ~nodetobitMap[node]).to01()\n if parent_subsplit not in subsplit_supp_dict:\n subsplit_supp_dict[parent_subsplit] = OrderedDict()\n if child_subsplit not in subsplit_supp_dict[parent_subsplit]:\n subsplit_supp_dict[parent_subsplit][child_subsplit] = 0.0\n subsplit_supp_dict[parent_subsplit][child_subsplit] += wts\n\n return rootsplit_supp_dict, subsplit_supp_dict \n\n\nclass BitArray(object):\n def __init__(self, taxa):\n self.taxa = taxa\n self.ntaxa = len(taxa)\n self.map = {taxon: i for i, taxon in enumerate(taxa)}\n \n def combine(self, arrA, arrB):\n if arrA < arrB:\n return arrA + arrB\n else:\n return arrB + arrA \n \n def merge(self, key):\n return bitarray(key[:self.ntaxa]) | bitarray(key[self.ntaxa:])\n \n def decomp_minor(self, key):\n return min(bitarray(key[:self.ntaxa]), bitarray(key[self.ntaxa:]))\n \n def minor(self, arrA):\n return min(arrA, ~arrA)\n \n def from_clade(self, clade):\n bit_list = ['0'] * self.ntaxa\n for taxon in clade:\n bit_list[self.map[taxon]] = '1'\n return bitarray(''.join(bit_list))\n\n def from_digits(self, clade):\n bit_list = ['0'] * self.ntaxa\n for taxon in clade:\n bit_list[taxon] = '1'\n return bitarray(''.join(bit_list))","repo_name":"tyuxie/ARTree","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26916997596","text":"import os\n\nimport numpy as onp\nfrom scipy.io import savemat\nimport argparse\n\nfrom data import dgmm_dgp, modified_dgmm_dgp, sigmoid_dgp, load_data\nfrom utils import data_split\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-path', type=str, default='/tmp/iv-data')\nparser.add_argument('-N', type=int, default=2000)\nparser.add_argument('-nonadditive', action='store_true', default=True)\nparser.add_argument('-sigmoid', action='store_true', default=False)\nparser.add_argument('-hllt', action='store_true', default=False)\nparser.add_argument('-hllt_add_endo', action='store_true', default=True)\nparser.add_argument('-data_corr', 
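# --- Aside (sketch; assumes the `bitarray` package): a clade and its complement
# describe the same split, so BitArray.minor above canonicalizes a split by keeping
# the lexicographically smaller side.
from bitarray import bitarray
clade = bitarray('1100')
assert min(clade, ~clade) == bitarray('0011')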
default=0.5, type=float)\n\n\ndef gen_dict(Dtrain, seed):\n Dtrain, Dval = data_split(*Dtrain, split_ratio=0.5, rng=onp.random.RandomState(seed))\n to_dump = {}\n for cat in ['train', 'val']:\n suf = cat[:2]\n z, x, y = locals()['D'+cat]\n to_dump.update({'z'+suf: z, 'x'+suf: x, 'y'+suf: y})\n # to_dump['xte'] = onp.linspace(-4, 4, 200)[:, None] # deprecated. use va\n # to_dump['fte'] = true_f(to_dump['xte'])\n to_dump['fva'] = true_f(to_dump['xva'])\n to_dump['ftr'] = true_f(to_dump['xtr'])\n return to_dump\n\n\nargs = parser.parse_args()\nprint(args)\nos.makedirs(args.path, exist_ok=True)\n\nif not args.sigmoid and not args.hllt:\n dg_fn = modified_dgmm_dgp if args.nonadditive else dgmm_dgp\n for typ in ['sin', 'abs', 'step', 'linear']:\n print(typ)\n for i in range(10): # 20\n (Dtrain, _), true_f, _ = dg_fn(\n args.N*3, typ=typ, seed=i, split_ratio=2/3, iv_strength=args.data_corr)\n to_dump = gen_dict(Dtrain, i)\n savemat(os.path.join(\n args.path, f'{typ}-{args.nonadditive}-{args.data_corr}-{args.N}-{i}.mat'), to_dump)\nelif args.sigmoid:\n for nonadditive in [True, False]:\n print(nonadditive)\n for i in range(10):\n (Dtrain, _), true_f, _ = sigmoid_dgp(\n args.N*3, seed=i, split_ratio=2/3, nonadditive=nonadditive)\n to_dump = gen_dict(Dtrain, i)\n savemat(os.path.join(args.path, f'sigm-{nonadditive}-{args.N}-{i}.mat'), to_dump)\nelse:\n # the R language is a disaster, so we do the preprocessing here\n def standardize(inp, stats=None):\n if stats is not None:\n mm, ss = stats\n else:\n mm, ss = inp.mean(0), inp.std(0)\n return (inp-mm)/ss, (mm, ss)\n\n for i in range(10):\n (Dtrain, Dtest), true_f, _ = load_data('hllt', args.N*3, seed=i, args=args, split_ratio=2/3)\n to_dump_tr = gen_dict(Dtrain, i)\n to_dump = {}\n to_dump['ztr'], _ = standardize(to_dump_tr['ztr'])\n to_dump['xtr'], xstats = standardize(to_dump_tr['xtr'])\n to_dump['ytr'] = to_dump_tr['ytr']\n to_dump['xva'], _ = standardize(Dtest[1], xstats)\n to_dump['ftr'] = true_f(to_dump_tr['xtr'])\n to_dump['fva'] = true_f(Dtest[1])\n savemat(os.path.join(args.path, f'inp-hllt-{args.data_corr}-{args.N}-{i}.mat'), to_dump)\n\n","repo_name":"meta-inf/qbdiv","sub_path":"data/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70082448905","text":"class Solution:\n def longestSubsequence(self, s: str, k: int) -> int:\n n = len(s)\n\n cur = 0\n size = 0\n for i in range(n - 1, -1, -1):\n if s[i] == '0':\n size += 1\n else:\n nxt = (1 << size) + cur\n if nxt > k:\n break\n cur = nxt\n size += 1\n\n # print(i, cur, size)\n for j in range(i):\n if s[j] == '0':\n size += 1\n\n return size\n\n\ns = \"1001010\"\nk = 5\ns = \"00101001\"\nk = 1\nprint(Solution().longestSubsequence(s, k))\n","repo_name":"tiandiyijian/myLeetcode","sub_path":"week298/6099.py","file_name":"6099.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14173214063","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[62]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ndf = pd.read_csv(r'C:\\Users\\saaks\\OneDrive\\Desktop\\Iris.csv')\ndf\n\n\n# In[63]:\n\n\ndf.describe()\n\n\n# In[64]:\n\n\nspecies = df[\"Species\"].tolist()\nX = df.drop(\"Species\", 1)\n\n\n# In[65]:\n\n\n# Standardize the data\nX = (X - X.mean()) / X.std(ddof=0)\nX\n\n\n# 
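# --- Aside (illustrative values): the standardize() helper defined in export.py above
# reduces each column to zero mean and unit variance, e.g.:
import numpy as np
x = np.array([[1.0], [2.0], [3.0]])
z = (x - x.mean(0)) / x.std(0)
assert np.allclose(z.mean(0), 0.0) and np.allclose(z.std(0), 1.0)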
In[66]:\n\n\ncovariance=np.dot(X.T,X)/(X.shape[0]-1)\nprint(covariance)\n\n\n# In[67]:\n\n\neigenvalues,eigenvector=np.linalg.eig(covariance)\nprint(\"eigenvalues \\n\",eigenvalues)\nprint(\"eigenvector \\n\",eigenvector)\n\n\n# In[68]:\n\n\neigenvector=eigenvector.T\nprint(\"eigenvector after Transpose\\n\",eigenvector)\nindexs=np.argsort(eigenvalues)[::-1]\n#taking those indices and storing in eigenvalues and eigenvectors accordingly\neigenvector=eigenvector[indexs]\nprint(\"eigenvector after indexes \\n\",eigenvector)\neigenvalues=eigenvalues[indexs]\nprint(\"eigenvalues \\n\",eigenvalues) \n\n\n# In[69]:\n\n\ntotal = sum(eigenvalues)\nvariance_of_each_feature =(eigenvalues / np.sum(eigenvalues))*100\nprint(\"variance of each feature-->\",variance_of_each_feature)\nplt.figure(figsize=(8,4))\nplt.bar(range(5),variance_of_each_feature, alpha=0.6)\nplt.ylabel('Percentage of explained variance')\nplt.xlabel('Dimensions')\n\n\n# In[70]:\n\n\nfeatures=eigenvector[:2]\nprint(\"features\",features)\n\n\n# In[71]:\n\n\nnp.dot(X,features.T)\n\n\n# In[96]:\n\n\npc1=X.dot(eigenvector.T[0])\npc2=X.dot(eigenvector.T[1])\nres=pd.DataFrame(pc1,columns=[\"PC1\"])\nres[\"PC2\"]=pc2\nres['target']=species\nres.head()\n\n\n# In[98]:\n\n\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('PC1', fontsize = 15)\nax.set_ylabel('PC2', fontsize = 15)\nax.set_title('2 component PCA', fontsize = 20)\ntargets = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']\ncolors = ['r', 'g', 'b']\nfor target, color in zip(targets,colors):\n indicesToKeep = res['target'] == target\n ax.scatter(res.loc[indicesToKeep,'PC1']\n , res.loc[indicesToKeep, 'PC2']\n , c = color\n , s = 50)\nax.legend(targets)\nax.grid()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"1nt18is038/1NT18IS038_jothsna_A_ML","sub_path":"Jothsna-PCA.py","file_name":"Jothsna-PCA.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21590953620","text":"# CS Dojo https://www.youtube.com/watch?v=RBSGKlAvoiM\n# h -> n0 -> n1 -> n2 -> null\n\nclass Node:\n def __init__(self, data=None):\n self.data = data\n self.nextNode = None\n\n def insertNode(self, inserted_node):\n if self.nextNode is not None:\n oldNextNode = self.nextNode\n self.nextNode = inserted_node\n inserted_node.nextNode = oldNextNode\n else:\n self.nextNode = inserted_node\n return inserted_node\n\n\nclass singlyLinkedList:\n def __init__(self):\n self.head = None\n\n def printLinkedList(self):\n currentNode = self.head\n while currentNode is not None:\n print(currentNode.data, end=\"->\")\n currentNode = currentNode.nextNode\n\n print(\"null\")\n\n def reverseLinkedList(self):\n previousNode = None\n currentNode = self.head\n\n # reversing linked list\n # null h -> n0 -> n1 -> n2 -> null\n # null <- h n0 -> n1 -> n2 -> null\n # null <- h <- n0 n1 -> n2 -> null\n # null <- h <- n0 <- n1 n2 -> null\n while currentNode is not None:\n followingNode = currentNode.nextNode\n currentNode.nextNode = previousNode\n previousNode = currentNode\n currentNode = followingNode\n\n # printing reversed linked list\n currentNode = previousNode\n while currentNode is not None:\n print(currentNode.data, end=\"->\")\n currentNode = currentNode.nextNode\n\n print(\"null\")\n\n\nclass doublyNode:\n def __init__(self, data=None):\n self.data = data\n self.nextNode = None\n self.prevNode = None\n\n\nclass doublyLinkedList:\n def __init__(self):\n self.head = None\n\n def printLinkedList(self):\n 
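# --- Aside (sketch): the manual covariance used in the PCA script above,
# np.dot(X.T, X) / (n - 1), matches numpy's built-in once X is standardized
# (zero-mean columns):
import numpy as np
X = np.random.RandomState(0).randn(10, 3)
X = (X - X.mean(0)) / X.std(0)
manual = np.dot(X.T, X) / (X.shape[0] - 1)
assert np.allclose(manual, np.cov(X, rowvar=False))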
currentNode = self.head\n while currentNode is not None:\n if currentNode.nextNode is None:\n print(currentNode.data, end=\"->\")\n else:\n print(currentNode.data, end=\"<->\")\n currentNode = currentNode.nextNode\n\n print(\"null\")\n\n def printLinkedListBackwards(self):\n currentNode = self.head\n while currentNode.nextNode is not None:\n currentNode = currentNode.nextNode\n\n while currentNode is not None:\n if currentNode.prevNode is None:\n print(currentNode.data, end=\"->\")\n else:\n print(currentNode.data, end=\"<->\")\n currentNode = currentNode.prevNode\n\n print(\"null\")\n","repo_name":"ernestang98/ernestang98","sub_path":"online/data structures/data_structures/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25968839763","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom datetime import datetime\nfrom models import Announcement, Prayer, PrayerForm, Definition\n\nDATE_FMT = \"%B %d, %Y\"\n\ndef home(request):\n template = \"eshrine/home.html\"\n announcements = Announcement.objects.order_by('-date')\n ann_texts = []\n for ann in announcements:\n ann_texts.append({'date': ann.date.strftime(DATE_FMT),\n 'annoucement': ann.announcement})\n prayer_texts = []\n prayers = Prayer.objects.order_by('-date')\n for prayer in prayers:\n prayer_texts.append({'date': prayer.date.strftime(DATE_FMT),\n 'author': prayer.author,\n 'prayer': prayer.prayer\n })\n prayer_form = PrayerForm()\n context = {\"now\": datetime.now().strftime(DATE_FMT),\n \"annoucements\": ann_texts,\n \"prayers\": prayer_texts,\n \"prayer_form\": prayer_form}\n return render(request, template, context)\n\ndef pray(request):\n if request.method == \"POST\":\n form = PrayerForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n form.save()\n msg = 'SUCCESS'\n \n # Maybe one day:\n# mail_msg = \"New prayer added!\\n\\n%s\" % form.cleaned_data\n# try:\n# send_mail(\"[OH TZI] prayer added\", mail_msg, 'tafitaf@gmail.com',\n# recipient_list=['tafitaf@gmail.com'])\n# except Exception as e:\n# raise Exception(e)\n else:\n msg = \"FAILURE = %s\" % (form.errors)\n raise Exception(msg)\n else:\n msg = 'Unsupported opertaion: GET'\n return HttpResponse(msg);\n\ndef understand(request):\n template = \"eshrine/understand.html\"\n definitions = Definition.objects.all()\n words = [{\"word\": d.word,\n \"definition\": d.definition,\n \"author\": d.author} for d in definitions]\n context = {\"words\": words}\n return render(request, template, context)","repo_name":"tafi/ohtzi","sub_path":"eshrine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38526757841","text":"import unittest\nfrom unittest import TestCase\nfrom peewee import *\n\nimport config\n\nimport db_contact\nimport db_initial\nfrom db_initial import Artist, Artwork\n\ntest_db_path = 'database/test_gallery_db.sqlite'\nconfig.db_path = test_db_path\ndb = SqliteDatabase(config.db_path)\n\n# http://docs.peewee-orm.com/en/latest/peewee/database.html\n# and help from Tom to figure out getting it to connect to a test database\n# how I understand it, takes the models from the true database setup\nmodels = [Artist, Artwork]\n# then those models get bound to the test database. 
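# --- Aside (sketch, not part of the test module; assumes the Artist/Artwork models
# imported from db_initial above): the same bind() pattern works against an in-memory
# database, which avoids leaving test_gallery_db.sqlite on disk between runs.
from peewee import SqliteDatabase
mem_db = SqliteDatabase(':memory:')
mem_db.bind([Artist, Artwork], bind_refs=False, bind_backrefs=False)
mem_db.connect()
mem_db.create_tables([Artist, Artwork])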
then connects and creates tables\n# from those models.\ndb.bind(models, bind_refs=False, bind_backrefs=False)\ndb.connect()\ndb.create_tables(models)\n\nclass TestArtworkDB(TestCase):\n\n # setup at the start to clear any leftover info, then teardown at the end so the database is emptied.\n def setUp(self):\n self.clear_info()\n\n def tearDown(self):\n self.clear_info()\n\n # adding artists for duplicate checks later\n def add_artists_for_testing(self):\n self.artist1 = Artist(name='Sam', email='sam@sam.test')\n self.artist2 = Artist(name='John', email='john@john.test')\n self.artwork1 = Artwork(artist_id=1, artwork_name='test artwork', price=300.00, availability=True)\n self.artwork2 = Artwork(artist_id=2, artwork_name='another artwork', price=250.99, availability=False)\n self.artist1.save()\n self.artist2.save()\n\n def test_add_artist_with_new_artist(self):\n # clears, goes to add the artist data, then a search to see if it was saved properly\n self.clear_info()\n db_contact.add_artist(('test', 'test@email'))\n check = Artist.select().where(Artist.name == 'test')\n self.assertEquals('test', check[0].name)\n\n def test_add_artist_with_duplicate_name(self):\n self.clear_info()\n self.add_artists_for_testing()\n # after the test artists are added, this add_artist() causes the assert raise,\n # which it should because that artist is there.\n with self.assertRaises(DatabaseError):\n db_contact.add_artist(('John', 'new@test.test'))\n\n def test_add_new_artwork(self):\n self.clear_info()\n self.add_artists_for_testing()\n db_contact.add_artwork((1, 'test artwork', 300.00, True))\n check = Artwork.select().where(Artwork.artwork_name == 'test artwork')\n self.assertEquals('test artwork', check[0].artwork_name)\n\n def clear_info(self):\n Artist.delete().execute()\n Artwork.delete().execute()\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"htietze/gallery_database_project","sub_path":"test_artist_db.py","file_name":"test_artist_db.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74595633225","text":"from Home import st\nfrom Home import face_rec\nfrom streamlit_webrtc import webrtc_streamer\nimport av\nimport cv2\nimport numpy as np\nst.set_page_config(page_title='Registeration Form',layout='centered')\n\nst.subheader(\"Registeration Form\")\n\n# init registration form \nregistration_form=face_rec.Registrationform()\n\n# collect person name and role\nperson_name=st.text_input(label='Name',placeholder='First & Last Name')\nrole=st.selectbox(label='Select your Role:',options=('Student','Teacher'))\n\n# collect facial embeddings\n#Real time Prediction\ndef video_frame_callback(frame): #can't store embeddings in redis directly\n img = frame.to_ndarray(format=\"bgr24\") #3d np array\n reg_img,embedding=registration_form.get_embedding(img)\n # save embeddings on local computer\n if embedding is not None:\n with open('face_embedding.txt',mode='ab') as f:\n np.savetxt(f,embedding)\n\n\n return av.VideoFrame.from_ndarray(reg_img, format=\"bgr24\")\n\nwebrtc_streamer(key=\"registeration\", video_frame_callback=video_frame_callback)\n\n# save data in redis\nif st.button('Submit:'):\n return_val= registration_form.save_data_in_redis_db(person_name,role)\n if return_val==True:\n st.success(f\"{person_name} registered successfully\")\n elif return_val=='name_false':\n st.error(\"Name cannot be empty!!\")\n elif return_val=='file_false':\n st.error(\"File not found... 
Please refresh the page and try again!\")\n\n\n\n \n ","repo_name":"tanupriya9102/Attendance-System","sub_path":"4_attendance_app/pages/2_Registeration_form.py","file_name":"2_Registeration_form.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21185573399","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n# @Project :Vit\n# @File :models\n# @Date :2022/1/18 17:03\n# @Author :Xinqi Chen\n# @Software :PyCharm\n-------------------------------------------------\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch import nn\nfrom torch import Tensor\nfrom PIL import Image\nfrom torchvision.transforms import Compose, Resize, ToTensor\nfrom einops import rearrange, reduce, repeat\nfrom einops.layers.torch import Rearrange, Reduce\nfrom torchsummary import summary\n\n\nclass PatchEmbedding(nn.Module):\n def __init__(self, in_channels=3, patch_size=16, emb_size=768, img_size=224):\n super(PatchEmbedding, self).__init__()\n self.patch_size = patch_size\n self.embedding = nn.Sequential(\n # Rearrange('b c (h p1) (w p2) -> b (h w) (c p1 p2)', p1=patch_size, p2=patch_size),\n # nn.Linear(patch_size*patch_size*in_channels, emb_size)\n nn.Conv2d(in_channels, emb_size, kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size)),\n Rearrange('b e (h) (w) -> b (h w) e'),\n )\n\n self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))\n self.positions = nn.Parameter(torch.randn((img_size // patch_size) ** 2 + 1, emb_size))\n\n def forward(self, x):\n b, _, _, _ = x.shape\n x = self.embedding(x)\n cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)\n # prepend the cls token to the input\n x = torch.cat([cls_tokens, x], dim=1)\n x = x + self.positions\n return x\n\n\nclass MultiAttn(nn.Module):\n def __init__(self, emb_size=512, head=8, dropout=0.8):\n super(MultiAttn, self).__init__()\n self.emb_size = emb_size\n self.head = head\n self.q = nn.Linear(self.emb_size, self.emb_size)\n self.k = nn.Linear(self.emb_size, self.emb_size)\n self.v = nn.Linear(self.emb_size, self.emb_size)\n self.dropout = nn.Dropout(dropout)\n self.projection = nn.Linear(self.emb_size, self.emb_size)\n\n def forward(self, x, mask=None):\n q = rearrange(self.q(x), 'b q (n d) -> b n q d', n=self.head)\n k = rearrange(self.k(x), 'b q (n d) -> b n q d', n=self.head)\n v = rearrange(self.v(x), 'b q (n d) -> b n q d', n=self.head)\n energy = torch.einsum('bnqd, bnkd -> bnqk', q, k)\n if mask is not None:\n fill_value = torch.finfo(torch.float32).min\n energy.mask_fill(~mask, fill_value)\n scaling = self.emb_size ** (1/2)\n att = F.softmax(energy, dim=-1) / scaling\n att = self.dropout(att)\n out = torch.einsum('bnqk, bnvd -> bnqd', att, v)\n out = rearrange(out, \"b n q d -> b q (n d)\")\n out = self.projection(out)\n return out\n\n\nclass ResAdd(nn.Module):\n def __init__(self, fn):\n super(ResAdd, self).__init__()\n self.fn = fn\n\n def forward(self, x):\n res = x\n x = self.fn(x)\n x += res\n return x\n\n\nclass Feedforward(nn.Module):\n def __init__(self, embed_size=768, expasion=4, dropout=0):\n super(Feedforward, self).__init__()\n self.l1 = nn.Linear(embed_size, embed_size*expasion)\n self.activ = nn.GELU()\n self.dropout = nn.Dropout(dropout)\n self.l2 = nn.Linear(embed_size*expasion, embed_size)\n\n def forward(self, x):\n x = self.l1(x)\n x = self.dropout(self.activ(x))\n x = self.l2(x)\n return 
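# --- Aside (shape sanity sketch; assumes torch is available): with the defaults of
# PatchEmbedding above, 224/16 = 14 patches per side, i.e. 196 patches plus one
# prepended class token.
import torch
pe = PatchEmbedding(in_channels=3, patch_size=16, emb_size=768, img_size=224)
tokens = pe(torch.randn(2, 3, 224, 224))
assert tokens.shape == (2, 197, 768)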
x\n\n\nclass TransformerEnoderBlock(nn.Module):\n def __init__(self, embed_size=768, head=8, expansion=4, dropout1=0, dropout2=0, dropoutp=0):\n super(TransformerEnoderBlock, self).__init__()\n self.embed_size = embed_size\n self.head = head\n self.expansion = expansion\n self.dropout1 = dropout1\n self.dropout2 = dropout2\n self.multiattn = MultiAttn(self.embed_size, self.head, self.dropout1)\n self.ff = Feedforward(self.embed_size, self.expansion, self.dropout2)\n self.ln1 = nn.LayerNorm(self.embed_size)\n self.ln2 = nn.LayerNorm(self.embed_size)\n self.res1 = ResAdd(nn.Sequential(\n self.ln1,\n self.multiattn,\n nn.Dropout(dropoutp)\n ))\n self.res2 = ResAdd(nn.Sequential(\n self.ln2,\n self.ff,\n nn.Dropout(dropoutp)\n ))\n\n def forward(self, x):\n x = self.res1(x)\n x = self.res2(x)\n return x\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, n=4):\n super(TransformerEncoder, self).__init__()\n self.n = n\n self.model = nn.Sequential(*[TransformerEnoderBlock() for i in range(self.n)])\n\n def forward(self, x):\n return self.model(x)\n\n\nclass MLPClassifier(nn.Module):\n def __init__(self, embed_size=768, class_num=1000):\n super(MLPClassifier, self).__init__()\n self.norm = nn.LayerNorm(embed_size)\n self.avg = Reduce('b n e -> b e', reduction='mean')\n self.linear = nn.Linear(embed_size, class_num)\n\n def forward(self, x):\n x = self.norm(x)\n x = self.avg(x)\n x = self.linear(x)\n return x\n\n\nclass Vit(nn.Module):\n def __init__(self, in_channels=3, patch_size=16, emb_size=768, img_size=224, n=4, class_num=1000):\n super(Vit, self).__init__()\n self.layer = nn.Sequential(\n PatchEmbedding(in_channels=in_channels, patch_size=patch_size, emb_size=emb_size, img_size=img_size),\n TransformerEncoder(n=n),\n MLPClassifier(embed_size=emb_size, class_num=class_num)\n )\n\n def forward(self, x):\n x = self.layer(x)\n return x\n\n\nif __name__ == '__main__':\n data = torch.randn((1, 3, 224, 224))\n data = Vit()(data)\n print(data.shape)\n summary(Vit(), (3, 224, 224), device='cpu')\n","repo_name":"Master-Chen-Xin-Qi/Vit","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6781282272","text":"'''\nCreated on 2011 4 1\n\n@author: cihancimen\n'''\nfrom risk.command.ListCommand import ListCommand\nfrom risk.command.PlaceCommand import PlaceCommand\nfrom risk.command.MoveCommand import MoveCommand\nfrom risk.command.AttackCommand import AttackCommand\nfrom risk.command.TradeCommand import TradeCommand\nfrom copy import copy\nfrom risk.command.AbstractCommand import AbstractCommand\n\nclass CommandParser(object):\n '''\n classdocs\n '''\n '''Place '''\n COMMAND_PLACE = \"place\"\n '''Move '''\n COMMAND_MOVE = \"move\"\n\n COMMAND_LIST = \"list\"\n '''Pass'''\n COMMAND_PASS = \"pass\"\n '''Attack '''\n COMMAND_ATTACK = \"attack\"\n '''Trade '''\n COMMAND_TRADE = \"trade\"\n COMMAND_EXIT = \"exit\"\n COMMAND_MAP = \"map\"\n COMMAND_MAPIMG = \"mapimg\"\n AVAILABLE_COMMANDS = [COMMAND_PLACE, COMMAND_LIST, COMMAND_MOVE, COMMAND_PASS, COMMAND_ATTACK, COMMAND_TRADE, COMMAND_EXIT, COMMAND_MAP, COMMAND_MAPIMG]\n\n def __init__(self, game):\n '''\n Constructor\n '''\n self.game = game\n def parse(self, player, command_str):\n orig = copy(command_str)\n words = command_str.strip().split(\" \")\n \n for i in range(len(words)):\n words[i] = words[i].strip()\n for i in range(len(words)):\n if(words[i] == \"\"):\n del words[i]\n if(len(words) < 1):\n 
raise ParseException('Enter something other than whitespaces\\n')\n cmd = words[0].lower()\n if(not (cmd in CommandParser.AVAILABLE_COMMANDS)):\n raise ParseException('Unkown command: %s\\n' % (cmd))\n \n if(cmd == CommandParser.COMMAND_PLACE):\n try:\n ter = self.game.territories[words[1]]\n if(len(words) > 2):\n num = int(words[2])\n else:\n num = 1\n return PlaceCommand(orig, ter, num)\n except KeyError as e:\n raise ParseException('Unknown Territory: %s\\n' % (str(e)))\n except Exception as e:\n raise ParseException(str(e))\n \n elif(cmd == CommandParser.COMMAND_LIST):\n try:\n if(words[1] == \"my\"):\n terrs = \"\"\n i = 1\n terrs += player.color + \"\\n\"\n for ter in self.game.territories.values():\n if(ter.occupant == player):\n terrs += str(i) + \" \" + ter.name + \" - Army Number:\" + str(ter.armies) + \"\\n\"\n i += 1\n terrs += \"Free Armies:\" + str(player.armies)\n return ListCommand(orig, terrs)\n elif(words[1] == \"unoccupied\"):\n terrs = \"\"\n for ter in self.game.territories.values():\n if(not(ter.occupant)):\n terrs += ter.name + \" \"\n return ListCommand(orig, terrs)\n elif(words[1] == \"cards\"):\n cards = 'Cards(' + str(len(player.cards)) + '):\\n'\n for index, card in enumerate(player.cards):\n cards += \"\\t\" + str(index) + \" - \" + card.type + \" \" + (card.territory.name if(card.territory) else \"\") + \"\\n\"\n return ListCommand(orig, cards)\n elif(words[1] == 'neighbours'):\n if(len(words) > 2):\n try:\n n = \"Neighbours of \" + words[2] + \" :\\n\"\n terr = self.game.territories[words[2]]\n for ne in terr.neighbours:\n n += \"\\t\" + ne.name + \"(\" + ne.continent.name + \" \" + (ne.occupant.color if(ne.occupant) else 'unoccupied') + \"- \" + str(ne.armies) + \" armies):\\n\"\n return ListCommand(n)\n except KeyError as e:\n raise ParseException('Unknown Territory: %s\\n' % (str(e)))\n except Exception as ex:\n raise ParseException(str(ex)) \n else:\n n = \"Neighbours:\\n\"\n for terr in player.occupied:\n n += terr.name + \"(\" + terr.continent.name + \"- \" + str(terr.armies) + \" armies):\\n\"\n for ne in terr.neighbours:\n if(not (ne.occupant == player)):\n n += \"\\t\" + ne.name + \"( \" + ne.continent.name + \" \" + (ne.occupant.color if(ne.occupant) else 'unoccupied') + \"- \" + str(ne.armies) + \" armies):\\n\"\n return ListCommand(orig, n)\n elif(words[1] == 'all'):\n all = \"World State:\\n\"\n for con in self.game.continents.values():\n all += con.name + \":\\n\"\n for terr in con.territories:\n all += \"\\t\" + terr.name + \" \" + (terr.occupant.color if(terr.occupant) else ' unoccupied ') + \" \" + str(terr.armies) + \"\\n\"\n return ListCommand(orig, all)\n elif(words[1] == 'mission'):\n return ListCommand(orig, 'Your mission is: ' + player.mission.verbose)\n elif(words[1] == 'commands'):\n commands = \"\"\n commands += \"----------\\n\"\n commands += \"list my : list the player's territories, placed and free armies\\n\"\n commands += \"list unoccupied : list the unoccupied territories\\n\"\n commands += \"list cards : list the player's risk cards\\n\"\n commands += \"list mission : shows the player's mission\\n\"\n commands += \"list neighbours : list the player's occupied territories' neighbours\\n\"\n commands += \"list neighbours : list the given territory's neighbours\\n\"\n commands += \"list all : list each continent and its territories with occupants\\n\"\n commands += \"----------\\n\"\n commands += \"place : place entered number of army in given territory\\n\"\n commands += \"----------\\n\"\n commands += \"move : move armies from 
one territory to another territory\\n\"\n commands += \"----------\\n\"\n commands += \"attack : attack from one territory to another territory with entered number of dice\\n\"\n commands += \"----------\\n\"\n commands += \"trade : trade in with given cards\\n\"\n commands += \"----------\\n\"\n commands += \"exit\\n\"\n return ListCommand(orig, commands)\n else:\n raise ParseException('Not Valid Command\\n')\n except Exception as e:\n raise ParseException(str(e))\n \n elif(cmd == CommandParser.COMMAND_MOVE):\n try:\n if(len(words) != 4):\n raise ParseException('Usage: Move \\n')\n fromTerr = self.game.territories[words[1]]\n toTerr = self.game.territories[words[2]]\n num = int(words[3])\n return MoveCommand(orig, fromTerr, toTerr, num)\n except KeyError as e:\n raise ParseException('Unknown Territory: %s\\n' % (str(e)))\n except Exception as e:\n raise ParseException(str(e))\n elif(cmd == CommandParser.COMMAND_ATTACK):\n try:\n if(len(words) != 4):\n raise ParseException('Usage: Attack \\n')\n fromTerr = self.game.territories[words[1]]\n toTerr = self.game.territories[words[2]]\n num = int(words[3])\n return AttackCommand(orig, fromTerr, toTerr, num)\n except KeyError as e:\n raise ParseException('Unknown Territory: %s\\n' % (str(e)))\n except Exception as e:\n raise ParseException(str(e))\n elif(cmd == CommandParser.COMMAND_TRADE):\n try:\n if(len(words) != 4):\n raise ParseException('Usage: Trade \\n')\n cardNum1 = int(words[1])\n if(cardNum1 > len(self.game.turner.player.cards) or cardNum1 < 0):\n raise ParseException('First card number is not legal please enter between 0 and %s\\n' %(len(self.game.turner.player.cards)))\n cardNum2 = int(words[2])\n if(cardNum2 > len(self.game.turner.player.cards) or cardNum2 < 0):\n raise ParseException('Second card number is not legal please enter between 0 and %s\\n' %(len(self.game.turner.player.cards)))\n cardNum3 = int(words[3])\n if(cardNum3 > len(self.game.turner.player.cards) or cardNum3 < 0):\n raise ParseException('Third card number is not legal please enter between 0 and %s\\n' %(len(self.game.turner.player.cards)))\n\n cards = []\n cards.append(self.game.turner.player.cards[cardNum1])\n cards.append(self.game.turner.player.cards[cardNum2])\n cards.append(self.game.turner.player.cards[cardNum3])\n return TradeCommand(orig, cards)\n except Exception as e:\n raise ParseException(str(e))\n elif(cmd == CommandParser.COMMAND_PASS):\n return PassCommand(orig)\n elif(cmd == CommandParser.COMMAND_EXIT):\n return ExitCommand(orig)\n elif(cmd == CommandParser.COMMAND_MAP):\n return MapCommand(orig, self.game.map)\n elif(cmd == CommandParser.COMMAND_MAPIMG):\n return MapCommand(orig, self.game.mapImage)\n else:\n raise ParseException('Unknown command: %s\\n' % (cmd))\n\n\nclass PassCommand(AbstractCommand):\n def __init__(self, orig):\n AbstractCommand.__init__(self, orig)\n\nclass ExitCommand(AbstractCommand):\n def __init__(self, orig):\n AbstractCommand.__init__(self, orig)\n \nclass MapCommand(AbstractCommand):\n def __init__(self, orig, map):\n AbstractCommand.__init__(self, orig)\n if(map is None):\n self.map = 'No map'\n else:\n self.map = map\n \nclass MapImageCommand(AbstractCommand):\n def __init__(self, orig, mapImg):\n AbstractCommand.__init__(self, orig)\n if(mapImg is None):\n self.mapImg = 'No map'\n else:\n self.mapImg = mapImg\n\nclass ParseException(Exception):\n def __init__(self, message):\n Exception.__init__(self)\n self.mess = 
message\n","repo_name":"mkhasseb/ceng498pyrisk","sub_path":"src/python/risk/command/CommandParser.py","file_name":"CommandParser.py","file_ext":"py","file_size_in_byte":11419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3757929402","text":"import re\n\ndef main(input):\n with open(input) as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n contains = overlaps = 0\n\n for line in lines:\n a1, a2, b1, b2 = map(int, re.split(\"-|,\", line))\n if (b1 >= a1 and b2 <= a2) or (a1 >= b1 and a2 <= b2):\n contains += 1\n if a1 <= b2 and a2 >= b1:\n overlaps += 1\n \n print(\"part1: \" + str(contains))\n print(\"part2: \" + str(overlaps))\n\nmain('Day04/input/input.txt')\n\n","repo_name":"henkj/Advent-of-Code-2022","sub_path":"Day04/Day04.py","file_name":"Day04.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29080005165","text":"import unittest\nimport os\n\nfrom qtpy import QtWidgets, QtCore\n\nfrom qtpy.QtTest import QTest\n\nfrom tests.utility import QtTest\n\nfrom t_rax.model.RubyModel import RubyModel\nfrom t_rax.widget.RubyWidget import RubyWidget\nfrom t_rax.controller.RubyController import RubyController\n\nunittest_path = os.path.dirname(__file__)\nunittest_files_path = os.path.join(unittest_path, '..', 'test_files')\ntest_file = os.path.join(unittest_files_path, 'temper_009.spe')\n\n\nclass RubyControllerTest(QtTest):\n def setUp(self):\n self.model = RubyModel()\n self.widget = RubyWidget(None)\n self.controller = RubyController(self.model, self.widget)\n self.model.load_file(test_file)\n\n\n def input_txt_into_text_field(self, text_field, str):\n text_field.setText(\"\")\n QTest.keyClicks(text_field, str)\n QTest.keyClick(text_field, QtCore.Qt.Key_Enter)\n\n def test_set_ruby_position_textfield_retrieve_pressure_and_set_line_pos(self):\n self.input_txt_into_text_field(self.widget.sample_position_txt, \"700\")\n self.assertNotEqual(float(str(self.widget.pressure_lbl.text())), 0)\n self.assertAlmostEqual(self.widget.get_ruby_line_pos(), 700)\n\n def test_set_ruby_reference_position_text_and_retrieve_pressure(self):\n self.input_txt_into_text_field(self.widget.reference_position_txt, \"694.15\")\n self.assertNotEqual(float(str(self.widget.pressure_lbl.text())), 0)\n\n def test_set_sample_temperature_text_and_retrieve_pressure(self):\n self.input_txt_into_text_field(self.widget.sample_position_txt, \"700\")\n p1 = float(str(self.widget.pressure_lbl.text()))\n self.input_txt_into_text_field(self.widget.sample_temperature_txt, \"1500\")\n self.assertLess(float(str(self.widget.pressure_lbl.text())), p1)\n\n def test_set_ruby_reference_temperature_text_and_retrieve_pressure(self):\n self.input_txt_into_text_field(self.widget.sample_position_txt, \"700\")\n p1 = float(str(self.widget.pressure_lbl.text()))\n self.input_txt_into_text_field(self.widget.reference_temperature_txt, \"600\")\n self.assertGreater(float(str(self.widget.pressure_lbl.text())), p1)\n\n def test_change_ruby_equation_of_state_and_retrieve_pressure(self):\n self.input_txt_into_text_field(self.widget.sample_position_txt, \"702\")\n p1 = float(str(self.widget.pressure_lbl.text()))\n self.widget.ruby_scale_cb.setCurrentIndex(1)\n self.assertNotAlmostEqual(float(str(self.widget.pressure_lbl.text())), p1)\n\n self.widget.ruby_scale_cb.setCurrentIndex(2)\n self.assertNotAlmostEqual(float(str(self.widget.pressure_lbl.text())), 
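# --- Aside (sketch): the two checks in Day04 above are the textbook interval tests --
# containment: b1 >= a1 and b2 <= a2 (or vice versa); overlap: a1 <= b2 and a2 >= b1.
a1, a2, b1, b2 = 2, 4, 3, 6
assert a1 <= b2 and a2 >= b1          # [2,4] and [3,6] overlap
a1, a2, b1, b2 = 5, 7, 1, 4
assert not (a1 <= b2 and a2 >= b1)    # [5,7] and [1,4] are disjoint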
p1)\n","repo_name":"CPrescher/T-rax","sub_path":"tests/controller_test/test_RubyController.py","file_name":"test_RubyController.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"38026334536","text":"import numpy as np\n\ndef make_input(obs, unit_id):\n width, height = obs['width'], obs['height']\n x_shift = (32 - width) // 2\n y_shift = (32 - height) // 2\n cities = {}\n \n b = np.zeros((20, 32, 32), dtype=np.float32)\n \n for update in obs['updates']:\n strs = update.split(' ')\n input_identifier = strs[0]\n \n if input_identifier == 'u':\n x = int(strs[4]) + x_shift\n y = int(strs[5]) + y_shift\n wood = int(strs[7])\n coal = int(strs[8])\n uranium = int(strs[9])\n if unit_id == strs[3]:\n # Position and Cargo\n b[:2, x, y] = (\n 1,\n (wood + coal + uranium) / 100\n )\n else:\n # Units\n team = int(strs[2])\n cooldown = float(strs[6])\n idx = 2 + (team - obs['player']) % 2 * 3\n b[idx:idx + 3, x, y] = (\n 1,\n cooldown / 6,\n (wood + coal + uranium) / 100\n )\n elif input_identifier == 'ct':\n # CityTiles\n team = int(strs[1])\n city_id = strs[2]\n x = int(strs[3]) + x_shift\n y = int(strs[4]) + y_shift\n idx = 8 + (team - obs['player']) % 2 * 2\n b[idx:idx + 2, x, y] = (\n 1,\n cities[city_id]\n )\n elif input_identifier == 'r':\n # Resources\n r_type = strs[1]\n x = int(strs[2]) + x_shift\n y = int(strs[3]) + y_shift\n amt = int(float(strs[4]))\n b[{'wood': 12, 'coal': 13, 'uranium': 14}[r_type], x, y] = amt / 800\n elif input_identifier == 'rp':\n # Research Points\n team = int(strs[1])\n rp = int(strs[2])\n b[15 + (team - obs['player']) % 2, :] = min(rp, 200) / 200\n elif input_identifier == 'c':\n # Cities\n city_id = strs[2]\n fuel = float(strs[3])\n lightupkeep = float(strs[4])\n cities[city_id] = min(fuel / lightupkeep, 10) / 10\n \n # Day/Night Cycle\n b[17, :] = obs['step'] % 40 / 40\n # Turns\n b[18, :] = obs['step'] / 360\n # Map Size\n b[19, x_shift:32 - x_shift, y_shift:32 - y_shift] = 1\n\n return b","repo_name":"lannguyen0910/lux-rl","sub_path":"imitation-learning/datasets/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"70978764425","text":"import sys\nfrom urwid import *\n\nsongdict = {\"Japanese\":[],\"Romaji\":[],\"English\":[]}\npalette = [(\"Japanese\", \"dark red\", \"black\"),\n (\"Romaji\", \"dark green\", \"black\"),\n (\"English\", \"dark cyan\", \"black\")]\n\ndef main():\n songfile = sys.argv[1] \n songname = get_songinfo(songfile)\n loop = init_interface(songname)\n loop.run()\n\ndef get_songinfo(songfile):\n filelines = get_filelines(songfile)\n songname = get_songname(filelines[1])\n fill_songdict(filelines)\n return songname\n\ndef get_filelines(filename):\n with open(filename) as f: \n filelines = f.readlines()\n return filelines\n\ndef get_songname(nameline):\n init = nameline.index('\"')\n end = nameline.index('\"', init+1)\n return nameline[init+1:end]\n\ndef fill_songdict(filelines):\n for line in filelines:\n if is_songline(line):\n add2dict(line)\n\ndef is_songline(line):\n is_table = line.startswith(\"| \")\n is_head = line.startswith(\"| J\")\n return is_table and not is_head\n\ndef add2dict(line):\n songlines = get_songlines(line)\n songdict[\"Japanese\"].append(songlines[0])\n songdict[\"Romaji\"] .append(songlines[1])\n songdict[\"English\"] .append(songlines[2])\n\ndef get_songlines(line):\n return 
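# --- Aside (sketch; 'u_1' and the minimal obs dict are hypothetical): even a bare
# observation maps onto the fixed 20x32x32 feature tensor produced by make_input
# above, with the true map centered inside the 32x32 canvas.
obs = {'width': 12, 'height': 12, 'updates': [], 'player': 0, 'step': 40}
features = make_input(obs, 'u_1')
assert features.shape == (20, 32, 32)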
[l.strip() for l in line.split(\"|\")][1:-1]\n\ndef init_interface(songname):\n walker = create_walker()\n listbox = MyListBox(walker)\n linebox = LineBox(listbox, \" \" + songname + \" \")\n loop = MainLoop(linebox,palette,unhandled_input=exit_loop)\n return loop\n\ndef create_walker():\n jlines = songdict[\"Japanese\"]\n rlines = songdict[\"Romaji\"]\n elines = songdict[\"English\"]\n content = [Divider()]\n for jline, rline, eline in zip(jlines, rlines, elines):\n if jline:\n jtext = Text((\"Japanese\", jline), align=\"center\")\n rtext = Text((\"Romaji\", rline), align=\"center\")\n etext = Text((\"English\", eline), align=\"center\")\n div = Divider()\n content += [jtext, rtext, etext, div]\n else:\n content += [Divider(), Divider()]\n return SimpleFocusListWalker(content)\n \ndef exit_loop(key):\n if key == \"esc\":\n raise ExitMainLoop()\n\nclass MyListBox(ListBox):\n def mouse_event(self,size,event,button,col,row,focus):\n super().mouse_event(size,event,button,col,row,focus)\n if button == 4.0:\n self.keypress(size, \"up\")\n elif button == 5.0:\n self.keypress(size, \"down\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"francocurotto/Japanese-Website","sub_path":"scripts/showlyrics.py","file_name":"showlyrics.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16210418304","text":"# By Janos Potecki\n# University College London\n# January 2018\n\nfrom troposphere import Template, Sub, Parameter, Ref\nfrom troposphere.codepipeline import ( Stages\n , Actions\n , ActionTypeId\n , OutputArtifacts\n )\n\n\ndef getCodeCommit(t: Template, outputfiles: str) -> Stages:\n repo = t.add_parameter(\n Parameter( \"CodeCommitRepo\"\n , Description=\"Name of the CodeCommit Repository\"\n , Type=\"String\"\n ) \n )\n branch = t.add_parameter(\n Parameter( \"Branch\"\n , Description=\"Branch triggering the deployment\"\n , Type=\"String\"\n ) \n )\n actionId = ActionTypeId( Category = \"Source\"\n , Owner = \"AWS\"\n , Version = \"1\"\n , Provider = \"CodeCommit\"\n )\n action = Actions( Name = Sub(\"${AWS::StackName}-LambdaSource\")\n , ActionTypeId = actionId\n , Configuration = {\"BranchName\" : Ref(branch)\n , \"RepositoryName\" : Ref(repo)\n }\n , OutputArtifacts = [OutputArtifacts( Name = outputfiles)]\n , RunOrder = \"1\"\n )\n return Stages( Name = \"Source\"\n , Actions = [ action ]\n )\n","repo_name":"AwsLambdaContinuousDelivery/AwsLambdaContinuousDeliverySourceCodeCommit","sub_path":"awslambdacontinuousdelivery/source/codecommit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11925554968","text":"#!/usr/bin/env python3\nimport logging\nimport json\nimport sqlite3\n\nfrom common import (\n load_config,\n request\n)\n\nlogging.basicConfig(format='%(asctime)s - BINANCE - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\n\nGEMINI_BASE_URL = 'https://api.gemini.com'\n#PAIRS = ['BTCUSDT','ADAUSDT', 'ADABTC', 'ADAETH', 'COTIBTC', 'COTIUSDT', 'LINKBRL', 'LINKBTC', 'LINKUSDT']\n#INTERVAL = ['1m', '3m', '5m', '15m', '1h', '2h', '4h', '6h', '8h', '12h', '1d'] # '1s' == 1 second == more than gigabytes of data. 
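# --- Aside (usage sketch; assumes troposphere is installed): getCodeCommit above
# registers the CodeCommitRepo/Branch parameters on the template and returns a
# Stages object meant to be passed into a CodePipeline definition.
from troposphere import Template
t = Template()
source_stage = getCodeCommit(t, "SourceOutput")
print(t.to_json())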
-__-\n\nconfig = lambda: load_config('gemini')\n\ndef extract(pair, interval, *args, **kwargs):\n '''Download json data of pair from Gemini API.'''\n return json.loads(request(f'{GEMINI_BASE_URL}/v2/candles/{pair}/{interval}'))\n\n\ndef transform(data, pair, inteval, *args, **kwargs):\n '''Return a list of dicts with OHLCV + date and pair'''\n if not data:\n return False\n \n result = []\n\n for line in data:\n result.append({\n \"open\": line[1],\n \"high\": line[2],\n \"low\": line[3],\n \"close\": line[4],\n \"volume\": line[5],\n \"pair\": pair,\n \"interval\": inteval,\n \"date\": line[0],\n \"exchange\": 'gemini',\n })\n\n return result\n\ndef load(lines, *args, **kwargs):\n '''Load data to database.'''\n\n data = [(l['open'], l['high'], l['low'], l['close'], l['volume'], l['pair'], l['interval'], l['date'], l['exchange']) for l in lines]\n\n con = sqlite3.connect(\"/var/code/ohlcv.sqlite\")\n cur = con.cursor()\n cur.execute('CREATE TABLE IF NOT EXISTS \"ohlcv\" (\"id\" INTEGER NOT NULL UNIQUE, \"open\" REAL NOT NULL, \"high\" REAL NOT NULL, \"low\" REAL NOT NULL, \"close\" REAL NOT NULL, \"volume\" REAL NOT NULL, \"pair\" TEXT NOT NULL, \"interval\" TEXT NOT NULL, \"date\" TEXT NOT NULL, \"exchange\" TEXT NOT NULL, PRIMARY KEY(\"id\" AUTOINCREMENT), UNIQUE(interval, pair , date ));')\n cur.executemany('INSERT OR IGNORE INTO ohlcv VALUES (NULL, :open, :high, :low, :close, :volume, :pair, :interval, :date, :exchange);', lines)\n con.commit()\n cur.close()\n con.close()\n print(f' {len(list(lines))} rows inserted successfully.')\n\n\ndef etl(pair, interval, *args, **kwargs):\n '''Execute the sequence download, extract, transform, load.'''\n\n logging.info(f'Extracting data {pair} for interval {interval}.')\n e = extract(pair, interval)\n\n logging.info('Transforming data.')\n t = transform(e, pair, interval)\n\n if t:\n logging.info('Load data to DB.')\n load(t)\n else:\n logging.info(f'Pair/Inteval {pair}/{interval} up to date.')\n print(f'Pair/Inteval {pair}/{interval} up to date.')\n\nif __name__ == '__main__':\n assets = config().get('assets')\n\n for asset in assets:\n etl(asset.get('pair'), asset.get('interval'))\n","repo_name":"maurobaraldi/cripto-history-data","sub_path":"tasks/gemini.py","file_name":"gemini.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38456591791","text":"import pygame\nimport pygame.gfxdraw\nimport constants\n\n\nDEFAULT_PIECE_COLOUR = (1, 148, 154)\nDEFAULT_PIECE_SIZE = 8\nADJACENCY_PIECE_RING_SPACING = 5\nADJACENCY_PIECE_RING_WIDTH = 2\nHALF_DEFAULT_PIECE_SIZE = int(DEFAULT_PIECE_SIZE / 2)\nDOUBLE_DEFAULT_PIECE_SIZE = int(DEFAULT_PIECE_SIZE * 2)\n\n\nclass Piece:\n\tdef __init__(self):\n\t\tpass\n\n\tdef validate(self, board, k):\n\t\treturn True\n\n\tdef draw(self, d_surf, pos):\n\t\tpygame.gfxdraw.aacircle(d_surf, int(pos[0]), int(pos[1]), DEFAULT_PIECE_SIZE, DEFAULT_PIECE_COLOUR)\n\t\tpygame.gfxdraw.filled_circle(d_surf, int(pos[0]), int(pos[1]), DEFAULT_PIECE_SIZE, DEFAULT_PIECE_COLOUR)\n\n\nclass LineBlockerPiece(Piece):\n\tdef __init__(self, blocking_direction_list):\n\t\tsuper()\n\t\tself.blocking_direction_list = blocking_direction_list\n\n\tdef validate(self, board, k):\n\t\treturn all([self._validate_direction(board, k, direction) for direction in self.blocking_direction_list])\n\n\tdef _validate_direction(self, board, k, direction):\n\t\tspaces = []\n\t\tif direction == constants.UP:\n\t\t\tspaces = [(k[0], i) for i in 
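# --- Aside (sketch): load() above dedupes through the UNIQUE(interval, pair, date)
# constraint -- INSERT OR IGNORE silently skips conflicting rows, so re-running the
# ETL cannot duplicate candles. The mechanism in miniature:
import sqlite3
con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE t (pair TEXT, date TEXT, UNIQUE(pair, date))')
con.execute("INSERT OR IGNORE INTO t VALUES ('BTCUSD', '2023-01-01')")
con.execute("INSERT OR IGNORE INTO t VALUES ('BTCUSD', '2023-01-01')")
assert con.execute('SELECT COUNT(*) FROM t').fetchone()[0] == 1
con.close()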
\tdef _validate_direction(self, board, k, direction):\n\t\tspaces = []\n\t\tif direction == constants.UP:\n\t\t\tspaces = [(k[0], i) for i in range(k[1])]\n\t\telif direction == constants.DOWN:\n\t\t\tspaces = [(k[0], i) for i in range(k[1] + 1, board.num_rows)]\n\t\telif direction == constants.LEFT:\n\t\t\tspaces = [(i, k[1]) for i in range(k[0])]\n\t\telif direction == constants.RIGHT:\n\t\t\tspaces = [(i, k[1]) for i in range(k[0] + 1, board.num_cols)]\n\n\t\treturn all([board.spaces.get(k).isEmpty() for k in spaces])\n\n\tdef draw(self, d_surf, pos):\n\t\tsuper().draw(d_surf, pos)\n\n\t\tif constants.UP in self.blocking_direction_list:\n\t\t\tpygame.draw.line(d_surf, DEFAULT_PIECE_COLOUR, pos, (pos[0], pos[1] - DOUBLE_DEFAULT_PIECE_SIZE), HALF_DEFAULT_PIECE_SIZE)\n\t\tif constants.DOWN in self.blocking_direction_list:\n\t\t\tpygame.draw.line(d_surf, DEFAULT_PIECE_COLOUR, pos, (pos[0], pos[1] + DOUBLE_DEFAULT_PIECE_SIZE), HALF_DEFAULT_PIECE_SIZE)\n\t\tif constants.LEFT in self.blocking_direction_list:\n\t\t\tpygame.draw.line(d_surf, DEFAULT_PIECE_COLOUR, pos, (pos[0] - DOUBLE_DEFAULT_PIECE_SIZE, pos[1]), HALF_DEFAULT_PIECE_SIZE)\n\t\tif constants.RIGHT in self.blocking_direction_list:\n\t\t\tpygame.draw.line(d_surf, DEFAULT_PIECE_COLOUR, pos, (pos[0] + DOUBLE_DEFAULT_PIECE_SIZE, pos[1]), HALF_DEFAULT_PIECE_SIZE)\n\n\nclass AdjacencyPiece(Piece):\n\tdef __init__(self, num_adjacencies):\n\t\tsuper().__init__()\n\t\tself.num_adjacencies = num_adjacencies\n\n\tdef validate(self, board, k):\n\t\tadjacent_pieces = [_k for _k in board.get_adjacent_spaces(k) if board.spaces.get(_k).isNotEmpty()]\n\t\treturn len(adjacent_pieces) == self.num_adjacencies\n\n\tdef draw(self, d_surf, pos):\n\t\tfor i in range(1, self.num_adjacencies + 2):\n\t\t\tpygame.gfxdraw.aacircle(d_surf, int(pos[0]), int(pos[1]), i * ADJACENCY_PIECE_RING_SPACING, DEFAULT_PIECE_COLOUR)\n\n","repo_name":"yolkyal/puzzle_board","sub_path":"piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39921953241","text":"# -*- coding: utf-8 -*-\n\n\ndef bubble_sort(seq):\n    \"\"\"Move the largest remaining number to the far right on each pass\n    \"\"\"\n    n = len(seq)\n    for i in range(n-1):\n        for j in range(n-1-i):\n            if seq[j] > seq[j+1]:\n                seq[j], seq[j+1] = seq[j+1], seq[j]\n\n\ndef select_sort(seq):\n    \"\"\"Move the smallest remaining number to the far left on each pass\n    \"\"\"\n    n = len(seq)\n    for i in range(n-1):\n        min_idx = i\n        for j in range(i+1, n):\n            if seq[j] < seq[min_idx]:\n                min_idx = j\n        if min_idx != i:\n            seq[i], seq[min_idx] = seq[min_idx], seq[i]\n\n\ndef insert_sort(seq):\n    \"\"\"Take one number at a time and place it, in order, into the already-sorted prefix\n    \"\"\"\n    n = len(seq)\n    for i in range(1, n):\n        value = seq[i]\n        pos = i\n        while pos > 0 and value < seq[pos-1]:\n            seq[pos] = seq[pos-1]  # if the element before pos is larger, shift it one step right\n            pos -= 1\n        seq[pos] = value\n","repo_name":"Peter-Chou/data-structure-python","sub_path":"datastruct/algorithm/easy_sort.py","file_name":"easy_sort.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34171970636","text":"print(\"Desafio Intermediário - Papagaio poliglota\")\nprint()\n\n
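# (editor's gloss) esquerda/direita/nenhuma = the parrot raised its left/right/\n# neither leg; the prints name the language it speaks, and 'caiu' means it fell.\n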
while True: \n    try: \n        letra = input() \n        if (letra == 'esquerda'):\n            print('ingles')\n        elif (letra == 'direita'):\n            print('frances')\n        elif (letra == 'nenhuma'):\n            print('portugues')\n        else:\n            print('caiu')\n    except EOFError: \n        break","repo_name":"dipinho/Python","sub_path":"Desafios Intermediários Py - Unimed-BH/PapagaioPoliglota.py","file_name":"PapagaioPoliglota.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5538841387","text":"from scipy.stats import multivariate_normal, beta\nimport numpy as np\n\ndef u2D(X, m1, v1, m2, v2, off):\n    \"\"\"\n    Synthetic thermal utility for 2D features\n    Multivariate Gaussian Distribution\n    Operating temp. and relative humidity\n    \"\"\"\n    mean_vec = np.array([m1, m2])\n    cov_mat = np.array([[v1, off],[off, v2]])\n    normal = multivariate_normal(mean=mean_vec,\n                                 cov = cov_mat)\n    u_vec = normal.pdf(X)\n    \n    return u_vec \n\ndef beta_utility_gen(X, x_max, a, b):\n    \"\"\"\n    Beta distribution over utility value\n    X : value of Vertical Illuminance (EV) at which to find the utility function value\n    x_max : max value of EV to be considered for getting the proper Beta distribution\n    a: Beta distribution parameter\n    b : Beta distribution parameter\n    Outputs:\n    Utility function value\n    \"\"\"\n    X = X - 15\n    x_all = np.linspace(0,1,100)\n    u_all = beta.pdf(x_all, a, b)\n    u_max_all = np.max(u_all)\n    u_min_all = np.min(u_all)\n\n\n    x_norm = X/x_max\n    u = beta.pdf(x_norm, a, b)\n    u_der = ((a - 1)/x_norm - (b - 1)/(1 - x_norm))* u\n    u_norm = (u - u_min_all)/(u_max_all - u_min_all)\n    u_der_norm = u_der/(u_max_all - u_min_all)\n    return u_norm, u_der_norm\n\nif __name__ == '__main__':\n    import matplotlib.pyplot as plt\n    import seaborn as sns\n    sns.set_context(\"talk\", font_scale = 1.4)\n    \n    xx = np.linspace(20, 27, 200)\n    x_max = 20\n    a = 6\n    b = 8\n    u, u_der_norm = beta_utility_gen(xx, x_max, a, b)\n    plt.figure(figsize = (12,8))\n    plt.plot(xx, u, color = 'b', label = 'true utility', alpha = 0.75)\n    plt.xlabel('Indoor Room Temperature [$^{o}$C]')\n    plt.ylabel(' Normalized Utility $u_{n}$')\n    plt.legend(loc = 'best')\n    \n","repo_name":"nawalgao/GPPrefElicit","sub_path":"GPFlowUnimodalPref/SynOccupant/objfunc.py","file_name":"objfunc.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40103159621","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 23 00:27:20 2021\n\nusage: discordlog.py [-h] [-n N] [-z Z] Input_File Output_Name\n\nSplit a chatlog in multiple files\n\npositional arguments:\n  Input_File   the path to input file\n  Output_Name  the output name\n\noptional arguments:\n  -h, --help  show this help message and exit\n  -n N        number of msgs on page\n  -z Z        number of digits in filename\n\n@author: Willem Haffmans\n\"\"\"\n \nfrom bs4 import BeautifulSoup, Tag\nimport argparse\nimport logging\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n \ndef list_split(listA, n):\n    \"\"\"Split a list in chunks of size n\"\"\"\n    for x in range(0, len(listA), n):\n        every_chunk = listA[x: n+x]\n\n        if len(every_chunk) < n:\n            every_chunk = every_chunk + \\\n                [None for y in range(n-len(every_chunk))]\n        yield every_chunk\n    \n\ndef add_nav():\n    \"\"\"add navigation to the body\"\"\"\n    if prevpage:\n        a_prev = Tag(new_soup,name='a', attrs={'href': args.output + prevpage + '.html'})\n        a_prev.string = 'Previous'\n        body.append(a_prev)\n    if nextpage:\n        a_next = Tag(new_soup,name='a', attrs={'href': args.output + nextpage + '.html'})\n        a_next.string = 'Next'\n        body.append(a_next)\n\n\n# Create the parser\nmy_parser = argparse.ArgumentParser(description='Split a chatlog in multiple files')\n\n# Add the arguments\n
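# Example invocation (editor's illustration, not from the repo):\n#   python discordlog.py -n 50 -z 3 chatlog.html out\n# writes out001.html, out002.html, ... with 50 message groups per page.\n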
my_parser.add_argument('-n', action='store', default=5, type=int, help='number of msgs on page')\nmy_parser.add_argument('-z', action='store', default=4, type=int, help='number of digits in filename')\n\nmy_parser.add_argument('input',\n                       metavar='Input_File',\n                       type=str,\n                       help='the path to input file')\n\nmy_parser.add_argument('output',\n                       metavar='Output_Name',\n                       type=str,\n                       help='the output name')\n\n# Execute the parse_args() method\nargs = my_parser.parse_args()\n\nlogging.debug('opening the input file')\n\n# Open the input file and parse the HTML\nwith open(args.input,encoding=\"utf8\") as f:\n    soup = BeautifulSoup(f, \"html.parser\")\n\n# find the different sections\nlogging.debug('find head')\nhead = soup.find(\"head\")\nlogging.debug('find preamble')\nheader = soup.find(\"div\", class_=\"preamble\")\nlogging.debug('find chatlog')\nmsgs = soup.find_all(\"div\", class_=\"chatlog__message-group\")\nlogging.debug('find postamble')\nfooter = soup.find(\"div\", class_= \"postamble\")\n\n# split the list of messages in chunks and create an HTML page for every chunk\nfor n, page in enumerate(list_split(msgs,args.n),1):\n    \n    logging.debug('create new soup for page' + str(n))\n    new_soup = BeautifulSoup()\n    logging.debug('creating tags')\n    html = Tag(new_soup,name='html')\n    body = Tag(new_soup,name='body')\n    chatlog = Tag(new_soup,name=\"div\",attrs={'class':'chatlog'})\n    \n    nextpage = str(n + 1).zfill(args.z) if args.n * n < len(msgs) else None\n    prevpage = str(n - 1).zfill(args.z) if n > 1 else None\n    \n    new_soup.append(html)\n    html.append(head)\n    html.append(body)\n    body.append(header)\n    \n    add_nav()\n    \n    for msg in page:\n        if msg:\n            chatlog.append(msg)\n    body.append(chatlog)\n    \n    add_nav()\n    html.append(footer)\n    \n# write the output \n    logging.debug('writing file')\n    with open(args.output+str(n).zfill(args.z)+'.html','wb') as f:\n        f.write(new_soup.prettify(encoding='utf-8'))\n    logging.debug('done with page' + str(n))\n\n","repo_name":"WHaffmans/chatlog-splitter","sub_path":"discordlog.py","file_name":"discordlog.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40615629007","text":"import sys\nimport numpy as np\nimport json\nimport pandas as pd\nimport collections\n\nif __name__ == '__main__':\n\n    for epoch in [20, 100]:\n        print(epoch)\n        real_res = {'logreg':[-1], 'svc':[-1], 'linearsvc':[-1], 'randomforest':[-1]}\n        for gc in [3, 5, 8, 16]:\n            for lr in [0.01, 0.1, 0.001]:\n                for tpe in ['local', 'localprior']:\n                    res = collections.defaultdict(lambda :collections.defaultdict(list))\n                    with open(sys.argv[1], 'r') as f:\n                        for line in f:\n                            x = line.strip().split(',', 6)\n                            if x[1] != tpe:\n                                continue\n                            if x[2] != str(gc):\n                                continue\n                            if x[3] != str(epoch):\n                                continue\n                            if x[5] != str(lr):\n                                continue\n                            tmp = json.loads(x[-1])\n\n                            DS = x[0]\n                            res[DS]['logreg'].append(tmp['logreg'])\n                            res[DS]['svc'].append(tmp['svc'])\n                            res[DS]['linearsvc'].append(tmp['linearsvc'])\n                            res[DS]['randomforest'].append(tmp['randomforest'])\n\n                    for DS, lst in res.items():\n                        if DS != sys.argv[2]:\n                            continue\n                        # print('====================')\n                        # print(DS)\n                        for clf, v in lst.items():\n                            mn = np.mean(np.array(v[:5]), axis=0)\n                            std = np.std(np.array(v[:5]), axis=0)\n\n                            idx = np.argmax(mn)\n                            if mn[idx] > real_res[clf][0] and len(v) > 1:\n                                real_res[clf] = [mn[idx], std[idx], epoch, lr, gc, idx, len(v)]\n                                # print(epoch, lr, gc, clf, idx, mn[idx], std[idx], len(v))\n            
print(real_res)\n\n","repo_name":"Shen-Lab/GraphCL","sub_path":"unsupervised_TU/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":484,"dataset":"github-code","pt":"81"} +{"seq_id":"13696285047","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import train_test_split\nfrom ..features.feature_engineering import feature_engineering\nfrom app import cos\n\n\ndef make_dataset(path, timestamp, target, cols_to_remove, model_type='RandomForest'):\n\n \"\"\"\n Function to create the dataset used for model training.\n\n Args:\n path (str): Data path.\n timestamp (float): Temporary representation in seconds.\n target (str): Dependent variable to use.\n\n Kwargs:\n model_type (str): Type of model used.\n\n Returns:\n DataFrame, DataFrame. Train and test datasets for the model.\n \"\"\"\n\n print('---> Getting data')\n df = get_raw_data_from_local(path)\n print('---> Train / test split')\n train_df, test_df = train_test_split(df, test_size=0.2, random_state=50)\n print('---> Transforming data')\n train_df, test_df = transform_data(train_df, test_df, timestamp, target, cols_to_remove)\n print('---> Feature engineering')\n train_df, test_df = feature_engineering(train_df, test_df)\n print('---> Preparing data for training')\n train_df, test_df = pre_train_data_prep(train_df, test_df, model_type, timestamp, target)\n\n return train_df.copy(), test_df.copy()\n\n\ndef get_raw_data_from_local(path):\n\n \"\"\"\n Function to get the original data from local\n\n Args:\n path (str): Data path.\n\n Returns:\n DataFrame. Dataset with the input data.\n \"\"\"\n\n df = pd.read_csv(path)\n return df.copy()\n\n\ndef transform_data(train_df, test_df, timestamp, target, cols_to_remove):\n\n \"\"\"\n Function that allows performing the first transformation tasks\n of input data.\n\n Args:\n train_df (DataFrame): Train dataset.\n test_df (DataFrame): Test dataset.\n timestamp (float): Temporary representation in seconds.\n target (str): Dependent variable to use.\n cols_to_remove (list): Columns to remove.\n\n Returns:\n DataFrame, DataFrame. 
Train and test datasets for the model.\n    \"\"\"\n\n    # Removing unusable columns\n    print('------> Removing unnecessary columns')\n    train_df = remove_unwanted_columns(train_df, cols_to_remove)\n    test_df = remove_unwanted_columns(test_df, cols_to_remove)\n\n    # Removing null values in the target variable\n    print('------> Removing missing targets')\n    train_df = remove_missing_targets(train_df, target)\n    test_df = remove_missing_targets(test_df, target)\n\n    # Type change\n    train_df['Pclass'] = train_df['Pclass'].astype(str)\n    test_df['Pclass'] = test_df['Pclass'].astype(str)\n\n\n    # We separate the target variable before encoding\n    train_target = train_df[target].copy()\n    test_target = test_df[target].copy()\n    train_df.drop(columns=[target], inplace=True)\n    test_df.drop(columns=[target], inplace=True)\n\n    # Generation of dummies\n    print('------> Encoding data')\n    train_df = pd.get_dummies(train_df)\n    test_df = pd.get_dummies(test_df)\n    # align train and test so they share the same columns\n    train_df, test_df = train_df.align(test_df, join='inner', axis=1)\n\n    # Saving the resulting columns to IBM COS\n    print('---------> Saving encoded columns')\n    cos.save_object_in_cos(train_df.columns, 'encoded_columns', timestamp)\n\n    # we rejoin the target variable to the datasets\n    train_df.reset_index(drop=True, inplace=True)\n    test_df.reset_index(drop=True, inplace=True)\n    train_target.reset_index(drop=True, inplace=True)\n    test_target.reset_index(drop=True, inplace=True)\n    train_df = train_df.join(train_target)\n    test_df = test_df.join(test_target)\n\n    return train_df.copy(), test_df.copy()\n\n\ndef pre_train_data_prep(train_df, test_df, model_type, timestamp, target):\n    \"\"\"\n    Function that performs the last transformations on the data\n    before training (null imputation and scaling)\n\n    Args:\n        train_df (DataFrame): Train dataset.\n        test_df (DataFrame): Test dataset.\n        model_type (str): Type of model used.\n        timestamp (float): Temporary representation in seconds.\n        target (str): Dependent variable to use.\n\n    Returns:\n        DataFrame, DataFrame. Train and test datasets for the model.\n    \"\"\"\n\n    # We separate the target variable before imputation and scaling\n    train_target = train_df[target].copy()\n    test_target = test_df[target].copy()\n    train_df.drop(columns=[target], inplace=True)\n    test_df.drop(columns=[target], inplace=True)\n\n    # null imputation\n    print('------> Imputing missing values')\n    train_df, test_df = input_missing_values(train_df, test_df, timestamp)\n\n    # we restrict scaling to certain models only\n    if model_type.upper() in ['SVM', 'KNN', 'NAIVEBAYES']:\n        print('------> Scaling features')\n        train_df, test_df = scale_data(train_df, test_df)\n\n    # we rejoin the target variable to the datasets\n    train_df.reset_index(drop=True, inplace=True)\n    test_df.reset_index(drop=True, inplace=True)\n    train_target.reset_index(drop=True, inplace=True)\n    test_target.reset_index(drop=True, inplace=True)\n    train_df = train_df.join(train_target)\n    test_df = test_df.join(test_target)\n\n    return train_df.copy(), test_df.copy()\n\n\n
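# Editor's sketch (illustrative, not project code): the imputer below is fit\n# on train only and then reused on test, which avoids leaking test statistics:\n#   imp = SimpleImputer(strategy='median').fit(train_df)\n#   test_imputed = imp.transform(test_df)\n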
def input_missing_values(train_df, test_df, timestamp):\n    \"\"\"\n    Function for null imputation\n\n    Args:\n        train_df (DataFrame): Train dataset.\n        test_df (DataFrame): Test dataset.\n        timestamp (float): Temporary representation in seconds.\n\n    Returns:\n        DataFrame, DataFrame. Train and test datasets for the model.\n    \"\"\"\n    # we create the imputer that will use the median as a substitute\n    imputer = SimpleImputer(strategy='median')\n\n    # we adjust the medians based on the train data\n    train_df = pd.DataFrame(imputer.fit_transform(train_df), columns=train_df.columns)\n    # we impute the test data\n    test_df = pd.DataFrame(imputer.transform(test_df), columns=test_df.columns)\n\n    # we save the imputer for future new data\n    print('------> Saving imputer on the cloud')\n    cos.save_object_in_cos(imputer, 'imputer', timestamp)\n\n    return train_df.copy(), test_df.copy()\n\n\ndef remove_unwanted_columns(df, cols_to_remove):\n    \"\"\"\n    Function to remove unnecessary variables\n\n    Args:\n        df (DataFrame): Dataset.\n\n    Returns:\n        DataFrame. Dataset.\n    \"\"\"\n    return df.drop(columns=cols_to_remove)\n\n\ndef remove_missing_targets(df, target):\n    \"\"\"\n    Function to remove null values in the target variable\n\n    Args:\n        df (DataFrame): Dataset.\n\n    Returns:\n        DataFrame. Dataset.\n    \"\"\"\n    return df[~df[target].isna()].copy()\n\n\ndef scale_data(train_df, test_df):\n    \"\"\"\n    Variable scaling function\n\n    Args:\n        train_df (DataFrame): Train dataset.\n        test_df (DataFrame): Test dataset.\n\n    Returns:\n        DataFrame, DataFrame. Train and test datasets for the model.\n    \"\"\"\n\n    # scaling object in range (0,1)\n    scaler = MinMaxScaler(feature_range=(0, 1))\n    # fit and transform on train data\n    train_df = scaler.fit_transform(train_df)\n    # test data scaling\n    test_df = scaler.transform(test_df)\n\n    return train_df.copy(), test_df.copy()\n\n\n","repo_name":"fran-roca/titanic_train","sub_path":"app/src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":7441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18786397367","text":"#osa10-1\n\"\"\"\nDefines a Tietokone class with the attributes malli and nopeus.\nMake a KannettavaTietokone class that inherits the Tietokone class.\nIn addition to the attributes of Tietokone, the class gets a third integer attribute, paino, in the constructor.\nAlso write a __str__ method in the class so that the state of the object is\nprinted as in the example below.\n\"\"\"\nclass Tietokone:\n    def __init__(self, malli: str, nopeus: int):\n        self.__malli = malli\n        self.__nopeus = nopeus\n\n    def malli(self):\n        return self.__malli\n\n    def nopeus(self):\n        return self.__nopeus\n\nclass KannettavaTietokone(Tietokone):\n    def __init__(self, malli: str, nopeus: int, paino: int):\n        super().__init__(malli, nopeus)\n        self.__paino = paino\n    \n    def paino(self):\n        return self.__paino\n    \n    def __str__(self):\n        return \"{}, {} MHz, {} kg\".format(super().malli(), super().nopeus(), self.paino())\n\nif __name__ == \"__main__\":\n    ipm = KannettavaTietokone(\"IPM MikroMauri\", 1500, 2)\n    print(ipm) # IPM MikroMauri, 1500 MHz, 2 kg\n\n#osa10-2\n\"\"\"\nMake a Pelimuseo class that inherits the Pelivarasto class.\nIn the Pelimuseo class, re-implement the method anna_pelit()\nso that it returns only games made before 1990.\nIn addition, the class must have a constructor that calls the superclass\nPelivarasto constructor. The constructor has no parameters.\n\"\"\"\nclass Tietokonepeli:\n    def __init__(self, nimi: str, julkaisija: str, vuosi: int):\n        self.nimi = nimi\n        self.julkaisija = julkaisija\n        self.vuosi = vuosi\n\nclass Pelivarasto:\n    def __init__(self):\n        self.__pelit = []\n\n    def lisaa_peli(self, peli: Tietokonepeli):\n        
self.__pelit.append(peli)\n\n def anna_pelit(self):\n return self.__pelit\n\nclass Pelimuseo(Pelivarasto):\n def __init__(self):\n super().__init__()\n \n def lisaa_peli(self, peli: Tietokonepeli):\n super().lisaa_peli(peli)\n \n def anna_pelit(self):\n proper_game_list = []\n for game in super().anna_pelit():\n if game.vuosi <= 1990:\n proper_game_list.append(game)\n return proper_game_list\n\nif __name__ == \"__main__\":\n museo = Pelimuseo()\n museo.lisaa_peli(Tietokonepeli(\"Pacman\", \"Namco\", 1980))\n museo.lisaa_peli(Tietokonepeli(\"GTA 2\", \"Rockstar\", 1999))\n museo.lisaa_peli(Tietokonepeli(\"Bubble Bobble\", \"Taito\", 1986))\n for peli in museo.anna_pelit():\n print(peli.nimi) \n# Output:\n# Pacman\n# Bubble Bobble\n\n#osa10-3\n\"\"\"\nImplement a class Nelio that inherits the class Suorakulmio.\nUnlike rectangle square all sides are the same length, that is,\nthe square is a kind of simpler special case of a rectangle.\nThe class must not define new attributes!\n\"\"\"\nclass Suorakulmio:\n def __init__(self, leveys: int, korkeus: int):\n self.leveys = leveys\n self.korkeus = korkeus\n\n def __str__(self):\n return f\"suorakulmio {self.leveys}x{self.korkeus}\"\n\n def pinta_ala(self):\n return self.leveys * self.korkeus\n\nclass Nelio(Suorakulmio):\n def __init__(self, side_length: int):\n super().__init__(side_length, side_length)\n \n def __str__(self):\n return \"neliö {}x{}\".format(self.leveys, self.korkeus)\n \n def pinta_ala(self):\n super().pinta_ala()\n return self.leveys * self.korkeus\n\nif __name__ == \"__main__\":\n suorakulmio = Suorakulmio(2, 3)\n print(suorakulmio) # suorakulmio 2x3\n print(\"pinta-ala:\", suorakulmio.pinta_ala()) # pinta-ala: 6\n\n nelio = Nelio(4)\n print(nelio) # neliö 4x4\n print(\"pinta-ala:\", nelio.pinta_ala()) # pinta-ala: 16\n\n#osa10-4\n\"\"\"\nImplement class PisinSana that inherits from class Sanapeli, which returns the winner with the longest word\nImplement class EnitenVokaaleja that inherits from class Sanapeli, which returns the winner with more vowels in the word\nImplement class KiviPaperiSakset that inherits from class Sanapeli,which returns the winner following stone, paper and scissors rules\n(Rock, Paper, Scissors) = (kivi, paperi, sakset)\n\"\"\"\nimport random\n\nclass Sanapeli():\n def __init__(self, kierrokset: int):\n self.voitot1 = 0\n self.voitot2 = 0\n self.kierrokset = kierrokset\n\n def kierroksen_voittaja(self, pelaaja1_sana: str, pelaaja2_sana: str):\n # arvotaan voittaja\n return random.randint(1, 2)\n\n def pelaa(self):\n print(\"Sanapeli:\")\n for i in range(1, self.kierrokset+1):\n print(f\"kierros {i}\")\n vastaus1 = input(\"pelaaja1: \")\n vastaus2 = input(\"pelaaja2: \")\n\n if self.kierroksen_voittaja(vastaus1, vastaus2) == 1:\n self.voitot1 += 1\n print(\"pelaaja 1 voitti\")\n elif self.kierroksen_voittaja(vastaus1, vastaus2) == 2:\n self.voitot2 += 1\n print(\"pelaaja 2 voitti\")\n else:\n pass # tasapeli\n\n print(\"peli päättyi, voitot:\")\n print(f\"pelaaja 1: {self.voitot1}\")\n print(f\"pelaaja 2: {self.voitot2}\")\n\nclass PisinSana(Sanapeli):\n def __init__(self, kierrokset: int):\n super().__init__(kierrokset)\n\n def kierroksen_voittaja(self, pelaaja1_sana: str, pelaaja2_sana: str):\n if len(pelaaja1_sana) > len(pelaaja2_sana):\n return 1\n elif len(pelaaja1_sana) < len(pelaaja2_sana):\n return 2\n else:\n pass\n\nclass EnitenVokaaleja(Sanapeli):\n def __init__(self, kierrokset: int):\n super().__init__(kierrokset)\n\n def kierroksen_voittaja(self, pelaaja1_sana: str, pelaaja2_sana: str):\n 
counter1 = pelaaja1_sana.count('ä') + pelaaja1_sana.count('a') + pelaaja1_sana.count('å') + pelaaja1_sana.count('e') + pelaaja1_sana.count('i') + pelaaja1_sana.count('o') + pelaaja1_sana.count('ö') + pelaaja1_sana.count('u') + pelaaja1_sana.count('y')\n counter2 = pelaaja2_sana.count('ä') + pelaaja2_sana.count('a') + pelaaja2_sana.count('å') + pelaaja2_sana.count('e') + pelaaja2_sana.count('i') + pelaaja2_sana.count('o') + pelaaja2_sana.count('ö') + pelaaja2_sana.count('u') + pelaaja2_sana.count('y')\n if counter1 > counter2:\n return 1\n elif counter1 < counter2:\n return 2\n else:\n pass\n\nclass KiviPaperiSakset(Sanapeli):\n def __init__(self, kierrokset: int):\n super().__init__(kierrokset)\n \n def kierroksen_voittaja(self, pelaaja1_sana: str, pelaaja2_sana: str):\n if pelaaja1_sana in [\"kivi\", \"paperi\", \"sakset\"] and pelaaja2_sana in [\"kivi\", \"paperi\", \"sakset\"]:\n if pelaaja1_sana == \"kivi\": # rock\n if pelaaja2_sana == \"paperi\":\n return 2\n elif pelaaja2_sana == \"sakset\":\n return 1\n else:\n pass\n elif pelaaja1_sana == \"paperi\": # paper\n if pelaaja2_sana == \"kivi\":\n return 1\n elif pelaaja2_sana == \"sakset\":\n return 2\n else:\n pass\n elif pelaaja1_sana == \"sakset\": # scissors\n if pelaaja2_sana == \"kivi\":\n return 2\n elif pelaaja2_sana == \"paperi\":\n return 1\n else:\n pass\n else:\n pass\n elif pelaaja1_sana in [\"kivi\", \"paperi\", \"sakset\"] and pelaaja2_sana not in [\"kivi\", \"paperi\", \"sakset\"]:\n return 1\n elif pelaaja1_sana not in [\"kivi\", \"paperi\", \"sakset\"] and pelaaja2_sana in [\"kivi\", \"paperi\", \"sakset\"]:\n return 2\n else:\n pass\n\n\n\nif __name__ == \"__main__\":\n p = KiviPaperiSakset(3)\n p.pelaa()\n\n\"\"\"\nExample:\nyanjing@yanjingdeMacBook-Pro src % cd /Users/yanjing/Downloads/osa10/*/src; python3 *.py\nSanapeli:\nkierros 1\npelaaja1: kivi\npelaaja2: laiva\npelaaja 1 voitti\nkierros 2\npelaaja1: dynamiitti\npelaaja2: sakset\npelaaja 2 voitti\nkierros 3\npelaaja1: auto\npelaaja2: mopo\npeli päättyi, voitot:\npelaaja 1: 1\npelaaja 2: 1\n\"\"\"\n\n#osa10-5\n\"\"\"\nA class is ready in the task template SuperSankari.\nWrite a class SuperRyhma that models a group of superheroes.\nThe class must have the following characteristics:\n- Protected attributes name (string), domicile (string) and members (list)\n- A constructor that gets its name and domicile as its parameter in this order\n- Detection methods for name and domicile\n- A method lisaa_jasen(sankari: SuperSankari)that adds a new member to a group\n- A method tulosta_ryhmathat prints the information for a group and its members according to the example below\n\nExpected output:\nOutput:\nRyhmä Z, Kälviä\nJäsenet:\nSupermiekkonen, superkyvyt: Supernopeus, supervoimakkuus\nNäkymätön Makkonen, superkyvyt: Näkymättömyys\n\"\"\"\nclass SuperSankari:\n def __init__(self, nimi: str, supervoimat: str):\n self.nimi = nimi\n self.supervoimat = supervoimat\n\n def __str__(self):\n return f'{self.nimi}, superkyvyt: {self.supervoimat}'\n\nclass SuperRyhma():\n def __init__(self, nimi: str, kotipaikka: str):\n self._nimi = nimi\n self._kotipaikka = kotipaikka\n self._jasenet = []\n \n def nimi(self):\n return self._nimi\n \n def kotipaikka(self):\n return self._kotipaikka\n \n def lisaa_jasen(self, sankari: SuperSankari):\n self._jasenet.append(sankari)\n \n def tulosta_ryhma(self):\n print(\"{}, {}\".format(self.nimi(), self.kotipaikka()))\n print(\"Jäsenet:\")\n for element in self._jasenet:\n print(\"{}, superkyvyt: {}\".format(element.nimi, 
element.supervoimat))\n\nif __name__ == \"__main__\":\n supermiekkonen = SuperSankari(\"Supermiekkonen\", \"Supernopeus, supervoimakkuus\")\n nakymaton = SuperSankari(\"Näkymätön Makkonen\", \"Näkymättömyys\")\n ryhma_z = SuperRyhma(\"Ryhmä Z\", \"Kälviä\")\n\n ryhma_z.lisaa_jasen(supermiekkonen)\n ryhma_z.lisaa_jasen(nakymaton)\n ryhma_z.tulosta_ryhma()\n\n#osa10-6\n\"\"\"\nImplement SalainenTaikajuoma class that inherits from Taikajuoma class with the following characteristics:\n- SalainenTaikajuoma class receives a password (salasana) in the constructor.\n- The method lisaa_aines(ainesosa: str, maara: float, salasana: str) can be only called successfully with right password, otherwise, will raise ValueError exception.\n- The method tulosta_resepti(salasana: str) can be only called successfully with right password, otherwise, will raise ValueError exception.\n\nExpected output:\nyanjing@yanjingdeMacBook-Pro src % cd /Users/yanjing/Downloads/osa10/*/src; python3 *.py\nKutistus maksimus:\nKärpässieni 1.5 grammaa\nTaikahiekka 3.0 grammaa\nSammakonkutu 4.0 grammaa\nTraceback (most recent call last):\n File \"salainen_taikajuoma.py\", line 41, in \n kutistus.lisaa_aines(\"Sammakonkutuhhhhhh\", 8.0, \"hokkuspokkussss\")\n File \"salainen_taikajuoma.py\", line 27, in lisaa_aines\n raise ValueError(\"Väärä salasana!\")\nValueError: Väärä salasana!\n\"\"\"\nclass Taikajuoma:\n def __init__(self, nimi: str):\n self._nimi = nimi\n self._ainekset = []\n\n def lisaa_aines(self, ainesosa: str, maara: float):\n self._ainekset.append((ainesosa, maara))\n\n def tulosta_resepti(self):\n print(self._nimi + \":\")\n for aines in self._ainekset:\n print(f\"{aines[0]} {aines[1]} grammaa\")\n\nclass SalainenTaikajuoma(Taikajuoma):\n def __init__(self, nimi: str, salasana: str):\n super().__init__(nimi)\n self._salasana = salasana\n \n def salasana(self):\n return self._salasana\n \n def lisaa_aines(self, ainesosa: str, maara: float, salasana: str):\n if self.salasana() == salasana:\n super().lisaa_aines(ainesosa, maara)\n else:\n raise ValueError(\"Väärä salasana!\")\n\n def tulosta_resepti(self, salasana: str):\n if self.salasana() == salasana:\n super().tulosta_resepti()\n else:\n raise ValueError(\"Väärä salasana!\")\n\nif __name__ == \"__main__\":\n kutistus = SalainenTaikajuoma(\"Kutistus maksimus\", \"hokkuspokkus\")\n kutistus.lisaa_aines(\"Kärpässieni\", 1.5, \"hokkuspokkus\")\n kutistus.lisaa_aines(\"Taikahiekka\", 3.0, \"hokkuspokkus\")\n kutistus.lisaa_aines(\"Sammakonkutu\", 4.0, \"hokkuspokkus\")\n kutistus.tulosta_resepti(\"hokkuspokkus\")\n kutistus.lisaa_aines(\"Sammakonkutuhhhhhh\", 8.0, \"hokkuspokkussss\")\n kutistus.tulosta_resepti(\"pokkushokkus\") # VÄÄRÄ SALASANA!\n\n#osa10-7\n\"\"\"\nImplement the following methods for Raha class:\n- __init__(self) method to construct the integer and decimal part of the money\n- __str__(self) method to make sure the value of money is printed in the right format\n- __eq__, __lt__, __gt__, __ne__ to compare the two different amounts of money\n- __add__, __sub__ to operate the two different amounts of money, and return the object\n\nExpected output:\n7.70 eur\n1.80 eur\nTraceback (most recent call last):\n File \"raha.py\", line 58, in \n e5 = e2-e1\n File \"raha.py\", line 46, in __sub__\n raise ValueError(\"negatiivinen tulos ei sallittu\")\nValueError: negatiivinen tulos ei sallittu\n\"\"\"\nclass Raha:\n def __init__(self, eurot: int, sentit: int):\n self._eurot = eurot\n self._sentit = sentit\n\n def __str__(self):\n if self._sentit < 10:\n return 
f\"{self._eurot}.0{self._sentit} eur\"\n else:\n return f\"{self._eurot}.{self._sentit} eur\"\n\n def __repr__(self):\n if self._sentit < 10:\n return float(\"{}.0{}\".format(self._eurot, self._sentit))\n else:\n return float(\"{}.{}\".format(self._eurot, self._sentit))\n\n def __eq__(self, toinen):\n return self.__repr__() == toinen.__repr__()\n \n def __lt__(self, toinen):\n return self.__repr__() < toinen.__repr__()\n \n def __gt__(self, toinen):\n return self.__repr__() > toinen.__repr__()\n \n def __ne__(self, toinen):\n return self.__repr__() != toinen.__repr__()\n \n def __add__(self, toinen):\n result = round(self.__repr__() + toinen.__repr__(), 2)\n add_result = Raha(0, 0)\n add_result._eurot = int(result)\n add_result._sentit = int(round(result - add_result._eurot, 2) * 100)\n return add_result\n\n def __sub__(self, toinen):\n result = round(self.__repr__() - toinen.__repr__(), 2)\n if result >= 0:\n sub_result = Raha(0, 0)\n sub_result._eurot = int(result)\n sub_result._sentit = int(round(result - sub_result._eurot, 2) * 100)\n return sub_result\n else:\n raise ValueError(\"negatiivinen tulos ei sallittu\")\n\nif __name__ == \"__main__\":\n e1 = Raha(4, 75)\n e2 = Raha(2, 95)\n\n e3 = e1 + e2\n e4 = e1 - e2\n\n print(e3)\n print(e4)\n\n e5 = e2-e1\n\n#osa10-8\n\"\"\"\nImplement Paivays class with the following characteristics:\n- makes it possible to handle dates, but do not use datetime module, and assume every month has 30 days\n- Implement the class body and its comparison operators <,>, == and! =\n- Implement operator + for the date, and do not change the original date\n- Implement an operator for the date that returns the difference in dates in days\n\"\"\"\nclass Paivays:\n def __init__(self, date: int, month: int, year: int):\n self.date = date\n self.month = month\n self.year = year\n \n def __str__(self):\n return \"{}.{}.{}\".format(self.date, self.month, self.year)\n\n def __repr__(self):\n if self.date >= 10:\n if self.month >= 10:\n return \"{}{}{}\".format(self.year, self.month, self.date)\n else:\n return \"{}0{}{}\".format(self.year, self.month, self.date)\n else:\n if self.month >= 10:\n return \"{}{}0{}\".format(self.year, self.month, self.date)\n else:\n return \"{}0{}0{}\".format(self.year, self.month, self.date)\n \n def __eq__(self, toinen):\n return int(self.__repr__()) == int(toinen.__repr__())\n \n def __lt__(self, toinen):\n return int(self.__repr__()) < int(toinen.__repr__())\n \n def __gt__(self, toinen):\n return int(self.__repr__()) > int(toinen.__repr__())\n \n def __ne__(self, toinen):\n return int(self.__repr__()) != int(toinen.__repr__())\n \n # Assume every month has 30 days\n def __add__(self, days: int):\n new_date = Paivays(0, 0, 0)\n new_date.year = self.year + days//360\n new_date.month = self.month + days%360//30\n new_date.date = self.date + days%360%30\n if new_date.date > 30:\n new_date.date = new_date.date - 30\n new_date.month = new_date.month + 1\n if new_date.month > 12:\n new_date.month = new_date.month - 12\n new_date.year = new_date.year + 1\n return new_date\n \n def __sub__(self, dateb):\n return abs((self.year-1)*360 + (self.month-1)*30 + self.date - (dateb.year-1)*360 - (dateb.month-1)*30 - dateb.date)\n\nif __name__ == \"__main__\":\n pv1 = Paivays(19, 9, 1976)\n pv2 = Paivays(9, 10, 1976)\n print(pv2>pv1) # True\n\n#osa10-9\n\"\"\"\nMake the class Kauppalista iterable and the code block in main function can work properly\n\"\"\"\nclass Kauppalista:\n def __init__(self):\n self.tuotteet = []\n\n def tuotteita(self):\n 
return len(self.tuotteet)\n\n    def lisaa(self, tuote: str, maara: int):\n        self.tuotteet.append((tuote, maara))\n\n    def __iter__(self):\n        self.n = 0\n        return self\n    \n    def __next__(self):\n        if self.n < self.tuotteita():\n            element = self.tuotteet[self.n]\n            self.n += 1\n            return element\n        else:\n            raise StopIteration \n\nif __name__ == \"__main__\":\n    lista = Kauppalista()\n    lista.lisaa(\"banaanit\", 10)\n    lista.lisaa(\"omenat\", 5)\n    lista.lisaa(\"ananas\", 1)\n\n    for tuote in lista:\n        print(f\"{tuote[0]}: {tuote[1]} kpl\")","repo_name":"jingyan112/Python_TVT21A","sub_path":"osa10_all_script.py","file_name":"osa10_all_script.py","file_ext":"py","file_size_in_byte":18025,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10314511199","text":"from fastapi import FastAPI # import the FastAPI() class from the fastapi library\n\napp = FastAPI() # call the constructor and assign it to the app variable\n\n\n# @app.get(\"/\") # like Flask: declare the GET method and URL\n# async def root(): # async because of ASGI; drop async if a third-party dependency does not support it\n#     return {\"message\": \"Hello World\"}\n\n\n\n@app.get(\"/users/{user_id}/items/{item_id}\")\nasync def read_user_item(user_id: int, item_id: str):\n    item = {\"item_id\": item_id, \"owner_id\": user_id}\n    return item","repo_name":"Ravenclaw-Hcmut/intern_yolov5","sub_path":"apidemo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42080013895","text":"from django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.utils.translation import gettext as _\n\n\nclass AuthLoginView(SuccessMessageMixin, LoginView):\n    template_name = 'auth_form.html'\n    next_page = reverse_lazy('home')\n    success_message = _('You are logged in')\n    extra_context = {\n        'header': 'Sign In',\n        'button': 'Entry',\n    }\n\n\nclass AuthLogoutView(SuccessMessageMixin, LogoutView):\n    next_page = reverse_lazy('home')\n    success_message = _('You are logged out')\n\n    def dispatch(self, request, *args, **kwargs):\n        messages.info(self.request, self.success_message)\n        return super().dispatch(request, *args, **kwargs)\n","repo_name":"jespy666/btr_rental","sub_path":"btr/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3138664592","text":"# Create a shapefile from a CSV.\n\nfrom osgeo import ogr, osr\n\ncsv_fn = r\"D:\\osgeopy-data\\Galapagos\\Galapagos Albatrosses.csv\"\nshp_fn = r\"D:\\osgeopy-data\\Galapagos\\albatross_dd.shp\"\nsr = osr.SpatialReference(osr.SRS_WKT_WGS84)\n\n# Create the shapefile with two attribute fields.\nshp_ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(shp_fn)\nshp_lyr = shp_ds.CreateLayer('albatross_dd', sr, ogr.wkbPoint)\nshp_lyr.CreateField(ogr.FieldDefn('tag_id', ogr.OFTString))\nshp_lyr.CreateField(ogr.FieldDefn('timestamp', ogr.OFTString))\nshp_row = ogr.Feature(shp_lyr.GetLayerDefn())\n\n
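# Editor's note (sketch): one ogr.Feature is reused for every CSV row below;\n# CreateFeature() copies the feature's current geometry and fields into the\n# layer, so only the values set inside the loop change between rows.\n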
# Open the csv and loop through each row.\ncsv_ds = ogr.Open(csv_fn)\ncsv_lyr = csv_ds.GetLayer()\nfor csv_row in csv_lyr:\n\n    # Get the x,y coordinates from the csv and create a point geometry.\n    x = csv_row.GetFieldAsDouble('location-long')\n    y = csv_row.GetFieldAsDouble('location-lat')\n    shp_pt = ogr.Geometry(ogr.wkbPoint)\n    shp_pt.AddPoint(x, y)\n\n    # Get the attribute data from the csv.\n    tag_id = csv_row.GetField('individual-local-identifier')\n    timestamp = csv_row.GetField('timestamp')\n\n    # Add the data to the shapefile.\n    shp_row.SetGeometry(shp_pt)\n    shp_row.SetField('tag_id', tag_id)\n    shp_row.SetField('timestamp', timestamp)\n    shp_lyr.CreateFeature(shp_row)\n\ndel csv_ds, shp_ds\n","repo_name":"cgarrard/osgeopy-code","sub_path":"Chapter7/listing7_5.py","file_name":"listing7_5.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"81"} +{"seq_id":"37678990977","text":"#! python3\n# censorCreditCard.py - Finds credit card numbers and censors all digits but final 4 \n\nimport pyperclip, re\n\ntext = str(pyperclip.paste())\n\nccRegex = re.compile(r'\\d{14,17}') \n\nmatches = []\n\nfor cc in ccRegex.findall(text):\n    matches.append(cc)\n\nstringMatches = '\\n'.join(matches)\n\n# keep only the final 4 digits visible, as the header comment promises\ncensoredRegex = ccRegex.sub(lambda m: '*' * (len(m.group()) - 4) + m.group()[-4:], stringMatches)\n\n\n\nprint(censoredRegex)\n\n\n\n\n\n\n","repo_name":"Josh-Woodcock/python-scripts","sub_path":"censoredCCDetails.py","file_name":"censoredCCDetails.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38806507082","text":"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nfrom src.dataset_management import filemanager\nfrom src.dataset_management import towfileseparator\n\nfrom pathlib import Path\n\nbasePath = Path(__file__).parent.parent.parent.as_posix()\nprint(basePath)\n\npath = basePath + \"/Dataset/iskra/\"\nsaving_path = basePath + \"/Dataset/csv_files/\"\nsaving_path2 = basePath + \"/Dataset/csv_files_tagged/\"\n\nfilemanager.get_all_csv(path)\ntowfileseparator.separate_tows(path, saving_path)\n\nfilenames = os.listdir(saving_path)\nfilenames.sort()\nfor csv in filenames:\n    dataframe = pd.read_csv(saving_path + csv)\n    if len(dataframe) > 1:\n        print(csv)\n        fig, axs = plt.subplots(2)\n        axs[0].set_title(\"Apertura(m)\")\n        try:\n            axs[0].plot(dataframe[\"Datos1(Fa)Estribor\"])\n        except:\n            try:\n                axs[0].plot(dataframe[\"Datos1(m)Estribor\"])\n            except:\n                axs[0].plot(dataframe[\"Datos1(°)Estribor\"])\n        axs[0].set_ylim([0, None])\n        axs[1].set_title(\"Profundidade(m)\")\n        #axs[1].set_ylim([0, None])\n        axs[1].plot(dataframe[\"Escalas(m)Estribor\"])\n        plt.pause(0.05)\n
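        # Labelling prompt (editor's gloss of the code below): '1'/'0' save a\n        # tagged copy with embarra=1/0; '2' deletes the source csv instead.\n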
->\")\n if embarra == \"1\" or embarra == \"0\":\n bad_input = False\n csv = csv + \"-embarra=\" + embarra + \".csv\"\n dataframe.to_csv(saving_path2 + csv)\n elif embarra == \"2\":\n os.remove(saving_path + csv)\n print(saving_path + csv + \" was removed\")\n bad_input = False\n else:\n print(\"Wrong input\")\n plt.show()\n plt.close()\n","repo_name":"AlejandroFernandezLuces/TFG","sub_path":"TFG-Research/src/dataset_management/manualtagger.py","file_name":"manualtagger.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"1865003480","text":"import random\n\nminimum = int(input(\"Enter the lower range\"))\nmaximum = int(input(\"Enter å higher range\"))\nmaxTries = 5\n\nmyRandomNumber = (random.randint(minimum, maximum))\nprint(\"Guess a number betwin\",minimum,\"and\", maximum)\ntheNumberGuessed = int(input())\n#print(myRandomNumber)\ntries = 0\n\nwhile(tries < maxTries):\n if(theNumberGuessed != myRandomNumber):\n print(\"Incorrct : Try again!\")\n tries = tries + 1\n theNumberGuessed = int(input(\"Guess again\"))\n if(theNumberGuessed == myRandomNumber):\n print(\"Wohoo!!\")\n exit(0)\n\n\nprint(\"Unfortunately all the guesses were Wrong\")\n\n#What if we enter numbers outside the range\n#Make a way to continue playing the game\n","repo_name":"jeff87b/Python-module-1","sub_path":"Day 1-5/011 New python file 4.py","file_name":"011 New python file 4.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5969426828","text":"import frappe\nfrom frappe.core.doctype.communication.email import get_attach_link\nfrom frappe.utils import get_url\nfrom frappe.core.utils import get_parent_doc\nfrom requests.utils import requote_uri\n\n@frappe.whitelist()\ndef set_access_token(name, token):\n frappe.db.set_value('Sales Invoice', name, 'access_token', token)\n\n\n@frappe.whitelist()\ndef get_attachment_link(doc, print_format):\n doc = frappe.get_doc('Sales Invoice', doc)\n setattr(doc, 'reference_doctype', 'Sales Invoice')\n setattr(doc, 'reference_name', doc.name)\n\n key = doc.get_signature()\n\n # Not Supported in python 2\n # link = f'{ get_url() }/{ doc.doctype }/{ doc.name }?format={ print_format}&key={ key }'\n link = \"{url}/{doctype}/{name}?format={print_format}&key={key}\".format(\n url = get_url(),\n doctype = doc.doctype,\n name = doc.name,\n print_format = print_format,\n key = key\n )\n link = requote_uri(link)\n return link","repo_name":"mhbu50/genome","sub_path":"genome/api/sales_invoice.py","file_name":"sales_invoice.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3227604062","text":"from setuptools import find_packages, setup\nfrom pathlib import Path\n\n# Project metadata\nNAME = 'minigun-soren-n'\nDESCRIPTION = 'A library for property-based testing of Python programs.'\nURL = 'https://github.com/soren-n/minigun'\nOTHER_URL = {\n \"Bug Tracker\": 'https://github.com/soren-n/minigun/issues',\n}\nEMAIL = 'sorennorbaek@gmail.com'\nAUTHOR = 'Soren Norbaek'\nREQUIRES_PYTHON = '>=3.10.1'\n\n# Define long description\nreadme_path = Path('README.md')\nwith readme_path.open('r', encoding = 'utf-8') as readme_file:\n LONG_DESCRIPTION = '\\n%s' % readme_file.read()\n\n# Define version\ninit_path = Path('minigun/__init__.py')\nwith init_path.open('r', encoding = 'utf-8') as init_file:\n 
# Define version\ninit_path = Path('minigun/__init__.py')\nwith init_path.open('r', encoding = 'utf-8') as init_file:\n    VERSION = init_file.readline().split(' = ')[1][1:-2]\n\n# Read requirements\nrequirements_path = Path('requirements.txt')\ninstall_requires = []\nwith open(requirements_path) as requirements_file:\n    install_requires = requirements_file.readlines()\n\nsetup(\n    name = NAME,\n    license = 'MIT',\n    version = VERSION,\n    description = DESCRIPTION,\n    long_description = LONG_DESCRIPTION,\n    long_description_content_type = 'text/markdown',\n    url = URL,\n    author = AUTHOR,\n    author_email = EMAIL,\n    python_requires = REQUIRES_PYTHON,\n    install_requires = install_requires,\n    packages = find_packages(exclude = [\"tests\", \"docs\", \"examples\"]),\n    entry_points = {},\n    classifiers = [\n        'Development Status :: 4 - Beta',\n        'Environment :: Console',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: MacOS',\n        'Operating System :: Microsoft',\n        'Operating System :: POSIX :: Linux',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.10'\n    ]\n)\n","repo_name":"soren-n/minigun-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5429065089","text":"#!/usr/bin/python3\nfrom pyspark.sql import SparkSession\n\nif __name__ == \"__main__\":\n    # Create SparkSession\n    spark = SparkSession \\\n        .builder \\\n        .getOrCreate()\n\n    # Create a DataFrame from a CSV file\n    memberProfile_df = spark.read.csv(\"hdfs://localhost/user/cloudera/SparkSQLBehaviorsAnalysis4Linechatbot/data/memberProfile4MySQL\", header=True)\n\n\n    \n\n\n#========================= (Launch Spark by 「Interactive Development Environment」)\n# pyspark --master spark://172.21.0.2:7077\n\n#========================= (Launch Spark by 「Shell Script」)\n# spark-submit --master spark://172.21.0.2:7077 message_event_analysis.py\n\n","repo_name":"OnionTraveler/LinechatbotBehaviorsAnalysis4SparkSQL","sub_path":"analysisProject/memberProfile2df.py","file_name":"memberProfile2df.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33116349990","text":"### if it dies, change it yourself :)\n\nimport json\nfrom urllib import request\n\nprint(\"Enter the number in the format 8xxxx\")\nno = input(\"enter no: \")\nurl = f\"https://python-api-zhirrr.herokuapp.com/api/spamcall?no={no}\"\nresponse = request.urlopen(url)\ndata = json.loads(response.read())\nprint(data['logs'])\n","repo_name":"mekoid/spamcall","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38781939308","text":"\"\"\"Unittests for the models in frijay app\n\nModels to be tested: Event, Reservation\n\"\"\"\nimport unittest\nfrom unittest.mock import Mock\n\n\n# Create your tests here.\nclass UnittestEventModel(unittest.TestCase):\n    \"\"\"Unittests for the Event Model\"\"\"\n    def test_mock_string_representation(self):\n        \"\"\"Unit test the string representation\n\n        Mocks an Event model and checks if __str__() returns the title\n        \"\"\"\n        from frijay.models import Event\n\n        mock_instance = Mock(spec=Event)\n        mock_instance.title = \"My title\"\n        self.assertEqual(Event.__str__(mock_instance), \"My title\")\n\n\nclass UnittestReservationModel(unittest.TestCase):\n    \"\"\"Unittests for the Reservation Model\"\"\"\n    def 
test_mock_string_representation(self):\n \"\"\"Unit test the string representation\n\n Mocks a Reservation model and checks if __str__() returns the\n Reservation's event title\n \"\"\"\n from frijay.models import Reservation\n\n mock_instance = Mock(spec=Reservation)\n mock_instance.event.name = \"My Event title\"\n self.assertEqual(Reservation.__str__(mock_instance),\n \"My Event title\")\n","repo_name":"Cartoonman/shabbattable","sub_path":"frijay/tests/tests_models.py","file_name":"tests_models.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30541718883","text":"# -*- coding: utf-8 -*-\n# Create your views here.\n\n####################\n####################################################\n####################################################\n#### Django --> REST Framework\n#######################################################\n# ###################\n####################################################\n####################\n###\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.models import Group\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny\nfrom accounts.serializers import UserSerializer, GroupSerializer\nfrom accounts.permissions import IsStaffOrTargetUser\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n####################################################\n############\n####################################################\n####################################################\n############\n####################################################\n\nfrom searcher.models import PostReadyOriginal, ProductSnapshotLive, ExcelToolData\nfrom accounts.serializers import PostReadyOriginalSerializer, ProductSnapshotLiveSerializer, ExcelToolDataSerializer\n\nclass PostReadyOriginalViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows PostReadyOriginal to be viewed or edited.\n \"\"\"\n queryset = PostReadyOriginal.objects.all()\n serializer_class = PostReadyOriginalSerializer\n\nclass ProductSnapshotLiveViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows ProductSnapshotLive to be viewed or edited.\n \"\"\"\n queryset = ProductSnapshotLive.objects.all()\n serializer_class = ProductSnapshotLiveSerializer\n\nclass ExcelToolDataViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows ExcelToolData to be viewed or edited.\n \"\"\"\n queryset = ExcelToolData.objects.all()\n serializer_class = ExcelToolDataSerializer\n\n\n#####\n#####\n\nfrom searcher.models import SupplierIngest, SupplierIngest404, ImageUpdate, LookletShotList\nfrom accounts.serializers import SupplierIngestSerializer, SupplierIngest404Serializer, ImageUpdateSerializer, LookletShotListSerializer\n\nclass SupplierIngestViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows SupplierIngest to be viewed or edited.\n \"\"\"\n queryset = SupplierIngest.objects.all()\n serializer_class = SupplierIngestSerializer\n\nclass SupplierIngest404ViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows SupplierIngest404 to be viewed or edited.\n 
\"\"\"\n queryset = SupplierIngest404.objects.all()\n serializer_class = SupplierIngest404Serializer\n\nclass ImageUpdateViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows ImageUpdate to be viewed or edited.\n \"\"\"\n queryset = ImageUpdate.objects.all()\n serializer_class = ImageUpdateSerializer\n\n\nclass LookletShotListViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows LookletShotList to be viewed or edited.\n \"\"\"\n queryset = LookletShotList.objects.all()\n serializer_class = LookletShotListSerializer\n\n\n## REST_FRAMEWORK Browsable views\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\n@api_view(['GET', 'POST', 'PUT'])\n@permission_classes((IsAuthenticated, ))\ndef image_update_list(request, pk=None, alt=1, colorstyle=None,updated_by=None):\n \"\"\"\n List all image_updates, or create a new image_update.\n \"\"\"\n try:\n updated_by = request.data['updated_by']\n except:\n updated_by = 'ingest01'\n pass\n\n if request.method == 'GET':\n image_updates = ImageUpdate.objects.all()\n serializer = ImageUpdateSerializer(image_updates, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ImageUpdateSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n elif request.method == 'PUT':\n if not colorstyle:\n colorstyle = request.data['colorstyle']\n try:\n image_update = ImageUpdate.objects.get(updated_by=updated_by,colorstyle=colorstyle,alt=alt)\n except ImageUpdate.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ImageUpdateSerializer(image_update, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET', 'PUT', 'POST'])\n@permission_classes((IsAuthenticated,))\ndef image_update_detail(request, format=None, pk=None,alt=1,colorstyle=None,updated_by=None):\n \"\"\"\n Retrieve, update or delete an ImageUpdate instance.\n \"\"\"\n try:\n if not colorstyle:\n colorstyle = request.GET['colorstyle']\n except:\n pass\n\n try:\n updated_by = request.data['updated_by']\n except:\n updated_by = 'djdam'\n pass\n\n try:\n image_update = ImageUpdate.objects.get(updated_by=updated_by,colorstyle=colorstyle, alt=1)\n except ImageUpdate.DoesNotExist:\n try:\n image_update = ImageUpdate.objects.get(updated_by=updated_by,colorstyle=colorstyle,alt=alt)\n except ImageUpdate.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ImageUpdateSerializer(image_update)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n if not colorstyle:\n colorstyle = request.data['colorstyle']\n image_update = ImageUpdate.objects.get(updated_by=updated_by,colorstyle=colorstyle, alt=alt)\n serializer = ImageUpdateSerializer(image_update, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'POST':\n serializer = ImageUpdateSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return 
Response(serializer.data, status=status.HTTP_201_CREATED)\n # else if there do a put update\n else:\n image_update = ImageUpdate.objects.get(updated_by=updated_by, colorstyle=colorstyle, alt=alt)\n serializer = ImageUpdateSerializer(image_update, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'POST' and ImageUpdate.objects.get(colorstyle=colorstyle, alt=alt):\n if not colorstyle:\n colorstyle = request.data['colorstyle']\n image_update = ImageUpdate.objects.get(updated_by=updated_by,colorstyle=colorstyle, alt=alt)\n serializer = ImageUpdateSerializer(image_update, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n image_update = ImageUpdate.objects.get(colorstyle=colorstyle, alt=alt)\n image_update.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n## angular and rest\nfrom django.views.generic.base import TemplateView\n\n####### Angular API #######\n\nclass UserView(viewsets.ModelViewSet):\n serializer_class = UserSerializer\n model = User\n\n def get_permissions(self):\n # allow non-authenticated user to create via POST\n return (AllowAny() if self.request.method == 'POST'\n else IsStaffOrTargetUser()),\n\n###################################\n###################################\n\nfrom django.contrib.auth import login, logout\nfrom . import authentication\n\nclass AuthView(TemplateView):\n authentication_classes = (authentication.QuietBasicAuthentication,)\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response({})\n\n\n####################\n\nclass OnePageAppView(TemplateView):\n template_name = 'one_page_app.html'\n\n\n####################\n####################\n####################\nfrom rest_framework.authentication import TokenAuthentication, BasicAuthentication, SessionAuthentication\n@api_view(['GET', 'POST', 'PUT'])\n@permission_classes((IsAuthenticated, ))\n@authentication_classes(TokenAuthentication)\ndef looklet_shot_list_update_list(request,\n content_format=None,\n pk=None,\n colorstyle=None,\n photodate=None,\n reshoot=False,\n timestamp=None,\n username=None):\n \"\"\"\n List all looklet_shot_list_updates, or create a new looklet_shot_list_update.\n \"\"\"\n\n if request.method == 'GET':\n looklet_shot_list_updates = LookletShotList.objects.all()\n serializer = LookletShotListSerializer(looklet_shot_list_updates, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = LookletShotListSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'PUT':\n if not colorstyle:\n colorstyle = request.data['colorstyle']\n try:\n looklet_shot_list_update = LookletShotList.objects.get(colorstyle=colorstyle)\n except LookletShotList.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = LookletShotListSerializer(looklet_shot_list_update,\n data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return 
@api_view(['GET', 'PUT', 'POST', 'DELETE'])\n@permission_classes((IsAuthenticated,))\n@authentication_classes((TokenAuthentication,))\ndef looklet_shot_list_update_detail(request,\n                                    content_format=None,\n                                    pk=None,\n                                    colorstyle=None,\n                                    photodate=None,\n                                    reshoot=False,\n                                    timestamp=None,\n                                    username=None):\n    \"\"\"\n    Retrieve, update or delete a LookletShotList instance.\n    \"\"\"\n    if not colorstyle:\n        colorstyle = request.data['colorstyle']\n        #photodate = request.data['photodate']\n\n    looklet_shot_list_update = None\n    try:\n        looklet_shot_list_update = LookletShotList.objects.get(colorstyle=colorstyle)\n    except LookletShotList.DoesNotExist:\n        pass\n\n    if request.method == 'GET':\n        if not looklet_shot_list_update:\n            looklet_shot_list_updates = LookletShotList.objects.all()\n        else:\n            looklet_shot_list_updates = [looklet_shot_list_update]\n        serializer = LookletShotListSerializer(looklet_shot_list_updates, many=True)\n        return Response(serializer.data)\n\n    elif request.method == 'POST':\n        serializer = LookletShotListSerializer(data=request.data)\n        try:\n            # an existing record means there is nothing to create; fall through\n            looklet_shot_list_update = LookletShotList.objects.get(colorstyle=colorstyle)\n        except LookletShotList.DoesNotExist:\n            if serializer.is_valid():\n                serializer.save()\n                return Response(serializer.data, status=status.HTTP_201_CREATED)\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'PUT':\n        if not colorstyle:\n            colorstyle = request.data['colorstyle']\n        looklet_shot_list_update = LookletShotList.objects.get(colorstyle=colorstyle)\n        serializer = LookletShotListSerializer(looklet_shot_list_update,\n                                               data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        looklet_shot_list_update = LookletShotList.objects.get(colorstyle=colorstyle)\n        looklet_shot_list_update.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n","repo_name":"connectthefuture/djdam165","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3811261341","text":"import struct\nimport btcp.constants\n\n\ndef create_segments(data, isn):\n    \"\"\"\n    Chop the data bytes into segments with max payload and return a list with all the segments.\n    :param isn: The initial sequence number.\n    \"\"\"\n    segments = []\n    seq_num = isn\n    while len(data) > 0:\n        segment = ascii_to_bytes(seq_num, 0, [False, False, False], 0, data[:btcp.constants.PAYLOAD_SIZE])\n        segments.append(segment)\n        seq_num += 1\n        data = data[btcp.constants.PAYLOAD_SIZE:]\n    return segments\n\n\ndef merge_segments(data):\n    \"\"\"\n    Merge the data together.\n    :param data: Tuples with (seq_num, data bytes).\n    :return: The data bytes sorted without the sequence numbers.\n    \"\"\"\n    return b''.join([data for (_, data) in sorted(data)])\n\n\ndef calculate_checksum(segment):\n    \"\"\"\n    Calculate the Internet Checksum as defined by RFC 1071. 
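For example, the 16-bit words of b'\\x01\\x02\\x03\\x04' sum to 0x0102 + 0x0304 = 0x0406, so the returned checksum is the one's complement, b'\\xfb\\xf9'. 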
If the data is not divisible by 16 bits then the data is\n padded with zeros until it is.\n :return: The Internet Checksum computed over the given data (with padding).\n \"\"\"\n if len(segment) % 2 != 0:\n segment += b'\\x00'\n\n checksum = 0\n for pair in range(0, len(segment), 2):\n current = (segment[pair] << 8) + segment[pair + 1]\n if checksum + current >= 2**16:\n checksum = (checksum + current) % 2**16 + 1\n else:\n checksum += current\n checksum = checksum ^ 0xffff\n return checksum.to_bytes(2, byteorder='big')\n\n\ndef valid_checksum(segment):\n \"\"\"\n Validate the Internet Checksum as defined by RFC 1071.\n :return: If the checksum is correct yes or no.\n \"\"\"\n return calculate_checksum(segment) == b'\\x00\\x00'\n\n\ndef flags_array_to_byte(flags):\n \"\"\"\n :param flags: Boolean array containing flags as follows [ACK,SYN,FIN].\n :return: A byte containing the flags in the first few bits.\n \"\"\"\n if len(flags) > 8:\n raise ValueError(\"The maximum size of the flags array is 8, this is exceeded.\")\n\n total = 0\n for index, flag in enumerate(flags):\n if flag:\n total += 1 * 2**index\n return total.to_bytes(1, byteorder='big')\n\n\ndef flags_byte_to_array(byte):\n \"\"\"\n :param byte: Byte containing flags in the first few bits.\n :return: Boolean array containing the flags as follows [ACK,SYN,FIN].\n \"\"\"\n if len(byte) != 1:\n raise ValueError(\"More or less than one byte are given.\")\n\n total = int.from_bytes(byte, byteorder='big')\n flags = []\n for index in range(2, -1, -1):\n if total - (2**index) >= 0:\n flags.append(True)\n total -= 2**index\n else:\n flags.append(False)\n flags.reverse()\n return flags\n\n\ndef ascii_to_bytes(seq_num, ack_num, flags, window_size, data):\n \"\"\"\n Create a segment from the data that is given. Note that the checksum and data length are calculated from this data.\n :param flags: Boolean array containing flags as follows [ACK,SYN,FIN].\n :param data: The data to be send, already as bytes (for easier data chopping with unicode characters).\n :return: A bytes segment.\n :raises ValueError: If one of the values is out of range.\n \"\"\"\n if seq_num > 0xffff or seq_num < 0x0000:\n raise ValueError(\"The sequence number is out of range: {}.\".format(seq_num))\n if ack_num > 0xffff or ack_num < 0x0000:\n raise ValueError(\"The acknowledgement number is out of range: {}.\".format(ack_num))\n if len(flags) < 3 or len(flags) > 8:\n raise ValueError(\"The size of the flags array is out of range: {}.\".format(len(flags)))\n if window_size > 0xff or window_size < 0x00:\n raise ValueError(\"The window size is out of range: {}.\".format(window_size))\n if len(data) > btcp.constants.PAYLOAD_SIZE:\n raise ValueError(\"The data size is out of range: {}.\".format(data))\n\n header = seq_num.to_bytes(2, byteorder='big')\n header += ack_num.to_bytes(2, byteorder='big')\n header += flags_array_to_byte(flags)\n header += window_size.to_bytes(1, byteorder='big')\n header += len(data).to_bytes(2, byteorder='big')\n header += calculate_checksum(header + b'\\x00\\x00' + data)\n return header + data\n\n\ndef bytes_to_ascii(segment):\n \"\"\"\n Extract all of the data values from a segment.\n :return: All the data values. 
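The 10-byte header decodes, in line with ascii_to_bytes above, as seq_num (2 bytes), ack_num (2), flags (1), window_size (1), data length (2) and checksum (2). 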
The data value is of bytes and should only be decoded when all segments are received.\n :raises: ValueError If the checksum is not valid or if the data_length is different from the actual amount of data.\n \"\"\"\n seq_num = struct.unpack('>H', segment[0:2])[0]\n ack_num = struct.unpack('>H', segment[2:4])[0]\n flags = flags_byte_to_array(segment[4:5])\n window_size = struct.unpack('>B', segment[5:6])[0]\n data_length = struct.unpack('>H', segment[6:8])[0]\n data = segment[10:]\n\n if len(data) != data_length:\n raise ValueError(\"The data length is not equal to the actual amount of data.\")\n if not valid_checksum(segment):\n raise ValueError(\"The checksum is invalid.\")\n\n return seq_num, ack_num, flags, window_size, data\n\n","repo_name":"Borroot/btcp","sub_path":"src/btcp/btcp_socket.py","file_name":"btcp_socket.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42284674303","text":"from PIL import Image\r\n\r\n\r\ndef concatImage(images, mode=\"L\"):\r\n if not isinstance(images, list):\r\n raise Exception('images must be a list ')\r\n count = len(images)\r\n size = Image.fromarray(images[0]).size\r\n target = Image.new(mode, (size[0] * count, size[1] * 1))\r\n for i in range(count):\r\n image = Image.fromarray(images[i]).resize(size, Image.BILINEAR)\r\n target.paste(image, (i * size[0], 0, (i + 1) * size[0], size[1]))\r\n return target\r\n","repo_name":"YUHAN666/TensorFlow_HGA","sub_path":"utiles/concat_image.py","file_name":"concat_image.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41830438298","text":"import unittest\nfrom bookshelf.api_v2 import java\nfrom fabric.api import sudo, run\nfrom bookshelf.tests.api_v2.docker_based_tests import (\n with_ephemeral_container,\n prepare_required_docker_images\n)\n\n\nclass InstallOracleJavaTests(unittest.TestCase):\n\n @with_ephemeral_container(\n images=['ubuntu-vivid-ruby-ssh', 'ubuntu-trusty-ruby-ssh'])\n def test_install_oracle_java_installs_java_on_ubuntu(self, *args, **kwargs):\n java.install_oracle_java(distribution='ubuntu',\n java_version='8')\n\n self.assertRegexpMatches(\n run('java -version '),\n '.*java version \"1.8.*\".*'\n )\n\n @with_ephemeral_container(\n images=['ubuntu-vivid-ruby-ssh', 'ubuntu-trusty-ruby-ssh'])\n def test_install_oracle_java_raises_exception_on_failure(self,\n *args, **kwargs):\n sudo('echo > /etc/resolv.conf')\n with self.assertRaises(SystemExit) as cm:\n java.install_oracle_java(distribution='ubuntu',\n java_version='8')\n self.assertEqual(cm.exception.code, 1)\n\n\nif __name__ == '__main__':\n\n prepare_required_docker_images()\n unittest.main(verbosity=4, failfast=True)\n","repo_name":"pyBookshelf/bookshelf","sub_path":"bookshelf/tests/api_v2/test_java.py","file_name":"test_java.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"18319469680","text":"import spacy\nfrom spacy_syllables import SpacySyllables\n\nnlp = spacy.load(\"en_core_web_sm\")\nnlp.add_pipe(\"syllables\", after=\"tagger\", config={\"lang\": \"en_US\"})\n\ndef syllable_count(text):\n doc = nlp(text)\n data = [(token.text, token._.syllables, token._.syllables_count) for token in doc]\n\n cnt = 0\n cnt_dot = 0\n word_cnt = 0\n for d in data:\n cnt += d[2] if d[2] is not None else 0\n cnt_dot += 1 if d[0] == '.' 
else 0\n        word_cnt += 1 if d[2] is not None else 0\n    if data[-1][0] == '.':\n        cnt_dot -= 1\n    return data, word_cnt, cnt, cnt_dot\n\ndef speech_rate_syllable(talk_range, text=None, wpm_avg=0.4, dot_pause=0.5, spm_avg=0.2, ideal_length=32, ratio=((0.85, 1.0), (0.7, 0.85))):\n    # length = samples.shape[0] / sample_rate\n    length = 0\n    for (s, e) in talk_range:\n        length += (e-s) \n    if text is None:\n        w_time = s_time = ideal_length\n    else:\n        _, word_cnt, cnt, cnt_dot = syllable_count(text)\n        w_time = word_cnt * wpm_avg + cnt_dot * dot_pause\n        s_time = cnt * spm_avg + cnt_dot * dot_pause\n    w_rate = length / w_time\n\n    w_score = 0\n    if ratio[0][0] <= w_rate <= ratio[0][1]:\n        w_score = 4\n    elif ratio[1][0] <= w_rate <= ratio[1][1]:\n        w_score = 5\n    else:\n        w_score = 3\n    s_rate = length / s_time\n    s_score = 0\n    if ratio[0][0] <= s_rate <= ratio[0][1]:\n        s_score = 4\n    elif ratio[1][0] <= s_rate <= ratio[1][1]:\n        s_score = 5\n    else:\n        s_score = 3\n    return w_score, w_rate, s_score, s_rate, w_time, length\n","repo_name":"jojuo123/pte_silience","sub_path":"syllable.py","file_name":"syllable.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72469987466","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.font_manager as font_manager\n\n\n#plt.rcParams['font.family'] = ['Times New Roman']\nplt.rcParams.update({'font.size': 22}) \nplt.rcParams.update({'figure.autolayout': True})\n\n\n#store file names\nfname = [\"tpcbih\",\"employee\",\"multiset\"]\n#the column in the .csv file (each column is one line)\nnum_range = [\"PG-Seq\",\"PG-Nat\"]\nl_color = [\"Deepskyblue\",\"Red\"]\n\npath_dir = \"/reproducibility/experiment-scripts/result/\"\n\nplt.figure(figsize=(8,5)) \nfor f in range(len(fname)):\n    #current file name\n    cur_fn = fname[f]\n    #read data to pop\n    pop = pd.read_csv(path_dir + cur_fn + \".csv\")\n    width = 0.2 \n    N = len(pop['Query'])\n    x1 = np.arange(N)\n    for i in range(len(num_range)):\n        y1 = pop[num_range[i]]\n        #if(f==0):\n        plt.bar(x1+width*i, y1, width, alpha=0.5, label=num_range[i],color=l_color[i],bottom=0.01)\n    plt.yscale(\"log\",basey=10)\n    plt.legend(loc='best',prop={'size': 18}, ncol=2)\n    if(f==0):\n        plt.xticks(x1+width*i, ('Q1','Q5','Q6','Q7','Q8','Q9','Q12','Q14','Q19'))\n        #plt.legend(loc=8,ncol=2,prop={'size': 6})\n    elif(f==1):\n        plt.xticks(x1+width*i, ('join1','join2','join3','join4','agg1','agg2','agg3','agg-join','diff1','diff2'))\n    elif(f==2):\n        plt.xticks(x1+width*i, ('1k','10k','100k','300k','500k','1000k','3000k'))\n\n    plt.ylabel('Runtime',fontsize=30)\n    plt.tick_params(axis='x', which='major', labelsize=14)\n    plt.tick_params(axis='y', which='major', labelsize=14)\n\n    #save each plot to pdf\n    print (cur_fn + \".pdf\")\n    #plt.savefig(\"./\" + cur_fn + \".pdf\",dpi=600,format='pdf');\n    plt.savefig(path_dir + cur_fn + \".pdf\",format='pdf');\n    #clean current plot data\n    plt.clf();\n","repo_name":"IITDBGroup/2019-PVLDB-Reproducibility-Snapshot-Semantics-For-Temporal-Multiset-Relations","sub_path":"experiment-scripts/result/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"9238104608","text":"from aiogram import types, Dispatcher\nfrom app import keyboards as kb\nfrom app import database as db\nimport os\n\n\nasync def start(message: types.Message):\n    await db.user_start_bd(message.from_user.id)\n    await message.answer_sticker('CAACAgIAAxkBAAICN2ULFcy09FZNdAc29u-L9t4PPmZoAALEFAACVycAAUgj3EUU9pyDIDAE')\n\n    if message.from_user.id == int(os.getenv('ADMIN_ID')):\n        await message.answer('Hello, administrator, glad to see you))', reply_markup=kb.markup_admin)\n    else:\n        await message.answer(\n            f'Hi {message.from_user.first_name}!!! I am the new bot from Danya that is slowly becoming a full-fledged Shop)))',\n            reply_markup=kb.markup)\n\n\nasync def katal(message: types.Message):\n    await message.answer('Choose a product type', reply_markup=kb.cat_list_client)\n\n\nasync def read_items(message: types.Message):\n    await db.sql_read_items(message)\n\n\nasync def bask(message: types.Message):\n    await message.answer('Here is your Cart')\n\n\nasync def info(message: types.Message):\n    await message.answer('For any questions contact: @Dany_21it')\n\n\ndef reg_hendlers_client(dp: Dispatcher):\n    # NOTE: the text filters below must match the button labels defined in\n    # app.keyboards, so they are intentionally left in Russian.\n    dp.register_message_handler(start, commands=['start'])\n    dp.register_message_handler(katal, text=['Каталог'])\n    dp.register_message_handler(read_items, text=['Под-системы', 'Испарители', 'Жижа'])\n    dp.register_message_handler(bask, text=['Корзина'])\n    dp.register_message_handler(info, text=['Контакты'])\n","repo_name":"Danyfff/VapeShop_bot","sub_path":"handlers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30541341649","text":"import skfuzzy as fuzz\nimport numpy as np\nfrom skfuzzy import control as ctrl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score, make_scorer, get_scorer_names\n\nnames =['S1Temp','S2Temp','S3Temp','S1Light','S2Light','S3Light','PIR1','PIR2','Persons','Overcrowded','Slope_CO2','Parts of the day']\n\ndf = pd.read_csv('data_preprocessed.csv',usecols=names)\n\n\n\ndf['AvTemp'] = 100*(df['S1Temp'] + df['S2Temp'])/2\ndf['AvLight'] = (df['S1Light'] + df['S2Light'] + df['S3Light'])/3\n\n\n\ndf['2nd_slope'] = df['Slope_CO2'].diff(periods=50)\ndf = df.round()\ndf['AvPIR'] = (df['PIR1'] + df['PIR2'])/2\ndf['AvPIR'] = df['PIR1']\n#print(df)\n#print(df['Parts of the day'].max())\n\n#input\n\nX = df[['AvTemp','AvLight','2nd_slope','Slope_CO2']].to_numpy()\ny = df[[\"Overcrowded\"]].to_numpy()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42) \n\ny_train= y_train.astype('int')\ny_train=np.ravel(y_train)\n\ndf_x = pd.DataFrame(X_train, columns=[\"AvTemp\",\"AvLight\",\"Slope_CO2\",\"2nd_slope\"])\ndf_y = pd.DataFrame(y_train, columns=[\"Overcrowded\"])\n\ndf_x_test = pd.DataFrame(X_test, columns=[\"AvTemp\",\"AvLight\",\"Slope_CO2\",\"2nd_slope\"])\ndf_y_test = pd.DataFrame(y_test, columns=[\"Overcrowded\"])\n\ndf = pd.concat([df_x,df_y], axis = 1, join = 'inner')\ndf_test = pd.concat([df_x_test,df_y_test], axis = 1, join = 'inner')\n\nlight = ctrl.Antecedent(np.arange(0, 501, 1), 'light')\npir = ctrl.Antecedent(np.arange(0, 1.1, 0.1), 'pir')\nc02_slope = ctrl.Antecedent(np.arange(-300, 301, 1), 'c02_slope')\nc02_slope2nd = ctrl.Antecedent(np.arange(-300, 301, 1), 'c02_slope2nd')\ntemp = ctrl.Antecedent(np.arange(-100, 101, 1), 'temp')\n
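# quick membership sanity check (illustrative; uses skfuzzy's interp_membership):\n#   fuzz.interp_membership(pir.universe, fuzz.trimf(pir.universe, [0, 0, 0.3]), 0.15)\n# evaluates to 0.5, i.e. a PIR reading of 0.15 belongs to 'low' with degree 0.5.\n#output\novercrowded 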
= ctrl.Consequent(np.arange(0, 2, 1), 'overcrowded')\n\n\n#input membership\ntemp['low'] = fuzz.trapmf(temp.universe, [-100, -100, -40,-20])\ntemp['medium'] = fuzz.trapmf(temp.universe, [-30, -20,20, 30])\ntemp['high'] = fuzz.trapmf(temp.universe, [20, 30, 100,100])\n\n\nlight['low'] = fuzz.trapmf(light.universe, [0, 0, 90,110])\nlight['medium_low'] = fuzz.trapmf(light.universe, [90, 110, 190,210])\nlight['medium_high'] = fuzz.trapmf(light.universe, [190, 210, 340,360])\nlight['high'] = fuzz.trapmf(light.universe, [340, 360, 500,500])\n\nc02_slope['negative'] = fuzz.trimf(c02_slope.universe, [-300, -300, 25])\nc02_slope['positive'] = fuzz.trimf(c02_slope.universe, [-25, 300, 300])\n\nc02_slope2nd['negative'] = fuzz.trapmf(c02_slope.universe, [-300, -300,-50, 0])\nc02_slope2nd['constante'] = fuzz.trapmf(c02_slope.universe, [-25, 0, 25,50])\nc02_slope2nd['positive'] = fuzz.trapmf(c02_slope.universe, [25, 75, 300,300])\n\npir['low'] = fuzz.trimf(pir.universe, [0, 0, 0.3])\npir['middle'] = fuzz.trimf(pir.universe, [0.2, 0.5, 0.8])\npir['high'] = fuzz.trimf(pir.universe, [0.7, 1, 1])\n\n#output membership\novercrowded['false'] = fuzz.trimf(overcrowded.universe, [0, 0, 1])\novercrowded['true'] = fuzz.trimf(overcrowded.universe, [0, 1, 1])\n\n#view membership\n\"\"\"\"\ntemp.view()\nlight.view()\npir.view()\nc02_slope.view()\nc02_slope2nd.view()\n#overcrowded.view()\nplt.show()\n\"\"\"\n############# rules\n\"\"\"rule1 = ctrl.Rule(light['high'],overcrowded['true'])\nrule2 = ctrl.Rule(light['medium_high'],overcrowded['false'])\nrule3 = ctrl.Rule(light['medium_low'],overcrowded['false'])\nrule4 = ctrl.Rule(light['low'],overcrowded['false'])\"\"\"\n\n\nrule1 = ctrl.Rule(c02_slope['positive'] & c02_slope2nd['positive'] & temp['high'],overcrowded['true'])\nrule2 = ctrl.Rule(light['medium_low'] & ~(c02_slope['positive'] & c02_slope2nd['positive'] & temp['high']),overcrowded['false'])\nrule3 = ctrl.Rule((light['medium_high']|light['high']) & c02_slope['positive'] & c02_slope2nd['positive'] & temp['medium'],overcrowded['true'])\nrule4 = ctrl.Rule((light['medium_high'] & ~(c02_slope['positive'] & c02_slope2nd['positive'] & (temp['medium']|temp['high']))),overcrowded['false'])\nrule5 = ctrl.Rule((light['high'] & temp['high']),overcrowded['true'])\nrule6 = ctrl.Rule((light['high'] & temp['low']) ,overcrowded['false'])\nrule7 = ctrl.Rule((light['high'] & c02_slope['negative'] & (c02_slope2nd['positive'] | c02_slope2nd['negative'])& temp['medium']),overcrowded['false'])\nrule8 = ctrl.Rule((light['high'] & (c02_slope['negative'] | c02_slope['positive']) & (c02_slope2nd['constante'] | c02_slope2nd['negative']) & temp['medium']),overcrowded['true'])\nrule9 = ctrl.Rule((light['low'] & c02_slope['positive'] & c02_slope2nd['positive'] & temp['medium']),overcrowded['true'])\nrule10 = ctrl.Rule((light['low'] & (c02_slope['negative'] | c02_slope2nd['negative'])),overcrowded['false'])\nrule11 = ctrl.Rule((light['low'] & c02_slope['positive'] & (c02_slope2nd['constante'] | c02_slope2nd['positive']) & temp['low']),overcrowded['false'])\nrule12 = ctrl.Rule((light['low'] & c02_slope['positive'] & c02_slope2nd['constante'] & temp['high']),overcrowded['false'])\nrule13 = ctrl.Rule((light['low'] & c02_slope['positive'] & c02_slope2nd['constante'] & temp['medium']),overcrowded['false'])\n\n\novercrowded_ctrl = ctrl.ControlSystem([rule1, rule2, rule3,rule4,rule5,rule6,rule7,rule8,rule9,rule10,rule11,rule12,rule13])\n#overcrowded_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4])\n\nover = 
ctrl.ControlSystemSimulation(overcrowded_ctrl)\n\n\n##Training set #########################################\ndf['est_overcrowded'] = np.nan\n\nfor index,row in df.iterrows():\n #if(row)\n over.input['light'] = row['AvLight']\n over.input['c02_slope2nd'] = row['2nd_slope']\n over.input['temp'] = row['AvTemp']\n over.input['c02_slope'] = row['Slope_CO2']\n #Crunch the numbers\n over.compute()\n df.iloc[index,5] = over.output['overcrowded'].round()\n\n\n##Test set ###################################################\ndf_test['est_overcrowded'] = np.nan\n\nfor index,row in df_test.iterrows():\n #if(row)\n over.input['light'] = row['AvLight']\n over.input['c02_slope2nd'] = row['2nd_slope']\n over.input['temp'] = row['AvTemp']\n over.input['c02_slope'] = row['Slope_CO2']\n #Crunch the numbers\n over.compute()\n df_test.iloc[index,5] = over.output['overcrowded'].round()\n\n#print(df)\n\ncolors = ['blue','red']\ncolormap = matplotlib.colors.ListedColormap(colors)\nx = df.index\n\nplt.figure()\nplt.title(\"Light\")\ny = df['AvLight']\nplt. scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\n\nplt.figure()\nplt.title(\"CO2\")\ny = df['Slope_CO2']\nplt. scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\n\n\n\"\"\"plt.figure()\nplt.title(\"Persons\")\ny = df['Persons']\nplt. scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\"\"\"\n\nplt.figure()\nplt.title(\"co2 declive\")\ny = df['Slope_CO2'].diff(periods=50)\nplt. scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\n\nplt.figure()\nplt.title(\"temp\")\ny = df['AvTemp']\nplt. scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\n\n\"\"\"plt.figure()\nplt.title(\"pir\")\ny = df['AvPIR']\nplt. 
scatter(x, y, c=np.where(df['Overcrowded'] == df['est_overcrowded'], 0, 1), cmap=colormap)\"\"\"\n\n#check TP fp fn tn\ndf['tp'] = np.where((df['Overcrowded'] == 1) & (df['est_overcrowded'] == 1), 1, 0)\ndf['fp'] = np.where((df['Overcrowded'] == 0) & (df['est_overcrowded'] == 1), 1, 0)\ndf['fn'] = np.where((df['Overcrowded'] == 1) & (df['est_overcrowded'] == 0), 1, 0)\ndf['tn'] = np.where((df['Overcrowded'] == 0) & (df['est_overcrowded'] == 0), 1, 0)\n\ntp = df['tp'].sum()\nfp = df['fp'].sum()\nfn = df['fn'].sum()\ntn = df['tn'].sum()\n\naccuracy = (tp+tn)/(tp+fp+fn+tn)\nprecision = tp/(tp+fp) \nrecall = tp/(tp+fn)\nspecificity = tn/(tn+fp)\nf1score = 2*precision*recall/(precision+recall)\n\nprint(\"Fuzzy System: \\n\")\nprint(f\"accuracy: {accuracy}\")\nprint(f\"precision: {precision}\")\nprint(f\"recall: {recall}\")\nprint(f\"specificity: {specificity}\")\nprint(f\"f1: {f1score}\")\nprint(\"\\n\")\n\n#check TP fp fn tn\ndf_test['tp'] = np.where((df_test['Overcrowded'] == 1) & (df_test['est_overcrowded'] == 1), 1, 0)\ndf_test['fp'] = np.where((df_test['Overcrowded'] == 0) & (df_test['est_overcrowded'] == 1), 1, 0)\ndf_test['fn'] = np.where((df_test['Overcrowded'] == 1) & (df_test['est_overcrowded'] == 0), 1, 0)\ndf_test['tn'] = np.where((df_test['Overcrowded'] == 0) & (df_test['est_overcrowded'] == 0), 1, 0)\n\ntp = df_test['tp'].sum()\nfp = df_test['fp'].sum()\nfn = df_test['fn'].sum()\ntn = df_test['tn'].sum()\n\naccuracy = (tp+tn)/(tp+fp+fn+tn)\nprecision = tp/(tp+fp) \nrecall = tp/(tp+fn)\nspecificity = tn/(tn+fp)\nf1score = 2*precision*recall/(precision+recall)\n\nprint(\"Fuzzy System: \\n\")\nprint(f\"accuracy: {accuracy}\")\nprint(f\"precision: {precision}\")\nprint(f\"recall: {recall}\")\nprint(f\"specificity: {specificity}\")\nprint(f\"f1: {f1score}\")\nprint(\"\\n\")\n\n\n\n\n##Classifier Binary\n\nx = df[[\"AvTemp\",\"AvLight\",\"Slope_CO2\",\"2nd_slope\",\"Overcrowded\"]].values\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ndf_new = pd.DataFrame(x_scaled, columns=[\"AvTemp\",\"AvLight\",\"Slope_CO2\",\"2nd_slope\",\"Overcrowded\"])\ndf_new = df_new.dropna(axis=0)\nX = df_new[{\"AvTemp\",\"AvLight\",\"Slope_CO2\",\"2nd_slope\"}].to_numpy()\ny = df_new[{\"Overcrowded\"}].to_numpy()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42) \n\ny_train= y_train.astype('int')\ny_train=np.ravel(y_train)\n\n\nsm = SMOTE(random_state=42)\nX_res, y_res = sm.fit_resample(X_train, y_train)\n\nclf = MLPClassifier(solver='lbfgs',activation='relu',random_state=1, max_iter=4000).fit(X_res, y_res)\n\ny_pred = clf.predict(X_test)\n\ny_test= y_test.astype('int')\ny_pred= y_pred.astype('int')\n\nprec = precision_score(y_test,y_pred)\nrecall = recall_score(y_test,y_pred)\nacc = accuracy_score(y_test,y_pred)\nf1 = f1_score(y_test,y_pred)\nprint(\"Precision: \",prec)\nprint(\"Recall: \",recall)\nprint(\"Accuracy: \",acc)\nprint(\"F1: \",f1)\n\nplt.show()\n","repo_name":"Ricardocafee/Projeto_CInte","sub_path":"fuzzy.py","file_name":"fuzzy.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13581911158","text":"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom sklearn.utils import resample\nfrom functions import beta_,mean_squared_error,R2,FrankeFunction,var,X_D\nimport numpy as np\nimport 
matplotlib.pyplot as plt\n\ndef OLS(x,y,z,degree,noise,z_star):\n\n X = X_D(x,y,5)\n beta = beta_(X,np.ravel(z))\n #We'll use this for the confidence interval of 95 percent\n\n\n #OlS without scaled data for fith degree polynomial\n \"\"\"\n beta = beta_(X,z)\n\n z_tilde = X.dot(beta)\n z_tilde_plot = np.reshape(z_tilde,(n,n))\n MSE = mean_squared_error(z,z_tilde)\n\n print(\"Degree = 5\")\n print(\"MSE = %.3f\"%MSE)\n R2_score = R2(z,z_tilde,np.mean(z))\n print(\"R2 score = %.3f\"%R2_score)\n \"\"\"\n\n #finding confidence interval\n #We have stochastic noise with sigma = 1\n #We can substract population mean from sample mean and divide by 1/sqrt(n) the standard deviation\n \"\"\"\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n\n # Plot the surface.\n surf = ax.plot_surface(x, y, z_tilde_plot, cmap=cm.coolwarm,linewidth=0, antialiased=False)\n\n # Customize the z axis.\n ax.set_zlim(-0.10, 1.40)\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter(\"%.02f\"))\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.show()\"\"\"\n\n\n #Scaling the data, to limit the most extreme points\n\n from sklearn.preprocessing import StandardScaler\n from sklearn.model_selection import train_test_split\n scaler = StandardScaler()\n deg = np.linspace(1,degree,degree)\n MSE_train = np.zeros(degree)\n MSE_test = np.zeros(degree)\n R2_score = np.zeros(degree)\n\n x_train,x_test,y_train,y_test,z_train,z_test = train_test_split(x,y,z,test_size = 0.3)\n\n z_train = np.ravel(z_train)\n z_test = np.ravel(z_test)\n\n MSE_minimum = 10 #The highest MSE we'll allow\n for i in range(1,degree+1):\n print(\"Degree = %.3f\"%i)\n X_train = X_D(x_train,y_train,i) #X will have the same values on the columns\n X_test = X_D(x_test,y_test,i)\n scaler.fit(X_train)\n X_train_scaled = scaler.transform(X_train)\n X_test_scaled = scaler.transform(X_test)\n X_train_scaled[:,0] = 1 #setting the first column = 1 because standard scaler sets it to 0\n X_test_scaled[:,0] = 1\n\n beta_scaled = beta_(X_train_scaled,z_train)\n var_beta = var(X_train_scaled,noise)\n z_tilde_scaled_train = X_train_scaled.dot(beta_scaled)\n z_tilde_scaled_test = X_test_scaled.dot(beta_scaled)\n\n MSE_train[i-1] = mean_squared_error(z_train,z_tilde_scaled_train)\n MSE_test[i-1] = mean_squared_error(z_test,z_tilde_scaled_test)\n R2_score[i-1] = R2(z_train,z_tilde_scaled_train,np.mean(z_train))\n\n if MSE_test[i-1] < MSE_minimum:\n ztilde_best = np.reshape(z_tilde_scaled_test,(x_test.shape[0],x_test.shape[1]))\n beta_best = beta_scaled\n MSE_minimum = MSE_test[i-1]\n i_best = i\n std_beta = np.sqrt(var_beta)*z_star\n print(\"R2_score %.3f %d\" %(R2_score[i-1],i))\n return MSE_train,MSE_test,beta_scaled,std_beta,ztilde_best,i_best,beta_best\n\nif __name__ == '__main__':\n\n np.random.seed(11)\n n = 50\n z_star = 1.96 #We want 95% confidence inerval\n x = np.random.uniform(0,1,n)\n y = np.random.uniform(0,1,n)\n x = np.sort(x)\n y = np.sort(y)\n x,y = np.meshgrid(x,y)\n noise = 0.1\n noise_arr = noise*np.random.randn(n,n)\n z =FrankeFunction(x,y)+noise\n degree = 5\n deg = np.linspace(0,degree,degree)\n MSE_train,MSE_test,beta,std_beta,_,_,_ = OLS(x,y,z,degree,noise,z_star)\n print(std_beta)\n plt.figure()\n plt.style.use(\"seaborn\")\n plt.title(\"MSE for train and test with n = {:d}\".format(n))\n plt.plot(deg,MSE_train,label=\"Train\")\n plt.plot(deg,MSE_test,label=\"Test\")\n plt.xlabel(\"Complexity\")\n plt.ylabel(\"MSE\")\n plt.legend()\n 
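# quick numeric readout of the curves above (illustrative addition; MSE_test is\n    # the array returned by OLS):\n    print(\"Lowest test MSE = %.4f at degree %d\" % (MSE_test.min(), np.argmin(MSE_test) + 1))\n    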
plt.savefig(\"./figures/a_test_train.jpg\",bbox_inches = 'tight',pad_inches = 0.1,dpi=1200)\n plt.show()\n\n\n x_axis = np.linspace(0,len(beta),len(beta))\n plt.figure()\n plt.title(\"Beta coefficients with their confidence intervals calculated for n = {:d}\".format(n))\n plt.xlabel(r\"$\\beta$ \")\n plt.errorbar(x_axis,beta,std_beta,fmt=\"o\")\n plt.savefig(\"./figures/a_beta.jpg\",bbox_inches = 'tight',pad_inches = 0.1,dpi=1200)\n plt.show()\n","repo_name":"jacobllie/FYS-STK4155","sub_path":"Project1/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"40495186380","text":"\"\"\"\nmpstest15.py\nA test of manipulating matrix product states with numpy.\nThere is an upper bound chi for bond dimensions in getMPSOBC()\nVariable bond dimension.\n2014-08-29\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cmath import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\ndef main():\n test3()\n\ndef test1():\n \"\"\" Test functions just for a simple case.\n \"\"\"\n mps0 = randomMPSOBC(3,3,2)\n state0 = getStateOBC(mps0)\n mps1 = getMPSOBC(state0,4)\n state1 = getStateOBC(mps1)\n print(\"Test completed. State Fidelity = %f. MPS Fidelity = %f\"\n %(np.absolute(fidelity(state0,state1)),\n np.absolute(fidelityMPS(mps0,mps1))))\n\ndef test3():\n \"\"\" Test MPS conversion functions by computing fidelity between\n generated MPS and orginal, with new and old bond dimensions\n chi0 and chi1 varied.\n \"\"\"\n print(\"*** Started testing MPS ***\")\n N = 5\n d = 2\n nTrials = 3\n # Points to plot on 3d graph\n (X,Y,Z) = ([],[],[])\n for chi0 in xrange(1,10):\n for chi1 in xrange(1,10):\n F = 0\n # Run random test for 20 points and take average fidelity\n for i in xrange(nTrials):\n mps0 = randomMPSOBC(N,chi0,d) # Make random MPS\n state0 = getStateOBC(mps0) # Convert to state\n mps1 = getMPSOBC(state0,chi1) # Convert back to MPS with new bond dimension\n state1 = getStateOBC(mps1) # Convert back to state\n F += np.absolute(fidelityMPS(mps0,mps1)) # Compute fidelity and add to sum\n # F += fidelity(state0,state1) # Uncomment this to try with vectors\n X.append(chi0)\n Y.append(chi1)\n Z.append(F/nTrials)\n X = np.array(X)\n Y = np.array(Y)\n Z = np.array(Z)\n # Plot the surface\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)\n ax.set_xlabel('chi0')\n ax.set_ylabel('chi1')\n ax.set_zlabel('fidelity')\n plt.show()\n print(\"*** Finished testing MPS ***\")\n\ndef fidelityMPS(A,B):\n \"\"\" Fidelity of two MPS representations\n f = /().\n \"\"\"\n return innerProductOBC(A,B)*innerProductOBC(B,A)\\\n /innerProductOBC(A,A)/innerProductOBC(B,B)\n\ndef fidelity(a,b):\n \"\"\" Fidelity of two state vectors\n f = /().\n \"\"\"\n return np.inner(np.conj(a),b)*np.inner(np.conj(b),a)\\\n /np.inner(np.conj(a),a)/np.inner(np.conj(b),b)\n\ndef randomMPSOBC(N,chi,d):\n \"\"\" Returns a random MPS given parameters N, chi, d.\"\"\"\n A = [randomComplex((d,chi))]\n for i in xrange(N-2):\n A.append(randomComplex((chi,d,chi)))\n A.append(randomComplex((chi,d)))\n return A\n\ndef getStateOBC(A):\n \"\"\"\n State vector of MPS with open boundary conditions.\n \"\"\"\n N = len(A) # Number of spins\n c = A[0]\n for i in xrange(1,N):\n c = np.tensordot(c,A[i],axes=(-1,0))\n return np.reshape(c,c.size)\n\ndef getMPSOBC(state,chi):\n \"\"\"\n Matrix product state representation of a state with bond\n dimension 
chi and open boundary conditions.\n    \"\"\"\n    d = 2 # Qubits have 2 states each\n    N = int(np.log2(len(state))) # Number of qubits\n##    show(N,\"N = \")\n\n    c = np.reshape(state,cShape(d,N)) # State amplitudes tensor c.\n    A = [] # List of N matrices of MPS, each of shape (chi,d,chi)\n\n    # Start left end with a vector of size (d,chi)\n    c = np.reshape(c,(d,d**(N-1))) # Reshape c\n    (ap,c) = efficientSVD(c,chi)\n    A.append(ap) # Contract and append to A\n\n##    printIntermediate(A,c,state,d,N)\n\n    # Sweep through the middle, creating matrix products each with\n    # shape (chi,d,chi)\n    for i in xrange(1,N-2):\n        #c = np.reshape(c,(d,chi,d**(N-i-1)))\n        #c = np.transpose(c,(1,0,2))\n##        print(\"Executing routine i = %d\"%i)\n##        show(c,\"c before reshape\")\n        c = np.reshape(c,(d*A[-1].shape[-1],c.size/(d*A[-1].shape[-1])))\n##        show(c,\"c after reshape\")\n        (a,c) = efficientSVD(c,chi)\n##        show(a,\"a\")\n##        show(c,\"c after svd\")\n        a = np.reshape(a,(A[-1].shape[-1],d,c.shape[0]))\n##        show(a,\"a after reshape, just before append\")\n        A.append(a)\n##        printIntermediate(A,c,state,d,N)\n    \n    # Finish right end with the remaining vector\n    c = np.reshape(c,(c.size/d,d))\n    (a,c) = efficientSVD(c,chi)\n    a = np.reshape(a,(A[-1].shape[-1],d,c.shape[0]))\n    A.append(a)\n    A.append(c)\n\n##    for a in A:\n##        print(a.shape)\n##    prod = A[0]\n##    for i in xrange(1,N):\n##        prod = np.tensordot(prod,A[i],axes=(-1,0))\n##    show(np.sum(np.absolute((prod-np.reshape(state,cShape(d,N))))),\n##         \"Difference\")\n    return A\n\ndef show(a,name):\n    \"\"\" Convenient space-saving print function.\n    \"\"\"\n    print(name)\n    print(np.round(np.absolute(a),2))\n\ndef printIntermediate(A,c,state,d,N):\n    \"\"\" Testing method which compares the contracted state from the matrix\n        products produced so far with the original state\n    \"\"\"\n    print(\"Printing intermediate with %d elements in A.\"%len(A))\n    prod = A[0]\n    for i in xrange(1,len(A)):\n        prod = np.tensordot(prod,A[i],axes=(-1,0))\n    prod = np.tensordot(prod,c,axes=(-1,0))\n    prod = np.reshape(prod,cShape(d,N))\n    fid = fidelity(state,np.reshape(prod,d**N))\n    dif = (np.sum(np.absolute(prod-np.reshape(state,cShape(d,N)))))\n    print(\"Difference = %f; fidelity = %f;\"%(dif,np.absolute(fid)))\n\ndef innerProductOBC(mpsA,mpsB):\n    \"\"\" Inner product using transfer matrices\n        where A and B are MPS representations of |A> and |B>\n        with open boundary conditions (OBC).\n    \"\"\"\n    # Take adjoint of |A> to get  None:\n        # Store the variable name\n        # --- used later for visualization\n        self.target_var_name = var_name\n\n        # Get the column index corresponding to the variable name\n        var_index = self.var_names.index(var_name)\n\n        # Build a linear grid\n        # --- n_grid points between the min/max of the target feature\n        value_range = np.linspace(\n            self.X[:, var_index].min(),\n            self.X[:, var_index].max(),\n            num=n_grid\n        )\n\n        # Model predictions\n        # --- one column per instance\n        individual_prediction = np.array([\n            self._counterfactual_prediction(var_index, x)[ids_to_compute]\n            for x in value_range\n        ])\n\n        # Build DataFrames\n        # --- collecting the ICE values\n        self.df_ice = \\\n            pd.DataFrame(data=individual_prediction, columns=ids_to_compute) \\\n            .assign(**{var_name: value_range}) \\\n            .melt(id_vars=var_name, var_name=\"instance\", value_name=\"ice\")\n\n        self.df_instance = \\\n            pd.DataFrame(data=self.X[ids_to_compute], columns=self.var_names) \\\n            .assign(instance=ids_to_compute,\n                    prediction=self.estimator.predict(self.X[ids_to_compute])) \\\n            .loc[:, [\"instance\", \"prediction\"] + self.var_names]\n\n    def plot(self, ylim: List[float]) -> None:\n        fig, ax = plt.subplots()\n\n        # Line plot\n        # --- the ICE curves\n        sns.lineplot(\n            self.target_var_name,\n            \"ice\",\n            units=\"instance\",\n            
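# NOTE: newer seaborn releases (>= 0.12) expect keyword arguments here, e.g.\n            # sns.lineplot(x=self.target_var_name, y=\"ice\", units=\"instance\", ...).\n            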
data=self.df_ice,\n            lw=0.8,\n            alpha=0.5,\n            estimator=None,\n            zorder=1,\n            ax=ax\n        )\n\n        # Scatter plot\n        # --- marks the actual prediction point for each instance\n        sns.scatterplot(\n            self.target_var_name,\n            \"prediction\",\n            data=self.df_instance,\n            zorder=2,\n            ax=ax\n        )\n\n        ax.set(xlabel=self.target_var_name, ylabel=\"Prediction\", ylim=ylim)\n        fig.suptitle(f\"Individual Conditional Exception ({self.target_var_name})\")\n\n        fig.show()\n\n\n# 2 Applying ICE to the simulated data ---------------------------------------\n\n# Create the explainer instance\nice = IndividualConditionalException(estimator=rf, X=X_test,\n                                     var_names=[\"X0\", \"X1\", \"X2\"])\n\n# For debugging\n# self = ice\n\n\n# ids_to_compute:0 --------------------------------\n\n# Compute ICE\n# --- X1 / instance 0\nice.individual_conditional_exception(var_name=\"X1\", ids_to_compute=[0])\n\n# Output\nice.df_instance\n\n# Create the plot\nice.plot(ylim=(-6, 6))\n\n\n# ids_to_compute:1 --------------------------------\n\n# Compute ICE\n# --- X1 / instance 1\nice.individual_conditional_exception(var_name=\"X1\", ids_to_compute=[1])\n\n# Output\nice.df_instance\n\n# Create the plot\nice.plot(ylim=(-6, 6))\n\n\n# ids_to_compute:0-20 --------------------------------\n\n# Compute ICE\n# --- X1 / instances 0-19\nice.individual_conditional_exception(var_name=\"X1\", ids_to_compute=range(20))\n\n# Output\nice.df_instance\n\n# Create the plot\nice.plot(ylim=(-6, 6))\n\n","repo_name":"delta0726/py-machine_learning","sub_path":"book/ml_interpret_book/chap5-3_Individual_Conditional_Exception.py","file_name":"chap5-3_Individual_Conditional_Exception.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"820933134","text":"#! python3\n# image_site_downloader.py\n\nimport requests\nimport bs4\nimport os\n\nDESIRED_SEARCH_TERM = str(input('Enter search term: '))\nSEARCH_URL = 'https://unsplash.com/search/photos/' + str(DESIRED_SEARCH_TERM)\n\n\ndef get_page_soup(url):\n    requests_object = requests.get(url)\n    requests_object.raise_for_status()\n    print('Downloading %s' % url)\n    page_soup = bs4.BeautifulSoup(requests_object.text, \"lxml\")\n    return page_soup\n\n\ndef get_list_of_image_urls(soup_object):\n    raw_links = soup_object.select('img[src]')\n    image_url_list = [item.get('src') for item in raw_links if\n                      '&w=1000' in item.get('src')]  # Purifies list to give search images\n    list_of_discrete_image_urls = list(set(image_url_list))\n    return list_of_discrete_image_urls\n\n\ndef write_images(list_of_image_urls):\n    for counter, image_url in enumerate(list_of_image_urls):\n        requests.get(image_url).raise_for_status()\n        image_object = requests.get(image_url).content\n        save_the_image(image_object, counter)\n\n\ndef save_the_image(image_object_to_write_to_file, counter):\n    folder_path_to_save_images_into = os.path.join(os.getcwd(), 'Search for ' + str(DESIRED_SEARCH_TERM).title() + ' Images')\n    os.makedirs(folder_path_to_save_images_into, exist_ok=True)\n    image_file_path = os.path.join(folder_path_to_save_images_into, str(DESIRED_SEARCH_TERM) + str(counter) + '.jpg')\n    with open(image_file_path, 'wb') as outfile:\n        outfile.write(image_object_to_write_to_file)\n    print('Written image %s\\n' % counter)\n\n\n# Engine\npage_soup = get_page_soup(SEARCH_URL)\nimage_url_list = get_list_of_image_urls(page_soup)\nwrite_images(image_url_list)\nprint('Done')\n\n# Solved! 
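:)\n\n# NOTE: write_images above fetches each URL twice (raise_for_status on a\n# throwaway response, then a second GET for the content); a single-request\n# variant with the same behaviour:\n#     response = requests.get(image_url)\n#     response.raise_for_status()\n#     save_the_image(response.content, counter)\n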
","repo_name":"SnowOx/dk","sub_path":"image_site_downloader.py","file_name":"image_site_downloader.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34084520785","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n \nimport socket\n\n\nCR = '\\r'\nLF = '\\n'\nCRLF = CR+LF\n\nhost = \"pop.mail.ru\"\nport = 110\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nfile = None  # module-level handle; conn() rebinds it to a file wrapping the socket\n\n\ndef conn():\n    sock.connect((host, port))\n    global file\n    file = sock.makefile('rb')\n    res = sock.recv(1024)\n    if res[:3] == \"+OK\":\n        return True\n    else:\n        return False\n\n\ndef send(command):\n    sock.send(command + CRLF)\n    return sock.recv(1024)\n\n\ndef auth(login, password):\n    if send(\"USER \" + login)[:3] == \"+OK\" and send(\"PASS \" + password)[:3] == \"+OK\":\n        return True\n    else:\n        return False\n\n\ndef list_cmd(number_message=None):\n\n    if not number_message:\n        messages_count = 0\n        octets_count = 0\n        numbers_and_octets_messages = []\n        result = send(\"LIST\")\n        if result[:3] == \"+OK\":\n            result = result.split(\"\\r\\n\")\n            line1 = result[0].split(\" \")\n            messages_count = int(line1[1])\n            octets_count = int(line1[3][1:])\n            for i in range(1, messages_count + 1):\n                number_octets_message = result[i].split(\" \")\n                try:\n                    numbers_and_octets_messages.append((int(number_octets_message[0]), int(number_octets_message[1])))\n                except (ValueError, IndexError):\n                    break\n            return (messages_count, octets_count, numbers_and_octets_messages)\n        else:\n            return False\n\n    else:\n        if isinstance(number_message, int):\n            result = send(\"LIST\")\n            if result[:3] == \"+OK\":\n                result = result.split(\" \")\n                return int(result[1]), int(result[3][1:])\n            else:\n                return False\n        else:\n            return False\n\n\ndef retr(number_message):\n    retr_res = send(\"RETR \" + str(number_message))\n    if retr_res[:3] == \"+OK\":\n        message = ''\n        while True:\n            line = file.readline()\n            if line != '.' 
+ CRLF:\n message += line\n else:\n break\n return message\n return False\n\n\ndef rset():\n if send(\"RSET\")[:3] == \"+OK\":\n return True\n else:\n return False\n\n\ndef stat():\n stat_res = send(\"STAT\")\n if stat_res[:3] == \"+OK\":\n stat_res = stat_res.split(' ')\n return (int(stat_res[1]), int(stat_res[2][:-2]))\n else:\n return False\n\n\ndef quit():\n if send(\"QUIT\")[:3] == \"+OK\":\n sock.close()\n return True\n else:\n sock.close()\n return False\n\n\n\n\n","repo_name":"Tomarrech/tmiyap_2013","sub_path":"kurs-mail/mailClientPOP.py","file_name":"mailClientPOP.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11570654557","text":"from sage.all import vector, matrix\nfrom sage.all import RootSystem\nfrom sage.all import var\n\ndef convertWeightToList(weight):\n arr = []\n for index, value in sorted(weight):\n while len(arr) < index:\n arr.append(0)\n arr.append(value)\n while len(arr) < weight.parent().dimension():\n arr.append(0)\n return arr\n\ndef buildBasisChangeToStandard(basis):\n bChange = [convertWeightToList(x) for x in basis]\n\n # insert standard e_i for i > len(basis) to make a square vector\n for i in range(len(bChange), len(bChange[0])):\n bChange.append([1 if j==i else 0 for j in range(0, len(bChange[0]))])\n\n # transpose gets simple to standard basis change, inverse reverses\n return matrix(bChange).transpose().inverse()\n\ndef getStandardToSimpleBasisChange(lie_algebra_name):\n simples = RootSystem(lie_algebra_name).ambient_space().simple_roots()\n return buildBasisChangeToStandard(simples)\n\ndef getStandardToFundamentalBasisChange(lie_algebra_name):\n fundamentals = RootSystem(lie_algebra_name).ambient_space().fundamental_weights()\n return buildBasisChangeToStandard(fundamentals)\n\ndef getFundamentalToSimpleBasisChange(lie_algebra_name):\n standard_to_simple_basis_change = getStandardToSimpleBasisChange(lie_algebra_name)\n standard_to_fundamental_basis_change = getStandardToFundamentalBasisChange(lie_algebra_name)\n return standard_to_simple_basis_change * standard_to_fundamental_basis_change.inverse()\n\ndef changeFundamentalWeightToSimple(lie_algebra_name, weight):\n fund_to_simple = getFundamentalToSimpleBasisChange(lie_algebra_name)\n\n while not len(weight) == fund_to_simple.ncols():\n weight.append(0)\n return list(fund_to_simple * vector(weight))\n\ndef getPositiveRoots(name):\n standard_to_simple_basis_change = getStandardToSimpleBasisChange(name)\n positive_roots = [vector(convertWeightToList(x)) for x in RootSystem(name).ambient_space().positive_roots()]\n return [standard_to_simple_basis_change * x for x in positive_roots]\n\ndef getVariableDictionary(lie_algebra, q_analog):\n s = \"\"\n if q_analog:\n s = \"q, \"\n s += \", \".join([f\"A{i + 1}\" for i in range(lie_algebra.dimension())])\n variables = var(s)\n return {str(variable): variable for variable in variables}\n\ndef geometricSumForPartition(positive_root, translations, q_analog):\n x = 1 if not q_analog else translations[\"q\"]\n for i in range(len(positive_root)):\n x = x * (translations[\"A\" + str(i+1)] ** positive_root[i])\n return 1/(1 - x)\n\ndef getLambda(lie_algebra, standard_to_simple_basis_change, lamb):\n # if lamb is not specified, highest root is used\n if lamb == None:\n return lie_algebra.highest_root()\n elif type(lamb) is list:\n while not len(lamb) == lie_algebra.dimension():\n lamb.append(0)\n temp = standard_to_simple_basis_change.inverse() * vector(lamb)\n 
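# NOTE (clarifying comment): mu arrives in simple-root coordinates; the inverse\n        # change of basis appears to map it back to the ambient (standard)\n        # coordinates that lie_algebra(...) expects, mirroring getLambda above.\n        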
return lie_algebra(list(temp))\n\n    return mu\n","repo_name":"antman1935/lie_algebras","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"73966695238","text":"#!/usr/bin/env python3\n\n# https://leetcode-cn.com/problems/number-of-boomerangs\n# Given n pairwise distinct points in the plane, a \"boomerang\" is a tuple of points (i, j, k)\n# such that the distance between i and j equals the distance between i and k (the order of\n# the tuple matters).\n# Count all boomerangs. You may assume n is at most 500 and all point coordinates lie in the\n# closed interval [-10000, 10000].\n#\n# Example:\n# Input:\n# [[0,0],[1,0],[2,0]]\n# Output:\n# 2\n#\n# Explanation:\n# The two boomerangs are [[1,0],[0,0],[2,0]] and [[1,0],[2,0],[0,0]]\n\n\nclass Solution:\n    def numberOfBoomerangs(self, points: [[int]]) -> int:\n        result = 0\n        for i in points:\n            temp = {}\n            for j in points:\n                d = (i[0] - j[0]) ** 2 + (i[1] - j[1]) ** 2\n                temp[d] = temp.get(d, 0) + 1\n            for v in list(temp.values()):\n                if v >= 2:\n                    result += v * (v - 1)\n        return result\n\n\nprint(Solution().numberOfBoomerangs([[0, 0], [1, 0], [2, 0]]))  # 2\n","repo_name":"HeDefine/LeetCodePractice","sub_path":"Q447.回旋镖的数量.py","file_name":"Q447.回旋镖的数量.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35134029827","text":"import sys\nfrom itertools import combinations\n\nn, m = map(int, sys.stdin.readline().split())\ndeck = list(map(int, sys.stdin.readline().split()))\n\nc_list = list(combinations(deck, 3))\ngap = 1e9\n\nfor c in c_list:\n    if 0 <= m-sum(c) < gap:\n        gap = m - sum(c)\n\nprint(m - gap)\n","repo_name":"96hsjeong/Baekjoon-StepByStep","sub_path":"level/11/2798.py","file_name":"2798.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34118829044","text":"def add():\n    x=10\n    y=20\n    sum=x+y\n    print(sum)\nadd()\nadd()\ndef wifi():\n    x=int(input(\"Enter First Number :\"))\n    y=int(input(\"Enter Second Number :\"))\n    result=x+y\n    print(result)\nwifi()\ndef real(y):\n    x=10\n    z=x+y\n    print(z)\nreal(10)\n","repo_name":"nazim164/Python-Code","sub_path":"get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34991405871","text":"# importing the required modules\nimport pickle\nimport re\n\nfrom testrerikom.wsgi import *\nimport requests\nfrom kafka import KafkaConsumer\nmy_consumer = KafkaConsumer(\n    'testnum',\n    bootstrap_servers = ['localhost:9092'],\n    api_version=(0,10,1),\n    auto_offset_reset = 'earliest',\n    enable_auto_commit = True,\n    group_id = 'my-group',\n    #value_deserializer = lambda x : loads(x.decode('utf-8'))\n    )\nfor msg in my_consumer:\n    # NOTE: unpickling data from an external broker runs arbitrary code on\n    # malicious input; a JSON payload would be safer here.\n    deserialized_data = pickle.loads(msg.value)\n    if re.search(r'\\bабракадабра\\b', deserialized_data['message'].lower()):\n        post_data = {'id': deserialized_data['id'], 'success': True}\n        response = requests.post('http://127.0.0.1:8000/api/v1/message_confirmation/', post_data)\n    else:\n        post_data = {'id': deserialized_data['id'], 'success': False}\n        
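# checking response.status_code after the POST would surface a dead\n        # confirmation endpoint instead of failing silently (illustrative suggestion).\n        response = requests.post('http://127.0.0.1:8000/api/v1/message_confirmation/', 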
post_data)\n","repo_name":"Danis2019/testrerikom","sub_path":"listiner.py","file_name":"listiner.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3413334466","text":"from list import List\nfrom node import Node\n\n\nclass CircularSinglyList(List):\n def __init__(self):\n super().__init__(None, 0)\n self.tail = None\n\n def inserAtFirst(self, data):\n newNode: Node = Node(data)\n if self.head:\n newNode.next = self.head\n self.head = newNode\n self.tail.next = newNode\n else:\n self.head = newNode\n newNode.next = self.head\n self.tail = newNode\n self.size += 1\n\n def insertAt(self, data, index):\n if index == 1:\n self.inserAtFirst(data)\n elif index > 0 and index <= self.size + 1:\n newNode: Node = Node(data)\n currentPtr: Node = self.head\n cnt: int = 1\n while cnt < index - 1:\n currentPtr = currentPtr.next\n cnt += 1\n newNode.next = currentPtr.next\n currentPtr.next = newNode\n self.size += 1\n else:\n self.outOfBound(index)\n\n def insertAtLast(self, data):\n newNode: Node = Node(data)\n if self.head:\n newNode.next = self.head\n self.tail.next = newNode\n self.tail = newNode\n self.size += 1\n else:\n self.inserAtFirst(data)\n\n def removeAtFirst(self):\n if self.size == 1:\n self.head = None\n self.tail = None\n elif self.head:\n self.tail.next = self.head.next\n self.head = self.head.next\n self.size -= 1\n\n def removeAt(self, index):\n if index < 1 or index > self.size:\n self.outOfBound(index)\n elif self.size == 1 or index == 1:\n self.removeAtFirst()\n else:\n currentPtr: Node = self.head\n cnt: int = 1\n while cnt < index - 1:\n currentPtr = currentPtr.next\n cnt += 1\n removePtr: Node = currentPtr.next\n if removePtr == self.tail:\n currentPtr.next = self.head\n self.tail = currentPtr\n currentPtr.next = removePtr.next\n self.size -= 1\n\n def removeAtLast(self):\n if self.size == 1:\n self.removeAtFirst()\n elif self.head:\n currentPtr: Node = self.head\n cnt: int = 1\n while currentPtr.next != self.tail:\n currentPtr = currentPtr.next\n self.tail = currentPtr\n currentPtr.next = self.head\n self.size -= 1\n\n def printList(self):\n if self.head:\n currentPtr: Node = self.head\n print('Head -> ', end='')\n while currentPtr.next != self.head:\n print(currentPtr.data, '-> ', end='')\n currentPtr = currentPtr.next\n print(self.tail.data, '(Tail) -> (Head)',\n currentPtr.next.data, '...')\n print('List Size:', self.size)\n\n else:\n print('Empty List!')\n\n def getNextNode(self, index):\n if index > 0 and self.head:\n currentPtr: Node = self.head\n cnt: int = 1\n while cnt < index:\n currentPtr = currentPtr.next\n cnt += 1\n print(currentPtr.data, '->', currentPtr.next.data)\n\n else:\n self.outOfBound(index)\n","repo_name":"MdReyadHossain/Data-Structure-and-Algorithm","sub_path":"Linked_List/circular_singly.py","file_name":"circular_singly.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25650007121","text":"import math\n\nimport numpy as np\n\nfrom danger_zone.agents.pedestrian_pathfinder import PedestrianPathfinder\nfrom danger_zone.map.tile_types import Tile, PEDESTRIAN_ZONES\n\n\nclass Pedestrian:\n \"\"\"Class representing a pedestrian agent.\"\"\"\n\n def __init__(self, position, target, map_state):\n \"\"\"\n Constructs a new instance of this class.\n\n :param position: The initial (spawn) position.\n :param target: The target position of this agent.\n 
:param map_state: The current MapState instance.\n \"\"\"\n\n self.position = position\n self.target = target\n self.map_state = map_state\n\n def move(self):\n \"\"\"\n Evaluates the current map tick state and tries to make a move.\n\n Instructs a pathfinding algorithm to find a good path to its target. If that algorithm finds no path (due to\n other agents being in the way), it falls back on a best-effort basis, making an arbitrary move (in the general\n direction of the target).\n \"\"\"\n\n pathfinder = PedestrianPathfinder(self.map_state, self)\n path = pathfinder.astar(self.position, self.target)\n\n if path is not None:\n path = list(path)\n if len(path) > 1:\n self.move_to_position(list(path)[1])\n else:\n # Choose neighbour that goes roughly into direction of the goal if A* finds no path, to avoid deadlock\n self.move_to_best_effort_neighbour()\n\n def move_to_best_effort_neighbour(self):\n \"\"\"Moves to a neighbour that is either arbitrarily chosen or moves closest to the target.\"\"\"\n\n directions = ((1, 0), (0, 1), (-1, 0), (0, -1))\n neighbour_tile_positions = [(self.position[0] + dir[0], self.position[1] + dir[1]) for dir in directions]\n\n if np.random.randint(0, 2) == 0:\n # Sort by closeness to target (pick best-effort first)\n neighbour_tile_positions = sorted(neighbour_tile_positions,\n key=lambda tile: math.hypot(self.target[0] - tile[0],\n self.target[1] - tile[1]))\n neighbour_tiles = [self.map_state.get_dynamic_tile(*pos) for pos in neighbour_tile_positions]\n\n for i in range(len(neighbour_tiles)):\n if neighbour_tiles[i] in PEDESTRIAN_ZONES:\n self.move_to_position(neighbour_tile_positions[i])\n return\n\n def move_to_position(self, new_position):\n \"\"\"\n Moves to the given position and updates the MapState cache appropriately.\n\n :param new_position: The position to move to.\n \"\"\"\n\n self.map_state.set_tile_in_cache(*self.position, Tile.EMPTY)\n self.map_state.set_tile_in_cache(*new_position, Tile.PEDESTRIAN)\n self.position = new_position\n\n assert self.map_state.map.is_on_main_map(*new_position) \\\n or self.map_state.map.get_tile(*new_position) == Tile.PEDESTRIAN_SPAWN, \\\n \"Pedestrian has left the board\"\n\n def is_done(self):\n \"\"\"\n Determines whether the agent has reached its target.\n\n :return: `True` iff. 
the agent has reached its target (and can thus be removed from the map).\n        \"\"\"\n\n        return self.position == self.target\n","repo_name":"gandreadis/danger-zone","sub_path":"danger_zone/agents/pedestrian.py","file_name":"pedestrian.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"72184459397","text":"import numpy as np\r\nimport pandas as pd \r\nimport os \r\nimport sys \r\nfrom .tesc import *\r\n\r\nclass LiftTesc(object): \r\n    \"\"\"LiftTesc Class\r\n    Consumes both labeled and unlabeled data in the training phase.\r\n    Takes the idea of dividing the training dataset into subspaces and passes each subspace to TESC.\r\n    \r\n    Parameters:\r\n    :param n_labels: the number of pre-defined class labels.\r\n    :type n_labels: int\r\n    :param n_features: the number of features representing a sample.\r\n    :type n_features: int\r\n    \"\"\"\r\n    def __init__(self, n_labels=5, n_features=3):\r\n        self.n_labels = n_labels\r\n        self.n_features = n_features\r\n        self.final_clusters = list()\r\n        return\r\n\r\n    def fit(self, X1, Y1, X2, L1, L2):\r\n        \"\"\"Fit classifier with training data \r\n        Parameters:\r\n        :param X1: input features of labeled data\r\n        :type X1: sparse CSR matrix (n_samples, n_features)\r\n        :param Y1: binary indicator matrix with label assignments of labeled data\r\n        :type Y1: dense matrix (n_samples, n_labels)\r\n        :param X2: input features of unlabeled data\r\n        :type X2: sparse CSR matrix (n_samples, n_features)\r\n        :param L1: A set of labels that all appear in every row of Y1\r\n        :type L1: An array of int, for example [1, 3]\r\n        :param L2: A set of labels that complement L1. L2 U L1 = L\r\n        :type L2: An array of int, for example [2, 4, 5]\r\n        Returns:\r\n        Fitted instance of self\r\n        \"\"\"\r\n        #print(\"LIFT_TESC - fit!\")\r\n        self.X1 = X1\r\n        self.Y1 = Y1\r\n        self.Label_lamda = np.zeros((Y1.shape[0], 1), dtype=int) #Only labeled data need lamda\r\n        self.X2 = X2\r\n        self.L1 = L1\r\n        self.L2 = L2\r\n        self.run()\r\n        return self\r\n\r\n    def run(self):\r\n        \"\"\"A procedure called in the fit method of the classifier which performs a set of sub-procedures. 
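On the first call L1 is typically empty, so lambda is chosen greedily (find_lambda_greedy); recursive calls pass a non-empty L1 + {lambda}, which selects the co-occurrence-graph variant instead. 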
\r\n A series of sub-procedures are called, as below: \r\n find_lambda, assign_DL_with_lambda, run_TESC_on_D\r\n get_clusters_from_TESC,\r\n clustering_on_D1, clustering_on_D2, clustering_on_D3\r\n print_clusters\r\n Parameters:\r\n :None:\r\n Returns:\r\n :None:\r\n \"\"\"\r\n if (self.L1.size == 0):\r\n self.find_lambda_greedy()\r\n else: \r\n self.find_lambda_graph()\r\n \r\n self.assign_DL_with_lambda()\r\n self.run_Tesc_on_D()\r\n self.get_clusters_from_Tesc()\r\n self.clustering_on_D1();\r\n self.clustering_on_D2();\r\n self.clustering_on_D3();\r\n #self.print_clusters();\r\n return\r\n \r\n def find_lambda_graph(self):\r\n \"\"\"Find the lambda in L2 which has the strongest co-occurrence with the labels in L1.\r\n If more than one label attains the maximum, \r\n choose the label with the lowest index.\r\n Parameters: \r\n :param Y1: the label set of labeled data\r\n :type Y1: dense matrix (n_samples, n_labels)\r\n :param L2: L2 = L/L1. Example: L2 = {a, b, c, e, f}\r\n \r\n Returns:\r\n :returns: The index of lambda {b}\r\n :rtype: int\r\n \"\"\"\r\n #print(\"LIFT_TESC - find lambda - Co-occurrence Graph!\")\r\n max_value = -1\r\n max_index = -1\r\n for label in self.L2:\r\n val_label = 0\r\n for y in self.Y1:\r\n for l1 in self.L1:\r\n l1 = int(l1)\r\n if (y[label] == 1 and y[l1] == 1):\r\n val_label += 1\r\n if (max_value < val_label):\r\n max_value = val_label\r\n max_index = label\r\n self.lamda = max_index\r\n return\r\n\r\n def find_lambda_greedy(self):\r\n \"\"\"Find the lambda in L2 that appears most frequently in Y1.\r\n If more than one label attains the maximum, \r\n choose the label with the lowest index.\r\n Parameters: \r\n :param Y1: the label set of labeled data\r\n :type Y1: dense matrix (n_samples, n_labels)\r\n :param L2: L2 = L/L1. Example: L2 = {a, b, c, e, f}\r\n \r\n Returns:\r\n :returns: The index of lambda {b}\r\n :rtype: int\r\n \"\"\"\r\n #print(\"LIFT_TESC - find lambda - Greedy!\")\r\n label_sum = np.sum(self.Y1, axis=0)\r\n if label_sum.ndim == 2:\r\n label_sum = label_sum.getA1()\r\n max_index = -1\r\n max_value = -1\r\n for i in range(self.n_labels):\r\n if max_value < label_sum[i] and np.any(self.L2 == i): #Check lamda in L2 or not\r\n max_value = label_sum[i]\r\n max_index = i\r\n self.lamda = max_index\r\n return\r\n \r\n def assign_DL_with_lambda(self):\r\n \"\"\"Assign the labeled data with assumed labels - lambda1, lambda2, lambda3.\r\n Method: \r\n If a labeled sample consists only of L1 and lambda,\r\n assign Label Lambda = 1.\r\n If a labeled sample consists of L1 and lambda plus other labels,\r\n assign Label Lambda = 2.\r\n If a labeled sample is missing lambda (or an L1 label),\r\n assign Label Lambda = 3.\r\n Parameters: \r\n :lamda: The lambda found from the set L2 satisfying the condition.\r\n :type lamda: int\r\n Returns:\r\n :returns: A list of lambda labels for the labeled data, attached to Y1.\r\n :rtype: A dense matrix Y1_lambda\r\n \"\"\"\r\n #print(\"LIFT_TESC - assign DL with Lambda!\")\r\n\r\n #Add lambda to L1 and start classifying\r\n L1_lamda = np.append(self.L1, self.lamda)\r\n L1_lamda = L1_lamda.astype(int)\r\n #Make a list of ~L1_lambda by using mask\r\n mask = np.ones(self.n_labels, dtype=bool)\r\n mask[L1_lamda] = False\r\n\r\n n_elements = self.Y1.shape[0]\r\n for i in range(n_elements):\r\n cond1 = not np.any(self.Y1[i, L1_lamda] == 0)\r\n cond2 = not np.any(self.Y1[i, mask] == 1)\r\n\r\n if cond1 and cond2:\r\n self.Label_lamda[i] = 1 # Lamda1\r\n elif cond1 and not cond2:\r\n self.Label_lamda[i] = 2 # Lamda2\r\n
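# cond1 failed: the row is missing lambda or one of the L1 labels, so it falls into class 3.\r\n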
else:\r\n self.Label_lamda[i] = 3 # Lamda3\r\n self.Y1 = np.append(self.Label_lamda, self.Y1, axis=1)\r\n return\r\n\r\n def run_Tesc_on_D(self):\r\n \"\"\"The procedure performing TESC on training data.\r\n Parameters:\r\n {none}\r\n Returns:\r\n {none}\r\n \"\"\"\r\n #print(\"LIFT_TESC - run Tesc on D!\")\r\n self.tesc = Tesc(n_features=self.n_features, n_labels=self.n_labels)\r\n self.tesc.fit(self.X1, np.array(self.Y1), self.X2)\r\n return\r\n\r\n def get_clusters_from_Tesc(self):\r\n \"\"\"The procedure dividing D into D1, D2, and D3 as result from TESC clustering.\r\n Method: \r\n If labeled data only consists of L1 and lamdba.\r\n Assign data to D1\r\n If labeled data only consists of L1 and lamdba and other labels.\r\n Assign data to D2\r\n If labeled data only doesnt contain lamdba.\r\n Assign data to D3\r\n Parameters: \r\n {none}\r\n Returns:\r\n {none}\r\n \"\"\"\r\n #print(\"LIFT_TESC - get clusters from Tesc!\")\r\n self.D1 = Cluster(n_labels=self.n_labels, n_features=self.n_features)\r\n self.D2 = Cluster(n_labels=self.n_labels, n_features=self.n_features)\r\n self.D3 = Cluster(n_labels=self.n_labels, n_features=self.n_features)\r\n\r\n n_elements = 0 \r\n clusters = self.tesc.get_clusters()\r\n for cl in clusters:\r\n lamda = cl.get_lamda()\r\n if (lamda == 1):\r\n #print(\"m1\")\r\n self.D1.merge_cluster(cl)\r\n if (lamda == 2):\r\n #print(\"m2\")\r\n self.D2.merge_cluster(cl)\r\n if (lamda == 3):\r\n #print(\"m3\")\r\n self.D3.merge_cluster(cl)\r\n return \r\n\r\n def clustering_on_D1(self):\r\n \"\"\"Perform clustering on D1\r\n Method: \r\n Add D1 into the set of output clusters\r\n Parameters: \r\n :param D1: A cluster of labeled data and unlabeled data which are assigned to lambda1.\r\n :type D1: An object of Cluster\r\n Returns:\r\n :returns: A list of output clusters\r\n :rtype: A list [].\r\n \"\"\"\r\n #print(\"LIFT_TESC - clustering on D1!\")\r\n if self.D1.check_empty() != True:\r\n if self.D1.check_label_similarity() == True:\r\n self.final_clusters.append(self.D1)\r\n else:\r\n print(\"---------Error at adding D1 to clusters\")\r\n return \r\n\r\n def clustering_on_D2(self):\r\n \"\"\"Perform clustering on D2\r\n Method: \r\n Add D2 to a set of output clusters if all data in D2 contains the same labels.\r\n Otherwise: \r\n Call LiftTesc on D2\r\n Parameters: \r\n :param D2: A cluster of labeled data and unlabeled data which are assigned to lambda2.\r\n :type D2: An object of Cluster\r\n Returns:\r\n :returns: A list of output clusters\r\n :rtype: A list [].\r\n \"\"\"\r\n #print(\"LIFT_TESC - clustering on D2!\")\r\n if self.D2.check_empty() != True:\r\n if self.D2.check_label_similarity() == True:\r\n self.final_clusters.append(self.D2)\r\n else:\r\n lt = LiftTesc(n_labels=self.n_labels, n_features=self.n_features)\r\n lt.fit(self.D2.X1, self.D2.Y1[:, 1:], self.D2.X2, np.append(self.L1, self.lamda), self.L2[self.L2 != self.lamda])\r\n self.final_clusters.extend(lt.get_clusters())\r\n return \r\n\r\n def clustering_on_D3(self):\r\n \"\"\"Perform clustering on D3\r\n Method: \r\n Add D3 to a set of output clusters if all data in D3 contains the same labels.\r\n Otherwise: \r\n Call LiftTesc on D3\r\n Parameters: \r\n :param D3: A cluster of labeled data and unlabeled data which are assigned to lambda3.\r\n :type D3: An object of Cluster\r\n Returns:\r\n :returns: A list of output clusters\r\n :rtype: A list [].\r\n \"\"\"\r\n #print(\"LIFT_TESC - clustering on D3!\")\r\n if self.D3.check_empty() != True:\r\n if self.D3.check_label_similarity() == True:\r\n 
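# Homogeneous leaf: every row of D3 shares the same label set, so it can be emitted as a finished cluster.\r\n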
self.final_clusters.append(self.D3)\r\n else:\r\n lt = LiftTesc(n_labels=self.n_labels, n_features=self.n_features)\r\n lt.fit(self.D3.X1, self.D3.Y1[:, 1:], self.D3.X2, self.L1, self.L2[self.L2 != self.lamda])\r\n self.final_clusters.extend(lt.get_clusters())\r\n return \r\n\r\n def get_clusters(self):\r\n \"\"\"Get output clusters method\r\n Parameters: \r\n {none}\r\n Returns:\r\n :returns: A list of output clusters\r\n :rtype: A list [].\r\n \"\"\"\r\n return self.final_clusters\r\n\r\n def print_clusters(self):\r\n \"\"\"Print output clusters method\r\n Print the information of clusters in the output list\r\n Parameters: \r\n {none}\r\n Returns:\r\n {none}\r\n \"\"\"\r\n text_file = open(\"Output.txt\", \"w\")\r\n myStr = \"\"\r\n for cl in self.final_clusters:\r\n myStr += cl.toString() + \"\\n\"\r\n text_file.write(myStr)\r\n text_file.close()\r\n return \r\n\r\n#DEBUG\r\nif __name__ == \"__main__\":\r\n lt = LiftTesc(n_labels=5)\r\n label_data = pd.read_csv(\"label.csv\")\r\n unlabel_data = pd.read_csv(\"unlabel.csv\")\r\n X1 = label_data.iloc[:,5:]\r\n Y1 = label_data.iloc[:,:5]\r\n X2 = unlabel_data\r\n X1 = np.array(X1)\r\n Y1 = np.array(Y1)\r\n X2 = np.array(X2)\r\n L1 = np.array([], dtype=int)\r\n L2 = np.array([0,1,2,3,4], dtype=int)\r\n lt.fit(X1, Y1, X2, L1, L2)\r\n lt.print_clusters()\r\n \r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"davidnvq/MASS","sub_path":"mass/liftTesc.py","file_name":"liftTesc.py","file_ext":"py","file_size_in_byte":12279,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"38823639133","text":"from stdatamodels.jwst import datamodels\n\nfrom ..stpipe import Step\nfrom stcal.dark_current import dark_sub\n\n\n__all__ = [\"DarkCurrentStep\"]\n\n\nclass DarkCurrentStep(Step):\n \"\"\"\n DarkCurrentStep: Performs dark current correction by subtracting\n dark current reference data from the input science data model.\n \"\"\"\n\n class_alias = \"dark_current\"\n\n spec = \"\"\"\n dark_output = output_file(default = None) # Dark model or averaged dark subtracted\n \"\"\"\n\n reference_file_types = ['dark']\n\n def process(self, input):\n\n # Open the input data model\n with datamodels.RampModel(input) as input_model:\n\n # Get the name of the dark reference file to use\n self.dark_name = self.get_reference_file(input_model, 'dark')\n self.log.info('Using DARK reference file %s', self.dark_name)\n\n # Check for a valid reference file\n if self.dark_name == 'N/A':\n self.log.warning('No DARK reference file found')\n self.log.warning('Dark current step will be skipped')\n result = input_model.copy()\n result.meta.cal_step.dark = 'SKIPPED'\n return result\n\n # Create name for the intermediate dark, if desired.\n dark_output = self.dark_output\n if dark_output is not None:\n dark_output = self.make_output_path(\n basepath=dark_output,\n suffix=False\n )\n\n # Open the dark ref file data model - based on Instrument\n instrument = input_model.meta.instrument.name\n if instrument == 'MIRI':\n dark_model = datamodels.DarkMIRIModel(self.dark_name)\n else:\n dark_model = datamodels.DarkModel(self.dark_name)\n\n # Do the dark correction\n result = dark_sub.do_correction(\n input_model, dark_model, dark_output\n )\n\n out_data, dark_data = result\n\n if dark_data is not None and dark_data.save:\n save_dark_data_as_dark_model(dark_data, dark_model, instrument)\n dark_model.close()\n\n out_ramp = dark_output_data_2_ramp_model(out_data, input_model)\n\n return out_ramp\n\n\ndef save_dark_data_as_dark_model(dark_data, 
dark_model, instrument):\n \"\"\"\n Save dark data from the dark current step as the appropriate dark model.\n\n Parameters\n ----------\n dark_data: DarkData\n Dark data used in the dark current step.\n\n dark_model: DarkMIRIModel or DarkModel\n The input dark model from reference.\n\n instrument: str\n The instrument name.\n \"\"\"\n if instrument == \"MIRI\":\n out_dark_model = datamodels.DarkMIRIModel(\n data=dark_data.data,\n dq=dark_data.groupdq,\n err=dark_data.err)\n else:\n out_dark_model = datamodels.DarkModel(\n data=dark_data.data,\n dq=dark_data.groupdq,\n err=dark_data.err)\n out_dark_model.update(dark_model)\n\n out_dark_model.meta.exposure.nframes = dark_data.exp_nframes\n out_dark_model.meta.exposure.ngroups = dark_data.exp_ngroups\n out_dark_model.meta.exposure.groupgap = dark_data.exp_groupgap\n out_dark_model.save(dark_data.output_name)\n out_dark_model.close()\n\n\ndef dark_output_data_2_ramp_model(out_data, input_model):\n \"\"\"\n Convert computed output data from the dark step to a RampModel.\n\n Parameters\n ----------\n out_data: ScienceData\n Computed science data from the dark current step.\n\n input_model: RampModel\n The input ramp model from which to subtract the dark current.\n\n Return\n ------\n out_model: RampModel\n The output ramp model from the dark current step.\n \"\"\"\n\n if out_data.cal_step == \"SKIPPED\":\n # If processing was skipped in the lower-level routines,\n # just return the unmodified input model\n input_model.meta.cal_step.dark_sub = \"SKIPPED\"\n return input_model\n else:\n out_model = input_model.copy()\n out_model.meta.cal_step.dark_sub = out_data.cal_step\n out_model.data = out_data.data\n out_model.groupdq = out_data.groupdq\n out_model.pixeldq = out_data.pixeldq\n out_model.err = out_data.err\n return out_model\n","repo_name":"spacetelescope/jwst","sub_path":"jwst/dark_current/dark_current_step.py","file_name":"dark_current_step.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"62"} +{"seq_id":"74321124356","text":"#!/usr/bin/env python\n\"\"\"\n#\n# Description:\n# takes one moment log and reverses the order of the entries\n#\n# useful to put oldest first (opposite of normal direction)\n\n# By: Charles Brandt [code at contextiskey dot com]\n# On: 2009.03.27 22:59:09 \n# License: MIT\n\n# Requires: moments\n\"\"\"\nfrom __future__ import print_function\n\nimport sys, os\nfrom moments.journal import Journal\nfrom moments.log import Log\n\ndef reverse_log(f1, f2=\"temp.txt\"):\n \"\"\"\n \"\"\"\n result = ''\n \n j = Journal()\n j.load(f1)\n\n l = Log(f2)\n l.from_entries(j.sort(\"reverse\"))\n l.to_file()\n l.close()\n \ndef main():\n if len (sys.argv) > 1:\n if sys.argv[1] in ['--help','help'] or len(sys.argv) < 2:\n usage()\n f1 = sys.argv[1]\n if len(sys.argv) > 2:\n f2 = sys.argv[2]\n else:\n f2 = \"temp.txt\"\n reverse_log(f1, f2)\n print(\"%s reversed and saved in: %s\" % (f1, f2))\n \nif __name__ == '__main__':\n main()\n","repo_name":"charlesbrandt/mindstream","sub_path":"scripts/reverse_log.py","file_name":"reverse_log.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73935619718","text":"from __future__ import division\nfrom builtins import str\nfrom builtins import map\nfrom builtins import range\nimport ephem\n\nimport astropy.time as at\nimport numpy as np\nimport scipy.special as ss\n\nfrom ehtim.const_def import 
*\n\n##################################################################################################\n# Other Functions\n##################################################################################################\n\ndef compute_uv_coordinates(array, site1, site2, time, mjd, ra, dec, rf, timetype='UTC', elevmin=ELEV_LOW, elevmax=ELEV_HIGH, fix_theta_GMST = False):\n\n if not isinstance(time, np.ndarray): time = np.array([time]).flatten()\n if not isinstance(site1, np.ndarray): site1 = np.array([site1]).flatten()\n if not isinstance(site2, np.ndarray): site2 = np.array([site2]).flatten()\n\n if len(site1) == len(site2) == 1:\n site1 = np.array([site1[0] for i in range(len(time))])\n site2 = np.array([site2[0] for i in range(len(time))])\n elif not (len(site1) == len(site2) == len(time)):\n raise Exception(\"site1, site2, and time not the same dimension in compute_uv_coordinates!\") \n\n # Source vector\n sourcevec = np.array([np.cos(dec*DEGREE), 0, np.sin(dec*DEGREE)])\n projU = np.cross(np.array([0,0,1]), sourcevec)\n projU = projU/np.linalg.norm(projU)\n projV = -np.cross(projU, sourcevec)\n\n # Wavelength\n l = C/rf\n\n #ANDREW TODO DOES THIS WORK\n if timetype=='GMST':\n time_sidereal = time\n time_utc = gmst_to_utc(time, mjd)\n elif timetype=='UTC':\n time_sidereal = utc_to_gmst(time, mjd)\n time_utc = time\n else: raise Exception(\"timetype must be UTC or GMST!\")\n\n fracmjd = np.floor(mjd) + time/24.\n dto = (at.Time(fracmjd, format='mjd')).datetime\n theta = np.mod((time_sidereal - ra)*HOUR, 2*np.pi)\n if type(fix_theta_GMST) != bool:\n theta = np.mod((fix_theta_GMST - ra)*HOUR, 2*np.pi)\n\n i1 = np.array([array.tkey[site] for site in site1])\n i2 = np.array([array.tkey[site] for site in site2])\n\n coord1 = np.vstack((array.tarr[i1]['x'], array.tarr[i1]['y'], array.tarr[i1]['z'])).T\n coord2 = np.vstack((array.tarr[i2]['x'], array.tarr[i2]['y'], array.tarr[i2]['z'])).T\n\n # TODO SPEED UP!??\n # use spacecraft ephemeris to get position of site 1\n spacemask1 = [np.all(coord == (0.,0.,0.)) for coord in coord1]\n if np.any(spacemask1):\n if timetype=='GMST':\n raise Exception(\"Spacecraft ephemeris only work with UTC!\")\n\n site1space_list = site2[spacemask1]\n site1space_dtolist = dto[spacemask1]\n coord1space = []\n for k in range(len(site1space_list)):\n site1space = site1space_list[k]\n dto_no2 = dto[k]\n sat = ephem.readtle(array.ephem[site1space][0],array.ephem[site1space][1],array.ephem[site1space][2])\n sat.compute(dto) # often complains if ephemeris out of date!\n elev = sat.elevation\n lat = sat.sublat / DEGREE\n lon = sat.sublong / DEGREE\n # pyephem doesn't use an ellipsoid earth model!\n c1 = coords.EarthLocation.from_geodetic(lon, lat, elev, ellipsoid=None)\n c1 = np.array((c1.x.value, c1.y.value, c1.z.value))\n coord1space.append(c1)\n coord1space = np.array(c1)\n coord1[spacemask1] = coord1space\n\n spacemask2 = [np.all(coord == (0.,0.,0.)) for coord in coord2]\n if np.any(spacemask2):\n if timetype=='GMST':\n raise Exception(\"Spacecraft ephemeris only work with UTC!\")\n\n site2space_list = site2[spacemask2]\n site2space_dtolist = dto[spacemask2]\n coord2space = []\n for k in range(len(site2space_list)):\n site2space = site2space_list[k]\n dto_now = dto[k]\n sat = ephem.readtle(array.ephem[site2space][0],array.ephem[site2space][1],array.ephem[site2space][2])\n sat.compute(dto_now) # often complains if ephemeris out of date!\n elev = sat.elevation\n lat = sat.sublat / DEGREE\n lon = sat.sublong / DEGREE\n # pyephem doesn't use an ellipsoid earth model!\n 
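# NB: 'coords' is not imported explicitly in this file; it is assumed to arrive via the ehtim.const_def wildcard import above (astropy.coordinates). from_geodetic converts the sub-satellite lon/lat/height into a geocentric Cartesian position in meters, matching the ground-station frame.\n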
c2 = coords.EarthLocation.from_geodetic(lon, lat, elev, ellipsoid=None)\n c2 = np.array((c2.x.value, c2.y.value, c2.z.value))\n coord2space.append(c2)\n coord2space = np.array(c2)\n coord2[spacemask2] = coord2space\n\n # rotate the station coordinates with the earth\n coord1 = earthrot(coord1, theta)\n coord2 = earthrot(coord2, theta)\n\n # u,v coordinates\n u = np.dot((coord1 - coord2)/l, projU) # u (lambda)\n v = np.dot((coord1 - coord2)/l, projV) # v (lambda)\n\n # mask out below elevation cut\n mask = (elevcut(coord1, sourcevec, elevmin=elevmin, elevmax=elevmax) *\n elevcut(coord2, sourcevec, elevmin=elevmin, elevmax=elevmax))\n\n time = time[mask]\n u = u[mask]\n v = v[mask]\n \n # return times and uv points where we have data\n return (time, u, v)\n\ndef make_bispectrum(l1, l2, l3,vtype):\n \"\"\"make a list of bispectra and errors\n l1,l2,l3 are full datatables of visibility entries\n vtype is visibility types\n \"\"\"\n # Choose the appropriate polarization and compute the bs and err\n if vtype in [\"vis\", \"qvis\", \"uvis\",\"vvis\"]:\n if vtype=='vis': sigmatype='sigma'\n if vtype=='qvis': sigmatype='qsigma'\n if vtype=='uvis': sigmatype='usigma'\n if vtype=='vvis': sigmatype='vsigma'\n\n p1 = l1[vtype]\n p2 = l2[vtype]\n p3 = l3[vtype]\n\n var1 = l1[sigmatype]**2\n var2 = l2[sigmatype]**2\n var3 = l3[sigmatype]**2\n\n elif vtype == \"rrvis\":\n p1 = l1['vis'] + l1['vvis']\n p2 = l2['vis'] + l2['vvis']\n p3 = l3['vis'] + l3['vvis']\n \n var1 = l1['sigma']**2 + l1['vsigma']**2\n var2 = l2['sigma']**2 + l2['vsigma']**2\n var3 = l3['sigma']**2 + l3['vsigma']**2\n\n elif vtype == \"llvis\":\n p1 = l1['vis'] - l1['vvis']\n p2 = l2['vis'] - l2['vvis']\n p3 = l3['vis'] - l3['vvis']\n \n var1 = l1['sigma']**2 + l1['vsigma']**2\n var2 = l2['sigma']**2 + l2['vsigma']**2\n var3 = l3['sigma']**2 + l3['vsigma']**2\n\n elif vtype == \"lrvis\":\n p1 = l1['qvis'] - 1j*l1['uvis']\n p2 = l2['qvis'] - 1j*l2['uvis']\n p3 = l3['qvis'] - 1j*l3['uvis']\n \n var1 = l1['qsigma']**2 + l1['usigma']**2\n var2 = l2['qsigma']**2 + l2['usigma']**2\n var3 = l3['qsigma']**2 + l3['usigma']**2\n\n elif vtype in [\"pvis\",\"rlvis\"]:\n p1 = l1['qvis'] + 1j*l2['uvis']\n p2 = l2['qvis'] + 1j*l2['uvis']\n p3 = l3['qvis'] + 1j*l3['uvis']\n bi = p1 * p2 * p3\n\n var1 = l1['qsigma']**2 + l1['usigma']**2\n var2 = l2['qsigma']**2 + l2['usigma']**2\n var3 = l3['qsigma']**2 + l3['usigma']**2\n\n bi = p1*p2*p3\n bisig = np.abs(bi) * np.sqrt(var1/np.abs(p1)**2 +\n var2/np.abs(p2)**2 +\n var3/np.abs(p3)**2)\n # Katie's 2nd + 3rd order corrections - see CHIRP supplement\n #bisig = np.sqrt(bisig**2 + var1*var2*np.abs(p3)**2 +\n # var1*var3*np.abs(p2)**2 +\n # var2*var3*np.abs(p1)**2 +\n # var1*var2*var3)\n return (bi, bisig)\n\ndef make_closure_amplitude(red1, red2, blue1, blue2, vtype, ctype='camp', debias=True, debias_type='old'):\n \"\"\"make a list of closure amplitudes and errors\n red1 and red2 are full datatables of numerator entries\n blue1 and blue2 are full datatables denominator entries\n vtype is the visibility type\n we always debias the individual amplitudes\n debias controls if we debias the closure amplitude at the end\n DebiasType controls the type of debisaing, 'ExactLog' means \n exact debiasing in log space, it will turn off any debiasing in 'amp_debias',\n and apply debiasing only to closure quantities \n \"\"\"\n\n DebiasType = debias_type\n\n if not (ctype in ['camp', 'logcamp']):\n raise Exception(\"closure amplitude type must be 'camp' or 'logcamp'!\")\n\n if vtype in [\"vis\", \"qvis\", \"uvis\", 
\"vvis\"]:\n if vtype=='vis': sigmatype='sigma'\n if vtype=='qvis': sigmatype='qsigma'\n if vtype=='uvis': sigmatype='usigma'\n if vtype=='vvis': sigmatype='vsigma'\n\n sig1 = blue1[sigmatype]\n sig2 = blue2[sigmatype]\n sig3 = red1[sigmatype]\n sig4 = red2[sigmatype]\n\n p1 = amp_debias(blue1[vtype], sig1,DebiasType)\n p2 = amp_debias(blue2[vtype], sig2,DebiasType)\n p3 = amp_debias(red1[vtype], sig3,DebiasType)\n p4 = amp_debias(red2[vtype], sig4,DebiasType)\n\n elif vtype == \"rrvis\":\n sig1 = np.sqrt(blue1['sigma']**2 + blue1['vsigma']**2)\n sig2 = np.sqrt(blue2['sigma']**2 + blue2['vsigma']**2)\n sig3 = np.sqrt(red1['sigma']**2 + red1['vsigma']**2)\n sig4 = np.sqrt(red2['sigma']**2 + red2['vsigma']**2)\n\n p1 = amp_debias(blue1['vis'] + blue1['vvis'], sig1,DebiasType)\n p2 = amp_debias(blue2['vis'] + blue2['vvis'], sig2,DebiasType)\n p3 = amp_debias(red1['vis'] + red1['vvis'], sig3,DebiasType)\n p4 = amp_debias(red2['vis'] + red2['vvis'], sig4,DebiasType)\n\n elif vtype == \"llvis\":\n sig1 = np.sqrt(blue1['sigma']**2 + blue1['vsigma']**2)\n sig2 = np.sqrt(blue2['sigma']**2 + blue2['vsigma']**2)\n sig3 = np.sqrt(red1['sigma']**2 + red1['vsigma']**2)\n sig4 = np.sqrt(red2['sigma']**2 + red2['vsigma']**2)\n\n p1 = amp_debias(blue1['vis'] - blue1['vvis'], sig1,DebiasType)\n p2 = amp_debias(blue2['vis'] - blue2['vvis'], sig2,DebiasType)\n p3 = amp_debias(red1['vis'] - red1['vvis'], sig3,DebiasType)\n p4 = amp_debias(red2['vis'] - red2['vvis'], sig4,DebiasType)\n\n elif vtype == \"lrvis\":\n sig1 = np.sqrt(blue1['qsigma']**2 + blue1['usigma']**2)\n sig2 = np.sqrt(blue2['qsigma']**2 + blue2['usigma']**2)\n sig3 = np.sqrt(red1['qsigma']**2 + red1['usigma']**2)\n sig4 = np.sqrt(red2['qsigma']**2 + red2['usigma']**2)\n\n p1 = amp_debias(blue1['qvis'] - 1j*blue1['uvis'], sig1)\n p2 = amp_debias(blue2['qvis'] - 1j*blue2['uvis'], sig2)\n p3 = amp_debias(red1['qvis'] - 1j*red1['uvis'], sig3)\n p4 = amp_debias(red2['qvis'] - 1j*red2['uvis'], sig4)\n\n elif vtype in [\"pvis\",\"rlvis\"]:\n sig1 = np.sqrt(blue1['qsigma']**2 + blue1['usigma']**2)\n sig2 = np.sqrt(blue2['qsigma']**2 + blue2['usigma']**2)\n sig3 = np.sqrt(red1['qsigma']**2 + red1['usigma']**2)\n sig4 = np.sqrt(red2['qsigma']**2 + red2['usigma']**2)\n\n p1 = amp_debias(blue1['qvis'] + 1j*blue1['uvis'], sig1,DebiasType)\n p2 = amp_debias(blue2['qvis'] + 1j*blue2['uvis'], sig2,DebiasType)\n p3 = amp_debias(red1['qvis'] + 1j*red1['uvis'], sig3,DebiasType)\n p4 = amp_debias(red2['qvis'] + 1j*red2['uvis'], sig4,DebiasType)\n\n snr1 = p1/sig1\n snr2 = p2/sig2\n snr3 = p3/sig3\n snr4 = p4/sig4\n\n if ctype=='camp':\n camp = np.abs((p1*p2)/(p3*p4))\n camperr = camp * np.sqrt(1./(snr1**2) + 1./(snr2**2) + 1./(snr3**2) + 1./(snr4**2))\n\n # Debias\n if debias:\n if DebiasType=='ExactLog':\n snr1 = get_snr(snr1)\n snr2 = get_snr(snr2)\n snr3 = get_snr(snr3)\n snr4 = get_snr(snr4)\n camp = camp_debias(camp, snr3, snr4,snr1,snr2,'ExactLog')\n else:\n camp = camp_debias(camp, snr3, snr4)\n\n elif ctype=='logcamp':\n camp = np.log(np.abs(p1)) + np.log(np.abs(p2)) - np.log(np.abs(p3)) - np.log(np.abs(p4))\n camperr = np.sqrt(1./(snr1**2) + 1./(snr2**2) + 1./(snr3**2) + 1./(snr4**2))\n\n # Debias\n if debias:\n if DebiasType=='ExactLog':\n snr1 = get_snr(snr1)\n snr2 = get_snr(snr2)\n snr3 = get_snr(snr3)\n snr4 = get_snr(snr4)\n camp = logcamp_debias(camp, snr1, snr2,snr3,snr4,'ExactLog')\n else: \n camp = logcamp_debias(camp, snr1, snr2, snr3, snr4)\n\n return (camp, camperr)\n\n#MW---OCT---2017\ndef get_snr_help(Esnr):\n \"\"\"estimates snr given a 
single biased snr measurement\n \"\"\"\n if Esnr**2 >= 2.0: \n return np.sqrt(Esnr**2 - 1.0)\n else:\n return 1.0\n\ndef get_snr(Esnr):\n \"\"\"\"applies get_snr_help on vector\n \"\"\"\n if type(Esnr) == float or type(Esnr)==np.float64:\n return get_snr_help(Esnr)\n else:\n return np.asarray(map(get_snr_help,Esnr))\n\ndef log_debias(snr0):\n \"\"\"debias log snr\n \"\"\"\n snr0 = np.asarray(snr0)\n return -ss.expi(-snr0**2/2.)/2.\n\n\ndef amp_debias(amp, sigma, DebiasType='old'):\n \"\"\"Return debiased visibility amplitudes\n \"\"\"\n\n if DebiasType=='ExactLog':\n #don't debias at all in this case, all debiasing will happen later for closure quantities\n #snr0 = amp/sigma\n #return amp*np.exp(-log_debias(snr0))\n return amp\n \n else:\n deb2 = np.abs(amp)**2 - np.abs(sigma)**2\n if type(deb2) == float or type(deb2)==np.float64:\n if deb2 < 0.0: return np.abs(amp)\n else: return np.sqrt(deb2)\n else:\n lowsnr = deb2 < 0.0\n deb2[lowsnr] = np.abs(amp[lowsnr])**2\n return np.sqrt(deb2)\n\ndef camp_debias(camp, snr3, snr4,snr1=1e5,snr2=1e5,DebiasType='old'):\n \"\"\"Debias closure amplitudes\n snr3 and snr4 are snr of visibility amplitudes # 3 and 4.\n \"\"\"\n if DebiasType=='ExactLog':\n camp_debias = camp*np.exp( - log_debias(snr1) - log_debias(snr2) + log_debias(snr3) + log_debias(snr4) )\n else:\n camp_debias = camp / (1 + 1./(snr3**2) + 1./(snr4**2))\n return camp_debias\n\ndef logcamp_debias(log_camp, snr1, snr2, snr3, snr4,DebiasType='old'):\n \"\"\"Debias log closure amplitudes\n The snrs are the snr of visibility amplitudes\n \"\"\"\n if DebiasType=='ExactLog':\n log_camp_debias = log_camp - log_debias(snr1) - log_debias(snr2) + log_debias(snr3) + log_debias(snr4)\n else:\n log_camp_debias = log_camp + 0.5*(1./(snr1**2) + 1./(snr2**2) - 1./(snr3**2) - 1./(snr4**2))\n return log_camp_debias\n\ndef gauss_uv(u, v, flux, beamparams, x=0., y=0.):\n \"\"\"Return the value of the Gaussian FT with\n beamparams is [FWHMmaj, FWHMmin, theta, x, y], all in radian\n theta is the orientation angle measured E of N\n \"\"\"\n\n sigma_maj = beamparams[0]/(2*np.sqrt(2*np.log(2)))\n sigma_min = beamparams[1]/(2*np.sqrt(2*np.log(2)))\n theta = -beamparams[2] # theta needs to be negative in this convention!\n #try:\n #\tx=beamparams[3]\n #\ty=beamparams[4]\n #except IndexError:\n #\tx=y=0.0\n\n # Covariance matrix\n a = (sigma_min * np.cos(theta))**2 + (sigma_maj*np.sin(theta))**2\n b = (sigma_maj * np.cos(theta))**2 + (sigma_min*np.sin(theta))**2\n c = (sigma_min**2 - sigma_maj**2) * np.cos(theta) * np.sin(theta)\n m = np.array([[a, c], [c, b]])\n\n uv = np.array([[u[i],v[i]] for i in range(len(u))])\n x2 = np.array([np.dot(uvi,np.dot(m,uvi)) for uvi in uv])\n #x2 = np.dot(uv, np.dot(m, uv.T))\n g = np.exp(-2 * np.pi**2 * x2)\n p = np.exp(-2j * np.pi * (u*x + v*y))\n\n return flux * g * p\n\ndef sgra_kernel_uv(rf, u, v):\n \"\"\"Return the value of the Sgr A* scattering kernel at a given u,v pt (in lambda),\n at a given frequency rf (in Hz).\n Values from Bower et al.\n \"\"\"\n\n lcm = (C/rf) * 100 # in cm\n sigma_maj = FWHM_MAJ * (lcm**2) / (2*np.sqrt(2*np.log(2))) * RADPERUAS\n sigma_min = FWHM_MIN * (lcm**2) / (2*np.sqrt(2*np.log(2))) * RADPERUAS\n theta = -POS_ANG * DEGREE # theta needs to be negative in this convention!\n\n #bp = [fwhm_maj, fwhm_min, theta]\n #g = gauss_uv(u, v, 1., bp, x=0., y=0.)\n\n # Covariance matrix\n a = (sigma_min * np.cos(theta))**2 + (sigma_maj*np.sin(theta))**2\n b = (sigma_maj * np.cos(theta))**2 + (sigma_min*np.sin(theta))**2\n c = (sigma_min**2 - sigma_maj**2) * 
np.cos(theta) * np.sin(theta)\n m = np.array([[a, c], [c, b]])\n uv = np.array([u,v])\n\n x2 = np.dot(uv, np.dot(m, uv))\n g = np.exp(-2 * np.pi**2 * x2)\n\n return g\n\ndef sgra_kernel_params(rf):\n \"\"\"Return elliptical gaussian parameters in radian for the Sgr A* scattering ellipse at a given frequency\n Values from Bower et al.\n \"\"\"\n\n lcm = (C/rf) * 100 # in cm\n fwhm_maj_rf = FWHM_MAJ * (lcm**2) * RADPERUAS\n fwhm_min_rf = FWHM_MIN * (lcm**2) * RADPERUAS\n theta = POS_ANG * DEGREE\n\n return np.array([fwhm_maj_rf, fwhm_min_rf, theta])\n\n\ndef blnoise(sefd1, sefd2, tint, bw):\n \"\"\"Determine the standard deviation of Gaussian thermal noise on a baseline\n This is the noise on the rr/ll/rl/lr correlation, not the stokes parameter\n 2-bit quantization is responsible for the 0.88 factor\n \"\"\"\n\n #!AC TODO Is the factor of sqrt(2) correct?\n #noise = np.sqrt(sefd1*sefd2/(2*bw*tint))/0.88\n\n noise = np.sqrt(sefd1*sefd2/(bw*tint))/0.88\n\n return noise\n\ndef merr(sigma, qsigma, usigma, I, m):\n \"\"\"Return the error in mbreve real and imaginary parts\"\"\"\n\n err = np.sqrt((qsigma**2 + usigma**2 + (sigma*np.abs(m))**2)/(np.abs(I) ** 2))\n # old formula assumes all sigmas the same\n #err = sigma * np.sqrt((2 + np.abs(m)**2)/ (np.abs(I) ** 2))\n return err\n\ndef cerror(sigma):\n \"\"\"Return a complex number drawn from a circular complex Gaussian of zero mean\n \"\"\"\n return np.random.normal(loc=0,scale=sigma) + 1j*np.random.normal(loc=0,scale=sigma)\n\ndef hashrandn(*args):\n \"\"\"set the seed according to a collection of arguments and return random gaussian var\n \"\"\"\n np.random.seed(hash(\",\".join(map(repr,args))) % 4294967295)\n return np.random.randn()\n\ndef hashrand(*args):\n \"\"\"set the seed according to a collection of arguments and return random number in 0,1\n \"\"\"\n np.random.seed(hash(\",\".join(map(repr,args))) % 4294967295)\n return np.random.rand()\n\ndef image_centroid(im):\n \"\"\"Return the image centroid (in radians)\n \"\"\"\n\n xlist = np.arange(0,-im.xdim,-1)*im.psize + (im.psize*im.xdim)/2.0 - im.psize/2.0\n ylist = np.arange(0,-im.ydim,-1)*im.psize + (im.psize*im.ydim)/2.0 - im.psize/2.0\n\n x0 = np.sum(np.outer(0.0*ylist+1.0, xlist).ravel()*im.imvec)/np.sum(im.imvec)\n y0 = np.sum(np.outer(ylist, 0.0*xlist+1.0).ravel()*im.imvec)/np.sum(im.imvec)\n\n return np.array([x0, y0])\n\ndef ftmatrix(pdim, xdim, ydim, uvlist, pulse=PULSE_DEFAULT, mask=[]):\n \"\"\"Return a DFT matrix for the xdim*ydim image with pixel width pdim\n that extracts spatial frequencies of the uv points in uvlist.\n \"\"\"\n\n xlist = np.arange(0,-xdim,-1)*pdim + (pdim*xdim)/2.0 - pdim/2.0\n ylist = np.arange(0,-ydim,-1)*pdim + (pdim*ydim)/2.0 - pdim/2.0\n\n # original sign convention\n #ftmatrices = [pulse(2*np.pi*uv[0], 2*np.pi*uv[1], pdim, dom=\"F\") * np.outer(np.exp(-2j*np.pi*ylist*uv[1]), np.exp(-2j*np.pi*xlist*uv[0])) for uv in uvlist] #list of matrices at each freq\n\n # changed the sign convention to agree with BU data (Jan 2017)\n ftmatrices = [pulse(2*np.pi*uv[0], 2*np.pi*uv[1], pdim, dom=\"F\") * np.outer(np.exp(2j*np.pi*ylist*uv[1]), np.exp(2j*np.pi*xlist*uv[0])) for uv in uvlist] #list of matrices at each freq\n\n ftmatrices = np.reshape(np.array(ftmatrices), (len(uvlist), xdim*ydim))\n\n if len(mask):\n ftmatrices = ftmatrices[:,mask]\n\n return ftmatrices\n\ndef ftmatrix_centered(im, pdim, xdim, ydim, uvlist, pulse=PULSE_DEFAULT):\n \"\"\"Return a DFT matrix for the xdim*ydim image with pixel width pdim\n that extracts spatial frequencies of the uv 
points in uvlist.\n in this version, it puts the image centroid at the origin\n \"\"\"\n\n # !AC TODO : there is a residual value for the center being around 0, maybe we should chop this off to be exactly 0\n # Coordinate matrix for COM constraint\n xlist = np.arange(0,-xdim,-1)*pdim + (pdim*xdim)/2.0 - pdim/2.0\n ylist = np.arange(0,-ydim,-1)*pdim + (pdim*ydim)/2.0 - pdim/2.0\n x0 = np.sum(np.outer(0.0*ylist+1.0, xlist).ravel()*im)/np.sum(im)\n y0 = np.sum(np.outer(ylist, 0.0*xlist+1.0).ravel()*im)/np.sum(im)\n\n #Now shift the lists\n xlist = xlist - x0\n ylist = ylist - y0\n\n ftmatrices = [pulse(2*np.pi*uv[0], 2*np.pi*uv[1], pdim, dom=\"F\") * np.outer(np.exp(-2j*np.pi*ylist*uv[1]), np.exp(-2j*np.pi*xlist*uv[0])) for uv in uvlist] #list of matrices at each freq\n ftmatrices = np.reshape(np.array(ftmatrices), (len(uvlist), xdim*ydim))\n return ftmatrices\n\n\n\ndef ticks(axisdim, psize, nticks=8):\n \"\"\"Return a list of ticklocs and ticklabels\n psize should be in desired units\n \"\"\"\n\n axisdim = int(axisdim)\n nticks = int(nticks)\n if not axisdim % 2: axisdim += 1\n if nticks % 2: nticks -= 1\n tickspacing = float((axisdim-1))/nticks\n ticklocs = np.arange(0, axisdim+1, tickspacing) - 0.5\n ticklabels= np.around(psize * np.arange((axisdim-1)/2.0, -(axisdim)/2.0, -tickspacing), decimals=1)\n return (ticklocs, ticklabels)\n\ndef power_of_two(target):\n \"\"\"Finds the next greatest power of two\n \"\"\"\n cur = 1\n if target > 1:\n for i in range(0, int(target)):\n if (cur >= target):\n return cur\n else: cur *= 2\n else:\n return 1\n\n\ndef paritycompare(perm1, perm2):\n \"\"\"Compare the parity of two permutations.\n Assume both lists are equal length and with same elements\n Copied from: http://stackoverflow.com/questions/1503072/how-to-check-if-permutations-have-equal-parity\n \"\"\"\n\n perm2 = list(perm2)\n perm2_map = dict((v, i) for i,v in enumerate(perm2))\n transCount=0\n for loc, p1 in enumerate(perm1):\n p2 = perm2[loc]\n if p1 != p2:\n sloc = perm2_map[p1]\n perm2[loc], perm2[sloc] = p1, p2\n perm2_map[p1], perm2_map[p2] = sloc, loc\n transCount += 1\n\n if not (transCount % 2): return 1\n else: return -1\n\n'''\ndef amp_debias(vis, sigma):\n \"\"\"Return debiased visibility amplitudes\n \"\"\"\n\n # !AC TODO: what to do if deb2 < 0? 
Currently we do nothing\n deb2 = np.abs(vis)**2 - np.abs(sigma)**2\n\n # alternative with no low-snr option: np.abs(np.abs(vis)**2 - np.abs(sigma)**2)**0.5*(np.abs(vis) > np.abs(sigma))\n\n if type(deb2) == float or type(deb2)==np.float64:\n if deb2 < 0.0: return np.abs(vis)\n else: return np.sqrt(deb2)\n else:\n lowsnr = deb2 < 0.0\n deb2[lowsnr] = np.abs(vis[lowsnr])**2\n return np.sqrt(deb2)\n'''\n\ndef sigtype(datatype):\n \"\"\"Return the type of noise corresponding to the data type\n \"\"\"\n\n datatype = str(datatype)\n if datatype in ['vis', 'amp']: sigmatype='sigma'\n elif datatype in ['qvis', 'qamp']: sigmatype='qsigma'\n elif datatype in ['uvis', 'uamp']: sigmatype='usigma'\n elif datatype in ['vvis', 'vamp']: sigmatype='vsigma'\n elif datatype in ['pvis', 'pamp']: sigmatype='psigma'\n elif datatype in ['pvis', 'pamp']: sigmatype='psigma'\n elif datatype in ['rrvis', 'rramp']: sigmatype='rrsigma'\n elif datatype in ['llvis', 'llamp']: sigmatype='llsigma'\n elif datatype in ['rlvis', 'rlamp']: sigmatype='rlsigma'\n elif datatype in ['lrvis', 'lramp']: sigmatype='lrsigma'\n elif datatype in ['m', 'mamp']: sigmatype='msigma'\n elif datatype in ['phase']: sigmatype='sigma_phase'\n elif datatype in ['qphase']: sigmatype='qsigma_phase'\n elif datatype in ['uphase']: sigmatype='usigma_phase'\n elif datatype in ['vphase']: sigmatype='vsigma_phase'\n elif datatype in ['pphase']: sigmatype='psigma_phase'\n elif datatype in ['mphase']: sigmatype='msigma_phase'\n elif datatype in ['rrphase']: sigmatype='rrsigma_phase'\n elif datatype in ['llphase']: sigmatype='llsigma_phase'\n elif datatype in ['rlphase']: sigmatype='rlsigma_phase'\n elif datatype in ['lrphase']: sigmatype='lrsigma_phase'\n\n else: sigmatype = False\n\n return sigmatype\n\n\ndef rastring(ra):\n \"\"\"Convert a ra in fractional hours to formatted string\n \"\"\"\n h = int(ra)\n m = int((ra-h)*60.)\n s = (ra-h-m/60.)*3600.\n out = \"%2i h %2i m %2.4f s\" % (h,m,s)\n return out\n\ndef decstring(dec):\n \"\"\"Convert a dec in fractional degrees to formatted string\n \"\"\"\n\n deg = int(dec)\n m = int((abs(dec)-abs(deg))*60.)\n s = (abs(dec)-abs(deg)-m/60.)*3600.\n out = \"%2i deg %2i m %2.4f s\" % (deg,m,s)\n return out\n\ndef gmtstring(gmt):\n \"\"\"Convert a gmt in fractional hours to formatted string\n \"\"\"\n\n if gmt > 24.0: gmt = gmt-24.0\n h = int(gmt)\n m = int((gmt-h)*60.)\n s = (gmt-h-m/60.)*3600.\n out = \"%02i:%02i:%2.4f\" % (h,m,s)\n return out\n\n#TODO fix this hacky way to do it!!\ndef gmst_to_utc(gmst,mjd):\n \"\"\"Convert gmst times in hours to utc hours using astropy\n \"\"\"\n\n mjd=int(mjd)\n time_obj_ref = at.Time(mjd, format='mjd', scale='utc')\n time_sidereal_ref = time_obj_ref.sidereal_time('mean', 'greenwich').hour\n time_utc = (gmst - time_sidereal_ref) * 0.9972695601848 \n return time_utc\n\ndef utc_to_gmst(utc, mjd):\n \"\"\"Convert utc times in hours to gmst using astropy\n \"\"\"\n mjd=int(mjd) #MJD should always be an integer, but was float in older versions of the code\n time_obj = at.Time(utc/24.0 + np.floor(mjd), format='mjd', scale='utc')\n time_sidereal = time_obj.sidereal_time('mean','greenwich').hour\n return time_sidereal\n\ndef earthrot(vecs, thetas):\n \"\"\"Rotate a vector / array of vectors about the z-direction by theta / array of thetas (radian)\n \"\"\"\n\n if len(vecs.shape)==1:\n vecs = np.array([vecs])\n if np.isscalar(thetas):\n thetas = np.array([thetas for i in range(len(vecs))])\n\n # equal numbers of sites and angles\n if len(thetas) == len(vecs):\n rotvec = 
np.array([np.dot(np.array(((np.cos(thetas[i]),-np.sin(thetas[i]),0),(np.sin(thetas[i]),np.cos(thetas[i]),0),(0,0,1))), vecs[i])\n for i in range(len(vecs))])\n\n # only one rotation angle, many sites\n elif len(thetas) == 1:\n rotvec = np.array([np.dot(np.array(((np.cos(thetas[0]),-np.sin(thetas[0]),0),(np.sin(thetas[0]),np.cos(thetas[0]),0),(0,0,1))), vecs[i])\n for i in range(len(vecs))])\n # only one site, many angles\n elif len(vecs) == 1:\n rotvec = np.array([np.dot(np.array(((np.cos(thetas[i]),-np.sin(thetas[i]),0),(np.sin(thetas[i]),np.cos(thetas[i]),0),(0,0,1))), vecs[0])\n for i in range(len(thetas))])\n else:\n raise Exception(\"Unequal numbers of vectors and angles in earthrot(vecs, thetas)!\")\n\n #if rotvec.shape[0]==1: rotvec = rotvec[0]\n return rotvec\n\ndef elev(obsvecs, sourcevec):\n \"\"\"Return the elevation of a source with respect to an observer/observers in radians\n obsvec can be an array of vectors but sourcevec can ONLY be a single vector\n \"\"\"\n\n if len(obsvecs.shape)==1:\n obsvecs=np.array([obsvecs])\n\n anglebtw = np.array([np.dot(obsvec,sourcevec)/np.linalg.norm(obsvec)/np.linalg.norm(sourcevec) for obsvec in obsvecs])\n el = 0.5*np.pi - np.arccos(anglebtw)\n\n return el\n\ndef elevcut(obsvecs, sourcevec, elevmin=ELEV_LOW, elevmax=ELEV_HIGH):\n \"\"\"Return True if a source is observable by a telescope vector\n \"\"\"\n\n angles = elev(obsvecs, sourcevec)/DEGREE\n\n return (angles > elevmin) * (angles < elevmax)\n\ndef hr_angle(gst, lon, ra):\n \"\"\"Computes the hour angle for a source at RA, observer at longitude long, and GMST time gst\n gst in hours, ra & lon ALL in radian\n longitude positive east\n \"\"\"\n\n hr_angle = np.mod(gst + lon - ra, 2*np.pi)\n return hr_angle\n\ndef par_angle(hr_angle, lat, dec):\n \"\"\"Compute the parallactic angle for a source at hr_angle and dec for an observer with latitude lat.\n All angles in radian\n \"\"\"\n\n num = np.sin(hr_angle)*np.cos(lat)\n denom = np.sin(lat)*np.cos(dec) - np.cos(lat)*np.sin(dec)*np.cos(hr_angle)\n\n return np.arctan2(num, denom)\n\ndef xyz_2_latlong(obsvecs):\n \"\"\"Compute the (geocentric) latitude and longitude of a site at geocentric position x,y,z\n The output is in radians\n \"\"\"\n\n if len(obsvecs.shape)==1:\n obsvecs=np.array([obsvecs])\n out = []\n for obsvec in obsvecs:\n x = obsvec[0]\n y = obsvec[1]\n z = obsvec[2]\n lon = np.array(np.arctan2(y,x))\n lat = np.array(np.arctan2(z, np.sqrt(x**2+y**2)))\n out.append([lat,lon])\n\n out = np.array(out)\n\n #if out.shape[0]==1: out = out[0]\n return out\n","repo_name":"brandonschool/EventHorizonTelescope","sub_path":"ehtim/observing/obs_helpers.py","file_name":"obs_helpers.py","file_ext":"py","file_size_in_byte":28434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"34932828514","text":"import os\r\nimport re\r\nimport random\r\nfrom pymorphy2 import MorphAnalyzer\r\n\r\nmorph = MorphAnalyzer()\r\n\r\n\r\ninquiry = \"Введите фразу('выход' чтобы прекратить): \"\r\n\r\nexit_phrase = \"выход\" \r\n\r\n\r\ndef load_data(filepath):\r\n if not os.path.exists(filepath):\r\n return None\r\n with open(filepath, \"r\", encoding='utf-8') as file_handler:\r\n return file_handler.read()\r\n\r\n\r\ndef get_words(text):\r\n return re.findall(r\"\\w+\", text.lower())\r\n\r\n\r\ndef search_for_same_POS(list_of_words, tags):\r\n for word in list_of_words:\r\n if tags.gender is not None:\r\n if morph.parse(word)[0].tag.POS == tags.POS and morph.parse(word)[0].tag.gender == tags.gender:\r\n 
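# Accept a part-of-speech (and gender) match only when rnd > 0.9, i.e. roughly 10% of candidates, so repeated inputs yield varied replies.\r\n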
rnd = random.random()\r\n if rnd > 0.9:\r\n return word\r\n else:\r\n continue\r\n else:\r\n if morph.parse(word)[0].tag.POS == tags.POS:\r\n rnd = random.random()\r\n if rnd > 0.9:\r\n return word\r\n else:\r\n continue\r\n\r\n\r\ndef apply_inflect(new_word, tags):\r\n grammems = set()\r\n if tags.tense is not None:\r\n grammems.add(tags.tense)\r\n if tags.number is not None:\r\n grammems.add(tags.number)\r\n if tags.case is not None:\r\n grammems.add(tags.case)\r\n if tags.person is not None:\r\n grammems.add(tags.person)\r\n if tags.aspect is not None:\r\n grammems.add(tags.aspect)\r\n try:\r\n return morph.parse(new_word)[0].inflect(grammems).word\r\n except AttributeError:\r\n return new_word\r\n\r\n\r\ndef run_bot(list_of_words):\r\n while True:\r\n answer = input(inquiry)\r\n if answer == exit_phrase:\r\n break\r\n else:\r\n bot_answer = \"Бот отвечает: \"\r\n for word in answer.split(\" \"):\r\n punctuation = \"\"\r\n if word[-1] in \"!@#$%^&()?/,\":\r\n punctuation = word[-1]\r\n word = word[0:-1]\r\n word_tags = morph.parse(word)[0].tag\r\n new_word = search_for_same_POS(list_of_words, word_tags)\r\n bot_answer += apply_inflect(new_word, word_tags)\r\n bot_answer += punctuation\r\n bot_answer += \" \"\r\n print(bot_answer)\r\n\r\n\r\nif __name__ == '__main__':\r\n list_of_words = get_words(load_data(\"example.txt\"))\r\n run_bot(list_of_words)\r\n","repo_name":"mpinigina/HW_learnpython","sub_path":"Project5/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2749738368","text":"def team(s, e):\r\n global answer\r\n if len(start_team) == n // 2:\r\n result = cal()\r\n if answer > result:\r\n answer = result\r\n return\r\n \r\n for i in range(s, e):\r\n start_team.append(members[i])\r\n team(i + 1, e)\r\n start_team.pop()\r\n \r\ndef cal():\r\n link_team = list(set(members) - set(start_team))\r\n score_list = []\r\n for team in (start_team, link_team):\r\n temp = 0\r\n for i in range(len(team) - 1):\r\n for j in range(i + 1, len(team)):\r\n temp += status[team[i] - 1][team[j] - 1]\r\n temp += status[team[j] - 1][team[i] - 1]\r\n score_list.append(temp)\r\n \r\n return abs(score_list[0] - score_list[1])\r\n\r\nn = int(input())\r\nanswer = n * 2 * 100\r\nstatus = [list(map(int, input().split())) for _ in range(n)]\r\nmembers = list(range(1, n + 1))\r\nstart_team = []\r\n\r\nteam(0, n)\r\nprint(answer)","repo_name":"JeongBeomi/Algorithm_sol","sub_path":"백준/Silver/14889. 스타트와 링크/스타트와 링크.py","file_name":"스타트와 링크.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1735102813","text":"# Finding the Boundary with Binary Search\n# https://algo.monster/problems/binary_search_boundary\n\n# An array of boolean values is divided into two sections; the left section consists of all false and the right\n# section consists of all true. Find the boundary of the right section, i.e. the index of the first true element. If\n# there is no true element, return -1.\n#\n# Input: arr = [false, false, true, true, true]\n#\n# Output: 2\n#\n# Explanation: first true's index is 2.\n\nfrom typing import List\n\n\n# Time Complexity: O(log(n))\n#\n# The binary decision we have to make when we look at an element is\n# 1. if the element is false, we discard everything to the left and the current element itself.\n# 2. 
if the element is true, the current element could be the first true although there may be other true to the left.\n# We discard everything to the right but what about the current element?\n# We keep a variable boundary_index that represents the leftmost true's index currently recorded. If the current\n# element is true, then we update boundary_index with its index and discard everything to the right including the\n# current element itself since its index has been recorded by the variable.\n\ndef find_boundary(arr: List[bool]) -> int:\n l, r = 0, len(arr) - 1\n boundary = -1\n while l <= r:\n mid = (l + r) // 2\n if arr[mid]:\n boundary = mid\n r = mid - 1\n else:\n l = mid + 1\n\n return boundary\n\n\nif __name__ == '__main__':\n arr = [x == \"true\" for x in input().split()]\n res = find_boundary(arr)\n print(res)\n\n\n# Input:\n# test #1: false false true true true\n# test #2: true\n# test #3: false false false\n# test #4: true true true true true\n# test #5: false true\n","repo_name":"henrylin2008/Coding_Problems","sub_path":"LeetCode/Blind 75/Binary Search/Finding the boundary.py","file_name":"Finding the boundary.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"38043571493","text":"# Реализация генетического алгоритма\n\nimport numpy as np\nimport random as r\nfrom itertools import combinations\n\n# Диофантово уравнение\ndef f(a, b, c, d):\n\treturn abs(a + 2*b + 3*c + 4*d - 30)\n\n# Метод составления пар по индексам \ndef make_pairs(mins):\n\tpairs = list(set([i for i in combinations(mins, 2) if i[0]!=i[1]])) # Комбинации из 3 элементов с учетом порядка\n\tnew_pairs = [[i[0],i[1]] for i in pairs] # Комбинации в списке\n\tfor el in list(set([0,1,2,3,4])-set(mins)): # Добавляем остальные индексы\n\t\tnew_pairs.append([mins[0], el]) # Добавляем индекс к самому сильному\n\tfor i in range(len(new_pairs)): # Проходим по всем парам\n\t\tif r.randint(0,1): new_pairs[i] = new_pairs[i][::-1] # Меняем порядок пары\n\tr.shuffle(new_pairs) # Меняем случайно порядок пар\n\treturn new_pairs # Возвращаем список 5 пар\n\n# Метод определения нужного индекса для скрещивания генов родителей на конкретном шаге\ndef circle_of_inds(index, number_of_elements, number_of_lists):\n\tcnt = 1\n\tlst = []\n\tfor _ in range(number_of_lists): # Проходим по количеству списков\n\t\tif cnt < number_of_elements: lst.append(cnt) # Если счетчик меньше количества списков, то добавляем его в список\n\t\telse: \n\t\t\tcnt = 1 # Иначе ставим стартовое значение счетчику \n\t\t\tlst.append(cnt) # И добавляем его в список\n\t\tcnt += 1\n\treturn lst[index] # Возвращаем нужный индекс на конкретном шаге\n\n# Генерируем новое поколение\ndef generate_mutation(matrix, pairs):\n\tnew_matrix = []\n\tfor i in range(len(pairs)): # Проходимся по парам\n\t\tk = circle_of_inds(i, len(matrix[0]), len(matrix)) # Находим нужны�� индекс для среза\n\t\tnew_matrix.append(np.concatenate((matrix[pairs[i][0]][:k],matrix[pairs[i][1]][k:]))) # Скрещиваем родителей и получаем нового потомка\n\treturn np.array(new_matrix) # Возвращаем новое поколение\n\n# Метод нахождения коэффициентов выживаемости для таблицы\ndef make_surv(matrix):\n\tsurv = [[f(a,b,c,d)] for a,b,c,d in matrix]\n\tfor i in range(len(matrix)):\n\t\tsurv[i].append(i)\n\treturn surv\n\n# Метод нахождения тройки минимальных коэффициентов выживаемости\ndef make_mins(surv):\n\tmins = []\n\tfor lst in sorted(surv, key=lambda el: el[0])[:3]:\n\t\tmins.append(lst[-1])\n\treturn mins\n\n# 
Метод изменения максимального коэффициента выживаемости\ndef change_max_descendant(new_matrix):\n\tsurv = make_surv(new_matrix) # Находим коэффициенты выживаемости\n\tmx = sorted(surv, key=lambda el: el[0], reverse=True)[0] # Находим максимальный коэффициент\n\twhile True:\n\t\ta, b, c, d = r.randint(1, 29), r.randint(1, 29), r.randint(1, 29), r.randint(1, 29) # Генерируем 4 гена\n\t\tif f(a, b, c, d) < mx[0]: # Если коэффициент стал меньше максимального\n\t\t\tnew_matrix[mx[1]] = [a, b, c, d] # То перезаписываем гены\n\t\t\tbreak\n\treturn new_matrix # Возвращаем матрицу генов\n\n# По приципу Дарвина создаём новую пятерку из таблиц\ndef generate_matrix_min(matrix, new_matrix):\n\tm1 = make_surv(matrix) # Коэффициенты выживаемости родителей\n\tm2 = make_surv(new_matrix) # Коэффициенты выживаемости потомков\n\tfor i in range(len(m1)): m1[i].append(0) # Добавляем номер для первой таблицы\n\tfor i in range(len(m2)): m2[i].append(1) # Добавляем номер для второй таблицы\n\tm = sorted(m1 + m2, key=lambda el: el[0])[:5] # Берём пятёрку минимальных коэффициентов из двух списков\n\tmatrix_min = []\n\tfor i in range(5): # Заполняем результирующую таблицу\n\t\tif m[i][-1] == 0: matrix_min.append(matrix[m[i][1]]) # Добавляем список из первой матрицы\n\t\telse: matrix_min.append(new_matrix[m[i][1]]) # Добавляем список из второй матрицы\n\treturn np.array(matrix_min)\n\n# Инициализиурем поколение хромосом\nmatrix = np.array([[r.randint(1,29) for _ in range(4)] for _ in range(5)])\n\n# Поколение хромосом, взятое из примера на ПЗ\n#matrix = np.array([\n#\t[1,28,15,3],\n#\t[14,9,2,4],\n#\t[13,5,7,3],\n#\t[23,8,16,19],\n#\t[9,13,5,2]\n#])\n\nprint('Инициализировали поколение хромосом:')\nprint(matrix)\nprint('Составляем списки коэффициентов выживаемости и лучшую тройку:')\nsurv = make_surv(matrix)\nmins = make_mins(surv)\nprint(surv, mins)\nprint('Составляем 5 пар для получения потомков:')\nprint(make_pairs(mins))\nprint('Составляем таблицу потомков:')\nnew_matrix = generate_mutation(matrix,make_pairs(mins))\nprint(new_matrix)\nprint('Меняем одного потомка с максимальным коэффициентом, получим измененную таблицу потомков:')\nnew_matrix = change_max_descendant(new_matrix)\nprint(new_matrix)\nprint('По принципу Дарвина составляем новую таблицу сильнейших:')\nresult_matrix = generate_matrix_min(matrix,new_matrix)\nprint(result_matrix)\nprint('Проверяем коэффициенты выживаемости для таблицы сильнейших:')\nsurv = make_surv(result_matrix)\nmins = make_mins(surv)\nprint(surv,mins)\nprint('Далее алгоритм идет по тому же пути, пока хотя бы один коэффициент выживаемости не будет равен 0')\n\ncnt = 2\n\nwhile True:\n\tprint(f'Шаг {cnt}:')\n\tmatrix = result_matrix\n\tsurv = make_surv(matrix)\n\tmins = make_mins(surv)\n\tnew_matrix = generate_mutation(matrix,make_pairs(mins))\n\tnew_matrix = change_max_descendant(new_matrix)\n\tresult_matrix = generate_matrix_min(matrix,new_matrix)\n\tsurv = make_surv(result_matrix)\n\tmins = make_mins(surv)\n\tprint(surv)\n\tif surv[0][0] == 0:\n\t\tprint(result_matrix)\n\t\tbreak\n\tcnt += 1\n","repo_name":"zhenyachess/Mathematical-methods-of-pattern-recognition","sub_path":"p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24605864627","text":"import pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom utils import LATEST_DATA, PROJECT_PATH\n\n\nmpl.rcParams[\"svg.hashsalt\"] = \"trending-header\"\n\n\ndef plot_header_img():\n 
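# Counts mails per year from LATEST_DATA, draws the trend as a line with the latest year highlighted, and saves the result as a transparent SVG header image under PROJECT_PATH/imgs.\n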
import matplotlib as mpl\n\n mpl.rcParams[\"axes.spines.left\"] = False\n mpl.rcParams[\"axes.spines.right\"] = False\n mpl.rcParams[\"axes.spines.top\"] = False\n mpl.rcParams[\"axes.spines.bottom\"] = False\n\n df = pd.read_csv(LATEST_DATA)\n df[\"query_date\"] = pd.to_datetime(df[\"query_date\"])\n df = df.query_date.groupby(df.query_date.dt.year).agg(\"count\")\n\n fig, ax = plt.subplots(1, figsize=(12, 3))\n\n ax.plot(df.index, df.values, color=\"#FF053E\", linewidth=4)\n ax.plot(\n df.index[-1],\n df.values[-1],\n \"o-\",\n linewidth=4,\n markerfacecolor=\"#FAF82E\",\n markeredgewidth=3,\n markersize=12,\n color=\"#FF053E\",\n )\n\n ax.set_xticks(df.index)\n ax.tick_params(colors=\"#777\")\n plt.savefig(\n PROJECT_PATH / \"imgs/year_mails_count.svg\",\n transparent=True,\n metadata={\"Date\": None},\n )\n\n\nif __name__ == \"__main__\":\n plot_header_img()\n","repo_name":"jaredyam/sl-mayor-mailbox","sub_path":"scripts/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"44004848136","text":"from pymysqlpool.pool import Pool\n\nfrom dbconnection.db_config import Config\n\n\nclass DatabasePool(object):\n INSTANCE = None\n\n def __init__(self, config):\n if self.INSTANCE is not None:\n raise ValueError(\"An instantiation already exists!\")\n else:\n self.__cnxPool = Pool(host=config.db_host, port=config.db_port, user=config.db_user\n , password=config.db_password, db=config.db_name)\n self.__cnxPool.init()\n\n @classmethod\n def get_instance(cls, config):\n if cls.INSTANCE is None:\n cls.INSTANCE = DatabasePool(config)\n return cls.INSTANCE;\n\n def get_connection(self):\n return self.__cnxPool.get_conn()\n\n @classmethod\n def pool_close(cls):\n cls.INSTANCE = None;\n\n def __enter__(self):\n self.conn = self.__cnxPool.get_conn()\n return self.conn\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.__cnxPool.release(self.conn)\n\n\nif __name__ == \"__main__\":\n config = Config(con_file='../resources/user_properties.ini')\n print(config)\n with DatabasePool.get_instance(config) as conn:\n print(\"conn\", conn)\n\n DatabasePool.pool_close()\n\n with DatabasePool.get_instance(config) as conn:\n print(\"conn\", conn)","repo_name":"MinSu-Kim/pyqt_erp","sub_path":"dbconnection/db_pool.py","file_name":"db_pool.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25440413077","text":"\nimport os\n\ndef list_files_to_txt(startpath):\n with open(os.path.join(startpath, \"directory_structure.txt\"), \"w\") as txt_file:\n for root, dirs, files in os.walk(startpath):\n level = root.replace(startpath, '').count(os.sep)\n indent = ' ' * 4 * (level)\n txt_file.write(f\"{indent}{os.path.basename(root)}/\\n\")\n subindent = ' ' * 4 * (level + 1)\n for f in files:\n txt_file.write(f\"{subindent}{f}\\n\")\n\n# Usage example; replace the directory path with the one you want to explore.\nlist_files_to_txt(r\"F:\\Matthew Theodore_Microscopy\\RNA Phages\\FISH\\biorep_1_FISH_test\")\n","repo_name":"matttheodore/Image_Analysis","sub_path":"Python/functions/list_directory_structure_GPT.py","file_name":"list_directory_structure_GPT.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"30249864012","text":"print(\"=\"*10)\nprint(\"CENTRAL\")\nprint(\"=\"*10)\nnumero1 = 
int(input(\"Digite um valor:\"))\nnumero2 = int(input(\"Digite um valor:\"))\n\nf = 0\nwhile f < 1:\n print(\"=\"*10)\n print(\" MENU\")\n print(\"=\"*10)\n operacao = int(input(\"[1] SOMA\\n[2] MULTIPLICAR\\n[3] MAIOR\\n[4] NOVOS NUMEROS\\n[5] SAIR DO PROGRAMA\\nEscolha:\"))\n print(\"-=-\"*3)\n if operacao == 1:\n soma = numero1 + numero2\n print(f\"A soma é {soma}\")\n elif operacao == 2:\n mult = numero1 * numero2\n print(f\"A multiplicação é {mult}\")\n elif operacao == 3:\n if numero1 > numero2:\n maior = numero1\n print(f\"O maior numero é {maior}\")\n elif numero1 < numero2:\n maior = numero2\n print(f\"O maior numero é {maior}\")\n elif numero1 == numero2:\n print(\"Sao iguais\")\n elif operacao == 4:\n numero1 = int(input(\"Digite um valor:\"))\n numero2 = int(input(\"Digite um valor:\"))\n elif operacao == 5:\n print(\"Encerrando...\")\n break\n","repo_name":"llRedXD/Cursos","sub_path":"CursoEmVideo/Python/Mundo2/Ex059.py","file_name":"Ex059.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23004287139","text":"###############################################################################\n# Script for reproducing the results of OQR paper\n###############################################################################\n\nimport time\nfrom reproducible_experiments.run_experiment import run_experiment\nfrom utils.penalty_multipliers import real_corr_per_dataset_per_loss\nprocesses_to_run_in_parallel = 1\n\nloss_functions = ['batch_qr', 'batch_int', 'batch_wqr']\ndatasets = ['kin8nm', 'naval', 'meps_19', 'meps_20', 'meps_21', 'facebook_1', 'facebook_2',\n 'blog_data', 'bio']\n\n\ncorr_mults = real_corr_per_dataset_per_loss\nhsic_mults = corr_mults['hsic_qr']\nseed = (3, 10)\n# adding to a list all running configurations\nall_params = []\nfor data in datasets:\n for loss in loss_functions:\n all_params += [\n {\n 'loss': loss,\n 'data': data,\n 'data_type': 'REAL',\n 'seed': seed,\n 'corr_mult': 0,\n 'hsic_mult': 0,\n 'method': 'QR'\n },\n {\n 'loss': loss,\n 'data': data,\n 'data_type': 'REAL',\n 'seed': seed,\n 'corr_mult': corr_mults[loss.replace(\"batch_\", \"\")][data],\n 'hsic_mult': 0,\n 'method': 'QR'\n\n }]\n all_params += [{\n 'loss': 'batch_qr',\n 'data': data,\n 'data_type': 'REAL',\n 'seed': seed,\n 'corr_mult': 0,\n 'hsic_mult': hsic_mults[data],\n 'method': 'QR'\n\n }]\n\n\nprocesses_to_run_in_parallel = min(processes_to_run_in_parallel, len(all_params))\n\n\nif __name__ == '__main__':\n print(\"jobs to do: \", len(all_params))\n\n # initializing the first workers\n workers = []\n jobs_finished_so_far = 0\n assert len(all_params) >= processes_to_run_in_parallel\n for _ in range(processes_to_run_in_parallel):\n curr_params = all_params.pop(0)\n p = run_experiment(curr_params)\n workers.append(p)\n\n # creating a new process when an old one dies\n while len(all_params) > 0:\n dead_workers_indexes = [i for i in range(len(workers)) if (workers[i].poll() is not None)]\n for i in dead_workers_indexes:\n worker = workers[i]\n worker.communicate()\n jobs_finished_so_far += 1\n if len(all_params) > 0:\n curr_params = all_params.pop(0)\n p = run_experiment(curr_params)\n workers[i] = p\n if jobs_finished_so_far % processes_to_run_in_parallel == 0:\n print(f\"finished so far: {jobs_finished_so_far}, {len(all_params)} jobs left\")\n\n time.sleep(5)\n\n # joining all last processes\n for worker in workers:\n worker.communicate()\n jobs_finished_so_far += 1\n\n 
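The scheduling loop in this record keeps at most `processes_to_run_in_parallel` subprocesses alive, polls each one for exit, and refills freed slots from the remaining job list. A self-contained sketch of that poll-and-refill pattern, with a hypothetical job list (assumes a `python` executable on PATH):

```python
import subprocess
import time

jobs = [["python", "-c", f"print({i})"] for i in range(5)]  # hypothetical jobs
max_workers = 2

# Start the first wave of workers.
workers = [subprocess.Popen(jobs.pop(0)) for _ in range(max_workers)]

while jobs:
    for i, worker in enumerate(workers):
        if worker.poll() is not None and jobs:  # slot i is free: reap it, refill it
            worker.communicate()
            workers[i] = subprocess.Popen(jobs.pop(0))
    time.sleep(0.1)

for worker in workers:
    worker.communicate()  # join whatever is still running
```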
print(\"finished all\")\n","repo_name":"Shai128/oqr","sub_path":"reproducible_experiments/run_all_real_data_experiments.py","file_name":"run_all_real_data_experiments.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"62"} +{"seq_id":"73430211077","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import gettext as _\n\nUNDEFINED_LBL = '-- ' + _('Prefer not to say') + ' --'\n\nGENDERS = (\n\t(' ', UNDEFINED_LBL),\n    ('M', _('Male')),\n    ('F', _('Female')),\n)\n\nKEYS = ['tbgs1', 'tlyon3_sme', 'thdsrmzdlnegdr', 'uknfcrslf', 'skyngdckmtf']\nSERVERS = ['aws', 'gcloud', 'cpython', 'apache', 'dummy']\nPROVIDERS = ['es_MX', 'es_ES', 'en_US', 'en_UK', 'es']\nIPS = ['local', 'remote', '127.0.0.1', 'ipv4', 'ipv6']\nCRIP_UID = ['309160-@UID@d759-01a-uzpz-123lkj', 'abc62k-@UID@dpemc1390kda-84jk-v', 'uer932-@UID@d3ix-u93-021m-coakc4', '3a9c16-@UID@d6p37-e-i5md', 'coak33-@UID@dkd7-hashmd']\n\ndef validate_recaptcha(recaptcha_response):\n\timport json\n\timport urllib.parse\n\timport urllib.request\n\n\turl = 'https://www.google.com/recaptcha/api/siteverify'\n\n\tvalues = {\n\t\t'secret': '6LeI14wUAAAAAOxMiweSxXlmJxvhMULrE0X6-oD7',\n\t\t'response': recaptcha_response\n\t}\n\n\tdata = urllib.parse.urlencode(values).encode()\n\treq = urllib.request.Request(url, data=data)\n\tresponse = urllib.request.urlopen(req)\n\n\treturn json.loads(response.read().decode())\n\ndef retrieve_recaptcha_error(error_codes):\n\tmsg = _('reCAPTCHA validation has failed') + '. '\n\tmsg += _('Retry, and if the problem persists get in touch with the system administrator and report')\n\tmsg += ': '\n\n\tif 'invalid-input-secret' in error_codes:\n\t\tmsg += _('The secret parameter is invalid or malformed').lower()\n\n\tif 'missing-input-secret' in error_codes:\n\t\tmsg += ', ' + _('The secret parameter is missing').lower()\n\n\tif 'missing-input-response' in error_codes:\n\t\tmsg += ', ' + _('The response parameter is missing').lower()\n\n\tif 'invalid-input-response' in error_codes:\n\t\tmsg += ', ' + _('The response parameter is invalid or malformed').lower()\n\n\tif 'bad-request' in error_codes:\n\t\tmsg += ', ' + _('The request is invalid or malformed').lower()\n\n\treturn msg\n\ndef get_main_url(request):\n\turl=request.build_absolute_uri()\n\treturn url[:url.index(request.get_full_path())]","repo_name":"EDario333/adan","sub_path":"web/app/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23429885400","text":"import os\r\nimport csv\r\n\r\ndef rename_files_in_folders(root_folder):\r\n    try:\r\n        csv_file_path = f'{root_folder}_originalfiles.csv'\r\n        with open(csv_file_path, 'w', newline='') as csvfile:\r\n            csv_writer = csv.writer(csvfile)\r\n            csv_writer.writerow(['Original File Name', 'New File Name'])\r\n\r\n            for folder_name, _, files in os.walk(root_folder):\r\n                folder_base = os.path.basename(folder_name)\r\n                files.sort()\r\n                file_counter = 1\r\n\r\n                for file_name in files:\r\n                    file_base, file_extension = os.path.splitext(file_name)\r\n                    new_file_name = f\"{folder_base}_{'{:04d}a'.format(file_counter)}{file_extension}\"\r\n                    file_counter += 1\r\n\r\n                    old_path = os.path.join(folder_name, file_name)\r\n                    new_path = os.path.join(folder_name, new_file_name)\r\n\r\n                    try:\r\n                        os.rename(old_path, new_path)\r\n                        csv_writer.writerow([file_name, new_file_name])\r\n                    except Exception as 
rename_error:\r\n print(f\"Error renaming file '{file_name}' in folder '{folder_name}': {rename_error}\")\r\n continue # Continue to the next file in case of an error\r\n\r\n print(f\"Successfully renamed files in {folder_name}\")\r\n\r\n print(f\"CSV file '{csv_file_path}' created successfully.\")\r\n\r\n except Exception as e:\r\n print(f\"An unexpected error occurred: {e}\")\r\n raise\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n root_folder = input('Enter the path to the directory: ')\r\n\r\n if not os.path.exists(root_folder) or not os.path.isdir(root_folder):\r\n raise FileNotFoundError(f\"The specified directory '{root_folder}' does not exist.\")\r\n\r\n rename_files_in_folders(root_folder)\r\n print(\"File renaming completed successfully.\")\r\n\r\n except Exception as main_error:\r\n print(f\"An unexpected error occurred: {main_error}\")\r\n","repo_name":"gmurphyuab/dnddigipres","sub_path":"DroboProject/3_Folder2Filerenamer.py","file_name":"3_Folder2Filerenamer.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36695211603","text":"\"\"\"Creates a mirror of your GitHub repositories that is suitable for incremental backup.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport argparse\nimport errno\nimport logging\nimport os\nimport re\nimport shutil\nimport signal\nimport stat\nimport sys\n\nimport pcli.log\nimport psh\nimport psys.daemon\nimport requests\n\nfrom psys import eintr_retry\n\nlog = logging.getLogger(\"git-backup\")\n\n_LOCK_FILE_NAME = \".lock\"\n\n\nclass Error(Exception):\n def __init__(self, *args, **kwargs):\n message, args = args[0], args[1:]\n super(Error, self).__init__(\n message.format(*args, **kwargs) if args or kwargs else message)\n\n\ndef main():\n try:\n _configure_signal_handling()\n\n args = _parse_args()\n backup_dir = args.backup_dir\n\n pcli.log.setup(\n name=\"git-backup\", debug_mode=args.debug,\n level=logging.WARNING if not args.debug and args.cron else None)\n\n _check_backup_dir(backup_dir)\n\n lock_file_path = os.path.expanduser(os.path.join(backup_dir, _LOCK_FILE_NAME))\n\n try:\n lock_file_fd = psys.daemon.acquire_pidfile(lock_file_path)\n except psys.daemon.PidFileLockedError as e:\n if args.cron:\n log.debug(\"Exiting: %s\", e)\n else:\n raise Error(\"{}\", e)\n except psys.daemon.PidFileLockError as e:\n raise Error(\"{}\", e)\n else:\n try:\n _backup(args.user, backup_dir)\n finally:\n try:\n os.unlink(lock_file_path)\n except EnvironmentError as e:\n log.error(\"Failed to delete lock file '%s': %s.\", lock_file_path, e)\n finally:\n eintr_retry(os.close)(lock_file_fd)\n except Error as e:\n sys.exit(\"Error: {}\".format(e))\n\n\ndef _check_backup_dir(backup_dir):\n for forbidden_dir in \"/\", os.path.expanduser(\"~\"):\n try:\n forbidden = os.path.samefile(backup_dir, forbidden_dir)\n except EnvironmentError as e:\n if e.errno != errno.ENOENT:\n raise Error(\"Failed to check '{}' backup directory against '{}': {}.\", backup_dir, forbidden_dir, e)\n else:\n if forbidden:\n raise Error(\"Invalid backup directory '{}': it mustn't be / or your home directory \"\n \"because this script deletes all contents of the backup directory.\", backup_dir)\n\n\ndef _backup(user, backup_dir):\n repositories = sorted(_get_user_repositories(user), key=lambda name: name.lower())\n\n if repositories:\n log.info(\"User %s has %s repositories: %s.\", user, len(repositories), \", \".join(repositories))\n\n name_re = 
re.compile(r\"^[a-zA-Z0-9_-][a-zA-Z0-9._-]*\")\n        for name in repositories[:]:\n            if name_re.search(name) is None:\n                log.error(\"Got an invalid repository name: '%s'. Ignore it.\", name)\n                repositories.remove(name)\n    else:\n        log.info(\"User %s doesn't have any repositories.\", user)\n\n    _cleanup(backup_dir, repositories)\n\n    for name in repositories:\n        url = \"https://github.com/{user}/{name}.git\".format(user=user, name=name)\n        _mirror_repo(name, url, backup_dir)\n\n\ndef _cleanup(backup_dir, repositories):\n    try:\n        files = os.listdir(backup_dir)\n    except EnvironmentError as e:\n        raise Error(\"Unable to list '{}' directory: {}.\", backup_dir, e)\n\n    cleanup_files = set(files) - set(repositories) - {_LOCK_FILE_NAME}\n\n    for file_name in cleanup_files:\n        path = os.path.join(backup_dir, file_name)\n\n        if file_name.startswith(\".\"):\n            log.debug(\"Removing '%s'.\", path)\n        else:\n            log.warning(\"Remove deleted repository '%s'.\", file_name)\n\n        _rm_path(path)\n\n\ndef _rm_path(path):\n    def log_error(error_path, error):\n        log.error(\"Failed to remove '%s': %s.\", error_path, error)\n\n    try:\n        if stat.S_ISDIR(os.lstat(path).st_mode):\n            shutil.rmtree(path, onerror=lambda func, path, excinfo: log_error(path, excinfo[1]))\n        else:\n            os.unlink(path)\n    except EnvironmentError as e:\n        log_error(path, e)\n\n\ndef _get_user_repositories(user):\n    repos = set()\n    max_pages = 100\n    url = \"https://api.github.com/users/{user}/repos\".format(user=user)\n\n    for page in range(1, max_pages + 1):\n        try:\n            response = requests.get(url, params={\"page\": page}, timeout=30)\n            if response.status_code != requests.codes.ok:\n                raise Error(response.reason)\n\n            try:\n                if \"application/json\" not in response.headers.get(\"Content-Type\", \"\"):\n                    raise ValueError\n\n                repos_info = response.json()\n                if not isinstance(repos_info, list):\n                    raise ValueError\n            except ValueError:\n                raise Error(\"Server returned an invalid response.\")\n        except (requests.RequestException, Error) as e:\n            raise Error(\"Failed to get a list of user repositories from {}: {}\", url, e)\n\n        if not repos_info:\n            break\n\n        repos.update(repo_info[\"name\"] for repo_info in repos_info)\n    else:\n        log.error(\"Got too many repositories from %s (>%s pages). 
Skip the rest of pages.\", url, page)\n\n return list(repos)\n\n\ndef _mirror_repo(name, url, backup_dir):\n backup_path = os.path.join(backup_dir, name)\n temp_path = os.path.join(backup_dir, \".\" + name)\n\n if os.path.exists(backup_path):\n log.info(\"Syncing %s...\", name)\n\n try:\n _git(\"-C\", backup_path, \"fetch\")\n except psh.ExecutionError as e:\n log.error(\"Failed to sync %s repository: %s.\", name, e)\n else:\n log.info(\"Mirroring %s...\", name)\n\n try:\n _git(\"clone\", \"--mirror\", url, temp_path)\n _git(\"-C\", temp_path, \"gc\", \"--aggressive\")\n\n try:\n os.rename(temp_path, backup_path)\n except EnvironmentError as e:\n raise Error(\"Unable to rename '{}' to '{}': {}.\", temp_path, backup_path, e)\n except (psh.ExecutionError, Error) as e:\n log.error(\"Failed to mirror %s: %s.\", name, e)\n\n\ndef _git(*args):\n process = psh.sh.git(*args)\n\n try:\n process.execute()\n except BaseException as error:\n try:\n process.wait(check_status=False, kill=signal.SIGTERM)\n except psh.InvalidProcessState:\n pass\n\n raise error\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=\"Creates a mirror of your GitHub repositories that is suitable for incremental backup.\")\n parser.add_argument(\"user\", help=\"GitHub user name\")\n parser.add_argument(\"backup_dir\", help=\"directory to backup the repositories to\")\n parser.add_argument(\"--cron\", action=\"store_true\", help=\"cron mode\")\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", help=\"debug mode\")\n args = parser.parse_args()\n\n if re.search(r\"^[a-zA-Z0-9._-]+\", args.user) is None:\n parser.error(\"Invalid user name.\")\n\n args.backup_dir = os.path.abspath(args.backup_dir)\n\n return args\n\n\ndef _configure_signal_handling():\n state = {\"terminating\": False}\n\n def terminate(signum, frame):\n if not state[\"terminating\"]:\n state[\"terminating\"] = True\n sys.exit(\"The program has been terminated.\")\n\n signal.signal(signal.SIGPIPE, signal.SIG_IGN)\n\n signal.signal(signal.SIGINT, terminate)\n signal.signal(signal.SIGTERM, terminate)\n signal.signal(signal.SIGQUIT, terminate)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KonishchevDmitry/git-backup","sub_path":"git_backup.py","file_name":"git_backup.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"39241250876","text":"import pandas as pd\nimport pickle\nfrom konlpy.tag import *\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntraindata = pd.read_csv(\"fainl.csv\")\n\ndataset = traindata.loc[:, ['ISBN_THIRTEEN_NO']]\ndataset = dataset.astype({'ISBN_THIRTEEN_NO':'str'})\n\npickle.dump(dataset, open('bookDataISBN.pickle','wb'))\n\ncomment = traindata['BOOK_INTRCN_CN']\ncomment = comment.values.tolist()\nprint(type(comment))\n\nokt = Okt()\n\ndef morph(input_data) : #형태소 분석\n preprocessed1 = okt.nouns(input_data)\n # result = [word for word in preprocessed1 if not word in stop_words]\n return ' '.join(preprocessed1)\n\nresult = []\nfor i in range(len(comment)) :\n result.append(morph(comment[i]))\n\ntfidfv = TfidfVectorizer().fit(result)\nprint(tfidfv.transform(result).toarray())\ntfidf_matrix = tfidfv.transform(result).toarray()\nprint(tfidfv.vocabulary_)\n\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ncosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)\nprint('코사인 유사도 연산 결과 :',cosine_sim.shape)\n\npickle.dump(cosine_sim, 
open('cosine_sim.pickle','wb'))","repo_name":"HASHTA-CapstoneDesign/LibraryRenewal_AI","sub_path":"Contents-based_Recommender_System.py","file_name":"Contents-based_Recommender_System.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13978521972","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 8 18:14:01 2020\nAuthor : Teskann\n\nEdit and run this file to run Docapy on your project !\n\"\"\"\n\nif __name__ == \"__main__\":\n # Import Docapy core\n from docapy import html_for_project\n\n # Project Name (edit it)\n project_name = \"Your Project Name Here\"\n\n # Project Path\n project_path = \"Your project path here (absolute path recommended)\"\n\n # Repository Link\n repo_link = \"Link of the repository of this project goes here\"\n\n # Accent color.\n # This variable can be :\n # - \"blue\"\n # - \"cyan\"\n # - \"red\"\n # - \"green\"\n # - \"orange\"\n # - \"purple\"\n # - \"#XXXXXX\" where XXXXXX is a hexadecimal color value (custom)\n color = \"Accent color of the generated website goes here\"\n\n # Running Docapy (do not edit this part)\n html_for_project(project_path,\n project_name,\n repo_link,\n color)\n","repo_name":"Teskann/Docapy","sub_path":"edit_and_run_me.py","file_name":"edit_and_run_me.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74249849797","text":"#!/usr/bin/python3\n'''Search for a State object\n'''\nimport sys\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom model_state import Base, State\n\nif len(sys.argv) < 5:\n print(\"4 args required: \")\n sys.exit()\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n query = session.query(State).filter(State.name == sys.argv[4])\\\n .order_by(State.id.asc())\n result = query.first()\n if result:\n print(f'{result.id}')\n else:\n print(\"Not found\")\n session.close()\n","repo_name":"jaymo99/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"27514628877","text":"from request_client.models import SearchKeyword\nfrom request_client.serializer import PostCommentsSerializer\nfrom typing import Dict\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass SerializerFunctions:\n def save_extracted_comments(self, comments: list, search_keyword:str) -> Dict:\n '''\n Serialize the collected post comments PostComment models\n '''\n print('save_extracted_comments', comments)\n keyword_instance, _ = SearchKeyword.objects.get_or_create(\n keyword=search_keyword)\n data_set = {'fk_keyword': keyword_instance.keyword_id}\n\n comments_data = [{\n 'comment': comment,\n **data_set\n } for comment in comments]\n instance = PostCommentsSerializer(data=comments_data, many=True)\n\n if instance.is_valid():\n instance.save()\n print('is_valid', comments_data)\n return {'data': instance.data, 'saved': True}\n else:\n return {'error': instance.errors, 'saved': 
False}\n","repo_name":"adarsh2104/facebook_comments_analysis","sub_path":"request_client/utils/serializer_functions.py","file_name":"serializer_functions.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71456364999","text":"#!/usr/bin/env python3\n\nimport errno\nimport os\nimport sys\n\nimport pysetns\n\n\ndef foo(ns):\n    path = '/proc/%s/mounts' % ns.target_pid\n    print(pid, pysetns.get_ns_string(ns.namespaces))\n\n    if not os.path.exists('/proc'):\n        err_code = errno.EAGAIN if ns.namespaces & pysetns.NS_MNT else errno.ENOENT\n        print('[NS] \"/proc\" is not found. retry', file=sys.stderr)\n        return err_code\n    if not os.path.exists(path):\n        path = '/proc/self/mounts'\n        if not os.path.exists(path):\n            print('[NS] Path does not exist for pid=%s: \"%s\"' % (ns.target_pid, path), file=sys.stderr)\n            return errno.ENOENT\n    for m in open(path).readlines():\n        dev, mntp, tfs, opts, freq, passno = m.split()\n        print('[NS] mntp=%s' % mntp)\n\n\ndef bar(pid, namespaces):\n    ns = pysetns.Namespace(pid, namespaces, keep_caps=True)\n    ns.enter(foo, ns)\n    for nst, msg in ns.errors.items():\n        print('[NS] ERROR: <%s> %s' % (pysetns.get_ns_string(nst).upper(), msg), file=sys.stderr)\n    return ns.retry\n\n\nif __name__ == '__main__':\n    pid = os.getpid()\n    if bar(pid, pysetns.NS_MNT | pysetns.NS_PID | pysetns.NS_USER):\n        bar(pid, pysetns.NS_PID | pysetns.NS_USER)\n","repo_name":"baskiton/pysetns","sub_path":"examples/mountpoints.py","file_name":"mountpoints.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"14584471381","text":"import torch\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils, datasets\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom PIL import Image\nimport os\nimport csv\n\nDEFAULT_DATA_PATH = 'riri145/img/'\nDEFAULT_CSV_PATH = 'riri145/clean-data.csv'\nDEFAULT_SAVED_LABELS = 'riri145/preloaded.pt'\n\nclass InstagramDataset(Dataset):\n    '''\n    Characterizes a dataset for PyTorch.\n    '''\n    def __init__(self, dataset_path=DEFAULT_DATA_PATH, csv_path=DEFAULT_CSV_PATH,\n                 label_path = DEFAULT_SAVED_LABELS, transform=None):\n\n        # Checks if pre-saved training labels are available.\n        # If not, loads from csv file and saves to a .pt file\n        # (pytorch default save extension) to be loaded up in the future.\n        if not os.path.exists(label_path):\n\n            # Opens CSV file for reading\n            csv_file = open(csv_path)\n            csv_reader = csv.reader(csv_file, delimiter=',')\n\n            # Creates dictionary to save all image names and labels\n            data_dict = {\n                'image_names': [],\n                'labels': [],\n            }\n\n            # Iterates through csv file and grabs image names + labels\n            for idx, line in enumerate(csv_reader):\n                if idx > 0 and ('jpg' in line[3] or 'png' in line[3]):\n                    data_dict['image_names'].append(line[3])\n                    if int(line[-1]) == 1:\n                        data_dict['labels'].append(torch.tensor([1, 0], dtype=torch.float32))\n                    else:\n                        data_dict['labels'].append(torch.tensor([0, 1], dtype=torch.float32))\n\n            # Saves for easy loading next time\n            if not os.path.isdir(label_path[:label_path.rfind('/')]):\n                os.makedirs(label_path[:label_path.rfind('/')])\n            torch.save(data_dict, label_path)\n\n        # Otherwise, just load the pre-saved dict.\n        else:\n            data_dict = torch.load(label_path)\n\n        # Saves state variables\n        self.data_dict = data_dict\n        self.dataset_path = dataset_path\n        self.label_path = label_path\n
        self.transform = transform\n\n\n    def __len__(self):\n        '''Denotes the total number of samples'''\n        return len(self.data_dict['labels'])\n\n\n    def __getitem__(self, index):\n        '''Generates one sample of data'''\n        # Select sample\n        image_name = self.data_dict['image_names'][index]\n\n        # Load data and get label (hacky - already preprocessed)\n        X = self.transform(Image.open(self.dataset_path + image_name))\n        y = self.data_dict['labels'][index]\n\n        return X, y\n\n\ndef get_dataloaders(dataset_path=DEFAULT_DATA_PATH, csv_path=DEFAULT_CSV_PATH,\n                    label_path = DEFAULT_SAVED_LABELS, val_split=0.2, batch_sz=4,\n                    num_threads=1, shuffle_val=True):\n    '''\n    Grabs dataloaders for train/val sets.\n    \n    Keyword arguments:\n    > dataset_path (string) -- Path to folder where all dataset images are stored.\n    > csv_path (string) -- Path to csv file with image names and labels.\n    > label_path (string) -- Path to saved labels (should be .pt file).\n    > val_split (float) -- Fraction of training data to be used as validation set.\n    > batch_sz (int) -- Batch size to be grabbed from DataLoader.\n    > num_threads (int) -- Number of threads with which to load data.\n    > shuffle_val (bool) -- Whether to shuffle validation set indices.\n\n    Return value: (train_dataloader, val_dataloader)\n    > train_dataloader -- a torch.utils.data.DataLoader wrapper around\n        the specified dataset's training set.\n    > val_dataloader -- a torch.utils.data.DataLoader wrapper around\n        the specified dataset's validation set.\n    '''\n\n    # Describes the transforms we want: resize to a fixed size, then convert to a tensor.\n    transform = transforms.Compose([\n        transforms.Resize((128, 128)), # resize image to 128 x 128.\n        transforms.ToTensor(),\n    ])\n\n    # Constructs InstagramDataset to load data from\n    dataset = InstagramDataset(dataset_path=dataset_path, csv_path=csv_path,\n                               label_path=label_path, transform=transform)\n\n    # Grabs train/val split\n    num_train = len(dataset)\n    indices = list(range(num_train))\n    split = int(np.floor(val_split * num_train))\n\n    # Shuffle indices if necessary for slicing val set\n    if shuffle_val:\n        np.random.shuffle(indices)\n\n    # Performs train/val split\n    train_idx, valid_idx = indices[split:], indices[:split]\n    train_sampler = SubsetRandomSampler(train_idx)\n    val_sampler = SubsetRandomSampler(valid_idx)\n\n    # Constructs dataloader wrappers around InstagramDataset training and test sets\n    train_dataloader = DataLoader(dataset, batch_size=batch_sz, \n                                  num_workers=num_threads, sampler=train_sampler)\n    val_dataloader = DataLoader(dataset, batch_size=batch_sz, \n                                  num_workers=num_threads, sampler=val_sampler)\n\n    return (train_dataloader, val_dataloader)\n\n\ndef main():\n    train_dataloader, val_dataloader = get_dataloaders()\n    for i, thing in enumerate(train_dataloader):\n        input, output = thing\n        print(input.shape, output, output.shape)\n        if i == 5: break\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mbbbackus/InstagramPosts","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72868851398","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim \nimport numpy as np\nimport random \nfrom utils import *\nimport collections \nimport torch.nn.functional as F\n\n\nExperience=collections.namedtuple('Experience', field_names=[\n\t'state','action', 'reward', 'done', 'next_state'])\n\nclass REINFORCE_agent:\n\tdef __init__(self, env, gamma, epi_num, batch_size,
 learning_rate):\n\t\tself.env = env \n\t\tself.gamma = gamma \n\t\tself.epi_num = epi_num \n\t\tself.batch_size = batch_size\n\t\tself.learning_rate = learning_rate\n\t\tself.epi_total_reward = []\n\t\tself.epi_step_num = []\n\t\tself.epi_loss = []\n\t\tself._reset()\n\t\tself.init_pg_net()\n\n\tdef _reset(self):\n\t\tself.state = self.env.reset()\n\t\tself.total_reward = 0.0 \n\t\tself.step_num = 0 \n\t\tself.episode_exp = []\n\n\tdef init_pg_net(self):\n\t\tself.net = PG_net(self.env.observation_space.shape[0], self.env.action_space.n)\n\t\tself.optimizer = optim.Adam(self.net.parameters(), lr = self.learning_rate)\n\n\tdef take_action(self):\n\t\tstate_a = np.array([self.state], copy=False)\n\t\tstate_v = torch.tensor(state_a).to(device)\n\t\tlogits_v = self.net(state_v)\n\t\tprobs_v = F.softmax(logits_v, dim=1)\n\t\tprobs_a = probs_v.data.cpu().numpy()[0]  # batch of one: take the first row\n\t\taction = np.random.choice(len(probs_a), p = probs_a)\n\n\t\tnext_state, reward, done, info = self.env.step(action)\n\t\texp = Experience(self.state, action, reward, done, next_state)\n\t\tself.episode_exp.append(exp)\n\n\t\tself.total_reward += reward\n\t\tself.step_num += 1 \n\t\tself.state = next_state\n\t\treturn done\n\n\tdef update_net(self):\n\t\tstates = []\n\t\tactions = []\n\t\trewards = []\n\t\tfor step, exp in enumerate(self.episode_exp):\n\t\t\tstates.append(exp.state)\n\t\t\tactions.append(int(exp.action))\n\t\t\trewards.append(exp.reward)\n\t\t# discounted returns for the whole episode\n\t\tq_vals = cal_q_qvals(rewards, self.gamma)\n\n\t\tstates_v = torch.FloatTensor(states)\n\t\tactions_v = torch.LongTensor(actions)\n\t\tq_vals_v = torch.FloatTensor(q_vals)\n\t\tlogits_v = self.net(states_v)\n\t\tlog_prob_v = F.log_softmax(logits_v, dim=1)\n\t\tlog_prob_actions_v = q_vals_v*log_prob_v[range(len(states)), actions_v]\n\t\tloss_v = -log_prob_actions_v.mean()\n\t\tself.epi_loss.extend([loss_v.data.cpu().numpy()])\n\t\tself.optimizer.zero_grad()\n\t\tloss_v.backward()\n\t\tself.optimizer.step()\n\n\tdef run(self):\n\t\tfor epi in range(self.epi_num):\n\t\t\tself._reset()\n\t\t\tdone = False \n\t\t\twhile not done:\n\t\t\t\tdone = self.take_action()\n\n\t\t\tself.update_net() # update net at the end of each episode.\n\t\t\tif epi % 100 == 0:\n\t\t\t\tself.epi_total_reward.extend([self.total_reward])\n\t\t\t\tself.epi_step_num.extend([self.step_num])\n\t\t\t\tprint('REINFORCE ~~~ epi {}, total_reward {}, step_num {}'.format(epi, self.total_reward, self.step_num))\n\t\tprint('REINFORCE ~~~ Training Finished !')\n\t\treturn self.epi_total_reward, self.epi_step_num, self.epi_loss\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yang0110/RL-Algorithms-Implementation","sub_path":"agents/reinforce_agent.py","file_name":"reinforce_agent.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"3520261969","text":"from rest_framework.test import APITestCase\n\nfrom ..models import Worker\n\n\nclass TestWorkerListView(APITestCase):\n    def test_view_should_be_accessible(self):\n        response = self.client.get(\"/workers/\")\n        # dir(response) lists every attribute of the response object\n        # print(dir(response))\n        self.assertEqual(response.status_code, 200)\n\n    def test_view_should_render_list_of_worker_name(self):\n\n        # # uncomment to show the full assertion diff\n        # self.maxDiff = None\n\n        # Given\n        Worker.objects.create(\n            first_name='Narongvit',\n            last_name='Promkhana',\n            is_available=True,\n            primary_phone='087-784-878x',\n            secondary_phone='082-524-818x',\n            address='Geeky Base All 
Star',\n )\n\n Worker.objects.create(\n first_name='Bothon',\n last_name='Narongvit',\n is_available=True,\n primary_phone='084-874-978x',\n secondary_phone='089-925-848x',\n address='Geeky Base All Star',\n )\n\n # When\n response = self.client.get(\"/workers/\")\n\n expected = [\n {\n \"first_name\": \"Narongvit\",\n \"last_name\": \"Promkhana\",\n \"is_available\": True,\n \"primary_phone\": \"087-784-878x\",\n \"secondary_phone\": \"082-524-818x\",\n \"address\": \"Geeky Base All Star\",\n },\n {\n \"first_name\": 'Bothon',\n \"last_name\": \"Narongvit\",\n \"is_available\": True,\n \"primary_phone\": \"084-874-978x\",\n \"secondary_phone\": \"089-925-848x\",\n \"address\": \"Geeky Base All Star\",\n }\n ]\n\n self.assertEqual(response.data, expected)\n","repo_name":"bothonachiz/hello_django","sub_path":"safety/workers/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17001858711","text":"import renderdoc as rd\nimport rdtest\n\n\nclass GL_Separable_Geometry_Shaders(rdtest.TestCase):\n demos_test_name = 'GL_Separable_Geometry_Shaders'\n\n def check_capture(self):\n action = self.find_action(\"Draw\")\n\n self.controller.SetFrameEvent(action.eventId, False)\n\n postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)\n\n postvs_ref = {\n 0: {\n 'vtx': 0,\n 'idx': 0,\n 'gl_Position': [-0.5, -0.5, 0.0, 1.0],\n 'v2f_block.col': [0.0, 1.0, 0.0, 1.0],\n 'v2f_block.uv': [0.0, 0.0, 0.0, 1.0],\n },\n 1: {\n 'vtx': 1,\n 'idx': 1,\n 'gl_Position': [0.0, 0.5, 0.0, 1.0],\n 'v2f_block.col': [0.0, 1.0, 0.0, 1.0],\n 'v2f_block.uv': [0.0, 1.0, 0.0, 1.0],\n },\n 2: {\n 'vtx': 2,\n 'idx': 2,\n 'gl_Position': [0.5, -0.5, 0.0, 1.0],\n 'v2f_block.col': [0.0, 1.0, 0.0, 1.0],\n 'v2f_block.uv': [1.0, 0.0, 0.0, 1.0],\n },\n }\n\n self.check_mesh_data(postvs_ref, postvs_data)\n\n postgs_data = self.get_postvs(action, rd.MeshDataStage.GSOut, 0, action.numIndices*3)\n\n postgs_ref = {\n 0: {\n 'vtx': 0,\n 'idx': 0,\n 'gl_Position': [0.2, -0.5, 0.0, 1.0],\n 'v2f_block.col': [0.0, 1.0, 0.0, 1.0],\n 'v2f_block.uv': [0.0, 0.0, 0.0, 1.0],\n },\n 1: {\n 'vtx': 1,\n 'idx': 1,\n 'gl_Position': [0.7, 0.5, 0.0, 1.0],\n 'v2f_block.col': [0.0, 1.0, 0.0, 1.0],\n 'v2f_block.uv': [0.0, 1.0, 0.0, 1.0],\n },\n 4: {\n 'vtx': 4,\n 'idx': 4,\n 'gl_Position': [-0.7, 0.5, 0.0, 1.0],\n 'v2f_block.col': [1.0, 0.0, 1.0, 0.0],\n 'v2f_block.uv': [0.0, 1.0, 0.0, 1.0],\n },\n 5: {\n 'vtx': 5,\n 'idx': 5,\n 'gl_Position': [-0.2, -0.5, 0.0, 1.0],\n 'v2f_block.col': [1.0, 0.0, 1.0, 0.0],\n 'v2f_block.uv': [1.0, 0.0, 0.0, 1.0],\n },\n 8: {\n 'vtx': 8,\n 'idx': 8,\n 'gl_Position': [0.5, 0.2, 0.0, 1.0],\n 'v2f_block.col': [1.0, 0.0, 0.0, 1.0],\n 'v2f_block.uv': [1.0, 0.0, 0.0, 1.0],\n },\n }\n\n self.check_mesh_data(postgs_ref, postgs_data)\n\n pipe: rd.PipeState = self.controller.GetPipelineState()\n\n rt = pipe.GetOutputTargets()[0].resourceId\n self.check_pixel_value(rt, 0.5, 0.1, [1.0, 0.0, 0.0, 1.0])\n self.check_pixel_value(rt, 0.75, 0.5, [0.0, 1.0, 0.0, 1.0])\n self.check_pixel_value(rt, 0.25, 0.5, [1.0, 0.0, 1.0, 0.0])\n\n out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100), rd.ReplayOutputType.Texture)\n\n tex = rd.TextureDisplay()\n tex.resourceId = rt\n tex.overlay = rd.DebugOverlay.Drawcall\n out.SetTextureDisplay(tex)\n out.Display()\n\n eps = 1.0 / 256.0\n\n overlay_id: rd.ResourceId = out.GetDebugOverlayTexID()\n\n 
self.check_pixel_value(overlay_id, 200, 100, [0.8, 0.1, 0.8, 1.0], eps=eps)\n self.check_pixel_value(overlay_id, 50, 150, [0.8, 0.1, 0.8, 1.0], eps=eps)\n self.check_pixel_value(overlay_id, 350, 150, [0.8, 0.1, 0.8, 1.0], eps=eps)\n\n self.check_pixel_value(overlay_id, 200, 150, [0.0, 0.0, 0.0, 0.5], eps=eps)\n self.check_pixel_value(overlay_id, 200, 225, [0.0, 0.0, 0.0, 0.5], eps=eps)\n self.check_pixel_value(overlay_id, 75, 50, [0.0, 0.0, 0.0, 0.5], eps=eps)\n self.check_pixel_value(overlay_id, 350, 50, [0.0, 0.0, 0.0, 0.5], eps=eps)\n\n out.Shutdown()\n","repo_name":"baldurk/renderdoc","sub_path":"util/test/tests/GL/GL_Separable_Geometry_Shaders.py","file_name":"GL_Separable_Geometry_Shaders.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":7948,"dataset":"github-code","pt":"62"} +{"seq_id":"33577711428","text":"import os\nfrom tqdm import tqdm\nimport config\nfrom hash.hasher import *\nfrom mods.controller import *\nfrom wand import image\nfrom usda import usda\n\n\nreplacements = \"\"\"#usda 1.0\n(\n upAxis = \"Y\"\n)\n\nover \"RootNode\"\n{\n over \"meshes\"\n {\n $newdata$\n }\n}\n\"\"\"\n\n \nexample_mesh = \"\"\" \n over Xform \"$name$\"(\n prepend references = @./SubUSDs/meshes/$name$.usda@\n )\n {\n }\n\"\"\"\n\nexample_usda = \"\"\"#usda 1.0\n(\n customLayerData = {\n uint64 geometrydescriptor = 15755592595057390902\n uint64 indices = 0\n uint64 legacyindices = 0\n uint64 legacypositions0 = 0\n uint64 legacypositions1 = 0\n uint64 positions = 5445149271889707011\n uint64 texcoords = 16764550512849261066\n uint64 vertexlayout = 5377511480456297298\n uint64 vertexshader = 0\n }\n defaultPrim = \"$mesh$\"\n doc = \"Generated\"\n metersPerUnit = 1\n timeCodesPerSecond = 24\n upAxis = \"Y\"\n)\n\nover Xform \"$mesh$\"\n{\n token visibility = \"inherited\"\n\n over Mesh \"mesh\"\n {\n uniform bool doubleSided = 0\n int[] faceVertexCounts = [$faceVertexCounts$]\n int[] faceVertexIndices = [$faceVertexIndices$]\n normal3f[] normals = [$normals$]\n uniform token orientation = \"leftHanded\"\n point3f[] points = [$points$]\n texCoord2f[] primvars:st = [$uvs$] (\n interpolation = \"vertex\"\n )\n uniform token subdivisionScheme = \"none\"\n token visibility = \"inherited\"\n }\n}\n\n\"\"\"\n\nmod = \"\"\nRootNode_Looks = {}\nmat_names = {}\njson_data = {}\n\ndef saveAllTextures(mod_dir, replacements_file):\n global mod, RootNode_Looks, mat_names, json_data\n replacements_file = \"/meshes.usda\"\n replacements_file = replacements_file.replace(\"/\",\"\")\n\n replacements_file_dir = f\"{config.rtx_remix_dir}/mods/{mod_dir}/{replacements_file}\"\n isExist = os.path.exists(replacements_file_dir)\n usda_back = \"\"\n\n\n hasherObj = hasher()\n for x in tqdm( os.listdir(\"meshes/\"), desc=\"Converting...\" ):\n if x.endswith(\".obj\"):\n f = open(f\"meshes/{x}\", \"r\")\n data = f.read()\n f.close()\n\n points = \"\"\n normals = \"\"\n uvs = \"\"\n faceVertexIndices = \"\"\n faceVertexCounts = \"\"\n\n splitted = data.split(\"\\n\")\n for y in range(len(splitted)):\n temp = splitted[y]\n if( len( temp ) < 1 ):\n continue\n\n if( temp[0] == \"v\" and temp[1] != \"t\" and temp[1] != \"n\" ):\n points += \"(\" + temp.replace(\"v \",\"\").replace(\" \",\",\") + \"),\"\n continue\n\n if( temp[0] == \"v\" and temp[1] == \"n\" ):\n normals += \"(\" + temp.replace(\"vn \",\"\").replace(\" \",\",\") + \"),\"\n continue\n\n if( temp[0] == \"v\" and temp[1] == \"t\" ):\n uvs += \"(\" + temp.replace(\"vt \",\"\").replace(\" \",\",\") + \"),\"\n 
continue\n\n if( temp[0] == \"f\" ):\n faceVertexCounts += \"3,\"\n tempFace = temp.replace(\"f \",\"\").replace(\" \",\"/\").split(\"/\")\n faceVertexIndices += tempFace[0] + \",\" + tempFace[2] + \",\" + tempFace[4] + \",\"\n continue \n\n normals = normals[:-1]\n points = points[:-1]\n uvs = uvs[:-1]\n faceVertexCounts = faceVertexCounts[:-1]\n faceVertexIndices = faceVertexIndices[:-1]\n\n f = open(f\"{config.rtx_remix_dir}/mods/{mod_dir}/SubUSDs/meshes/\" + x.replace(\".obj\",\".usda\"), \"w\")\n f.write( example_usda.replace(\"$mesh$\",x.replace(\".obj\",\"\")).replace(\"$points$\",points).replace(\"$uvs$\",uvs).replace(\"$normals$\",normals).replace(\"$faceVertexCounts$\",faceVertexCounts).replace(\"$faceVertexIndices$\",faceVertexIndices) ) \n f.close()\n\n \n\n\n\n\n newData = example_mesh.replace(\"$name$\",x.replace(\".obj\",\"\"))\n usda_back = usda_back + newData\n\n\n\n print(\"\\nWriting replacements...\")\n nr = open(replacements_file_dir, \"w\")\n nr.write( replacements.replace(\"$newdata$\",usda_back) )\n nr.close()\n print(\"Done!\")\n\n nr = open(f\"{config.rtx_remix_dir}/mods/{mod_dir}/mod.usda\", \"r\")\n data = nr.read()\n nr.close()\n\n nr = open(f\"{config.rtx_remix_dir}/mods/{mod_dir}/mod.usda\", \"w\")\n nr.write(data)\n nr.close()\n\n #return files\n\n\nif __name__ == '__main__':\n mod_dir, replacements_file = modFolder()\n saveAllTextures(mod_dir, replacements_file)\n\n","repo_name":"alex-suspicious/OctoTex","sub_path":"write_model.py","file_name":"write_model.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"62"} +{"seq_id":"37942976174","text":"import gensim.downloader as download_api\nimport numpy as np\nfrom scipy.cluster.hierarchy import *\nfrom pymystem3 import Mystem\nimport csv\n\ndef convertTagToUniPos(yandexTag):\n mapping = {\n \"A\": \"ADJ\",\n \"ADV\": \"ADV\",\n \"ADVPRO\": \"ADV\",\n \"ANUM\": \"ADJ\",\n \"APRO\": \"DET\",\n \"COM\": \"ADJ\",\n \"CONJ\": \"SCONJ\",\n \"INTJ\": \"INTJ\",\n \"NONLEX\": \"X\",\n \"NUM\": \"NUM\",\n \"PART\": \"PART\",\n \"PR\": \"ADP\",\n \"S\": \"NOUN\",\n \"SPRO\": \"PRON\",\n \"UNKN\": \"X\",\n \"V\": \"VERB\"\n }\n\n return mapping[yandexTag]\n\n\ndef tag(processed):\n try:\n lemma = processed[\"analysis\"][0][\"lex\"].lower().strip()\n pos = processed[\"analysis\"][0][\"gr\"].split(',')[0]\n pos = pos.split('=')[0].strip()\n tagged = lemma + '_' + convertTagToUniPos(pos)\n return tagged\n except Exception:\n return None\n\n\ndef stemAndTag(text):\n m = Mystem()\n allProcessed = m.analyze(text)\n taggedLemmas = map(tag, allProcessed)\n taggedLemmas = list(filter(None, taggedLemmas))\n return taggedLemmas\n\n\ndef filterByModel(tokens, model):\n modelWords = set(model.index2word)\n return list(filter(lambda token: token in modelWords, tokens))\n\n\ndef prepareTopic(topic, model):\n return filterByModel(stemAndTag(topic), model)\n\n\ndef preparedDistance(preparedA, preparedB, model):\n return model.n_similarity(preparedA, preparedB)\n\n\ndef textDistance(textA, textB, model):\n preparedA = filterByModel(stemAndTag(textA), model)\n preparedB = filterByModel(stemAndTag(textB), model)\n\n return preparedDistance(preparedA, preparedB, model)\n\n\nmodel = download_api.load('word2vec-ruscorpora-300')\n\ntopicsFile = open('topics.txt', 'r')\ntopics = [l.strip('\"') for l in topicsFile.read().splitlines()]\ntopicsFile.close()\n\npreparedTopics = []\nfor topic in topics:\n preparedTopics.append(prepareTopic(topic, 
model))\n\ncountTopics = len(preparedTopics)\ndistanceMatrix = [[0] * countTopics for i in range(countTopics)]\nfor indexA, topicA in enumerate(preparedTopics):\n for indexB, topicB in enumerate(preparedTopics):\n distanceMatrix[indexA][indexB] = preparedDistance(topicA, topicB, model)\ndistanceMatrix = np.array(distanceMatrix)\n\nprint(distanceMatrix)\n\nz = linkage(distanceMatrix, 'ward')\ndendrogram(z)\nclustersCount = 8\nclusterNums = fcluster(z, clustersCount, criterion='maxclust')\n\nresults = [{\"topic\": topic, \"group\": str(clusterNums[index])} for index, topic in enumerate(topics)]\nprint(results)\n\nwith open(\"clusters.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n for line in results:\n writer.writerow([line['topic'], line['group']])\n output.close()\n","repo_name":"lup-/kubrikrubrik","sub_path":"tagCluster/go.py","file_name":"go.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23354492857","text":"#Sprawdz czy zdany graf jest dwudzielny (rownowazne kolorowaniu grafu 2 kolorami).\r\n\r\n#Wierzchołki w grafie koloruje na 2 kolory: 0 i 1.\r\n#Aby był dwudzielny to nie może pojawić się sytuacja, w której wierzchołki należace do 1 krawędzi miałyby ten sam kolor.\r\n#Implementacja przez listy sąsiedztwa.\r\n#Złożoność: O(V+E).\r\n\r\nfrom queue import Queue\r\n\r\n\r\ndef BFS(G):\r\n q = Queue()\r\n visited = [False] * len(G)\r\n color = [-1] * len(G)\r\n\r\n q.put(0)\r\n visited[0] = True\r\n color[0] = 1\r\n\r\n while not q.empty():\r\n u = q.get()\r\n\r\n for v in G[u]:\r\n\r\n if not visited[v]:\r\n visited[v] = True\r\n # koloruje na inny kolor niż ma u\r\n color[v] = 1 - color[u]\r\n q.put(v)\r\n\r\n if color[u] == color[v]:\r\n return False\r\n\r\n return True\r\n","repo_name":"bgawkuc/ASD-AGH-2021","sub_path":"ASD_tasks/8-11_graphs_bst/ASD_8/1.is_graph_bipartite.py","file_name":"1.is_graph_bipartite.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39832975629","text":"\"\"\"进度条:显示进度+每个epoch的metrics总结(仅val的是平均值,train的似乎是最后一个值;等lightning更新后使用最新版本替换此实现)\n\"\"\"\n\nfrom typing import Any, Optional, Union\nfrom pytorch_lightning.callbacks import RichProgressBar\n\nimport sys\nfrom torch import Tensor\nfrom pytorch_lightning import Trainer\nfrom rich.console import Console, RenderableType\nfrom rich.console import Console, RenderableType\nfrom rich.progress import BarColumn, Progress, ProgressColumn, Task, TaskID, TextColumn\nfrom rich.progress_bar import ProgressBar\nfrom rich.style import Style\nfrom rich.text import Text\nimport math\nfrom datetime import timedelta\nfrom dataclasses import dataclass\n\n\nclass CustomBarColumn(BarColumn):\n \"\"\"Overrides ``BarColumn`` to provide support for dataloaders that do not define a size (infinite size)\n such as ``IterableDataset``.\"\"\"\n\n def render(self, task: \"Task\") -> ProgressBar:\n \"\"\"Gets a progress bar widget for a task.\"\"\"\n return ProgressBar(\n total=max(0, task.total),\n completed=max(0, task.completed),\n width=None if self.bar_width is None else max(1, self.bar_width),\n pulse=not task.started or not math.isfinite(task.remaining),\n animation_time=task.get_time(),\n style=self.style,\n complete_style=self.complete_style,\n finished_style=self.finished_style,\n pulse_style=self.pulse_style,\n )\n\n\n@dataclass\nclass CustomInfiniteTask(Task):\n \"\"\"Overrides ``Task`` 
to define an infinite task.\n\n This is useful for datasets that do not define a size (infinite size) such as ``IterableDataset``.\n \"\"\"\n\n @property\n def time_remaining(self) -> Optional[float]:\n return None\n\n\nclass CustomProgress(Progress):\n \"\"\"Overrides ``Progress`` to support adding tasks that have an infinite total size.\"\"\"\n\n def add_task(\n self,\n description: str,\n start: bool = True,\n total: float = 100.0,\n completed: int = 0,\n visible: bool = True,\n **fields: Any,\n ) -> TaskID:\n if not math.isfinite(total):\n task = CustomInfiniteTask(\n self._task_index,\n description,\n total,\n completed,\n visible=visible,\n fields=fields,\n _get_time=self.get_time,\n _lock=self._lock,\n )\n return self.add_custom_task(task)\n return super().add_task(description, start, total, completed, visible, **fields)\n\n def add_custom_task(self, task: CustomInfiniteTask, start: bool = True):\n with self._lock:\n self._tasks[self._task_index] = task\n if start:\n self.start_task(self._task_index)\n new_task_index = self._task_index\n self._task_index = TaskID(int(self._task_index) + 1)\n self.refresh()\n return new_task_index\n\n\nclass CustomTimeColumn(ProgressColumn):\n\n # Only refresh twice a second to prevent jitter\n max_refresh = 0.5\n\n def __init__(self, style: Union[str, Style]) -> None:\n self.style = style\n super().__init__()\n\n def render(self, task) -> Text:\n elapsed = task.finished_time if task.finished else task.elapsed\n remaining = task.time_remaining\n elapsed_delta = \"-:--:--\" if elapsed is None else str(timedelta(seconds=int(elapsed)))\n remaining_delta = \"-:--:--\" if remaining is None else str(timedelta(seconds=int(remaining)))\n return Text(f\"{elapsed_delta} • {remaining_delta}\", style=self.style)\n\n\nclass BatchesProcessedColumn(ProgressColumn):\n\n def __init__(self, style: Union[str, Style]):\n self.style = style\n super().__init__()\n\n def render(self, task) -> RenderableType:\n total = task.total if task.total != float(\"inf\") else \"--\"\n return Text(f\"{int(task.completed)}/{total}\", style=self.style)\n\n\nclass ProcessingSpeedColumn(ProgressColumn):\n\n def __init__(self, style: Union[str, Style]):\n self.style = style\n super().__init__()\n\n def render(self, task) -> RenderableType:\n task_speed = f\"{task.speed:>.2f}\" if task.speed is not None else \"0.00\"\n return Text(f\"{task_speed}it/s\", style=self.style)\n\n\nclass MetricsTextColumn(ProgressColumn):\n \"\"\"A column containing text.\"\"\"\n\n def __init__(self, trainer, style):\n self._trainer = trainer\n self._tasks = {}\n self._current_task_id = 0\n self._metrics = {}\n self._style = style\n super().__init__()\n\n def update(self, metrics):\n # Called when metrics are ready to be rendered.\n # This is to prevent render from causing deadlock issues by requesting metrics\n # in separate threads.\n self._metrics = metrics\n\n def render(self, task) -> Text:\n from pytorch_lightning.trainer.states import TrainerFn\n\n if self._trainer.state.fn != TrainerFn.FITTING or self._trainer.sanity_checking:\n return Text(\"\")\n if self._trainer.training and task.id not in self._tasks:\n self._tasks[task.id] = \"None\"\n if self._renderable_cache:\n self._tasks[self._current_task_id] = self._renderable_cache[self._current_task_id][1]\n self._current_task_id = task.id\n if self._trainer.training and task.id != self._current_task_id:\n return self._tasks[task.id]\n _text = \"\"\n\n for k, v in self._metrics.items():\n _text += f\"{k}: {round(v, 3) if isinstance(v, float) else v} \"\n 
return Text(_text, justify=\"left\", style=self._style)\n\n\nclass LitProgressBar(RichProgressBar):\n \"\"\"A progress bar prints metrics at the end of each epoch\n \"\"\"\n\n def _init_progress(self, trainer):\n if self.is_enabled and (self.progress is None or self._progress_stopped):\n self._reset_progress_bar_ids()\n self._console: Console = Console(force_terminal=True, no_color=True, width=200)\n self._console.clear_live()\n self._metric_component = MetricsTextColumn(trainer, self.theme.metrics)\n self.progress = CustomProgress(\n *self.configure_columns(trainer),\n self._metric_component,\n refresh_per_second=self.refresh_rate_per_second,\n disable=self.is_disabled,\n console=self._console,\n )\n self.progress.start()\n # progress has started\n self._progress_stopped = False\n\n def on_validation_epoch_end(self, trainer: Trainer, pl_module):\n super().on_validation_epoch_end(trainer, pl_module)\n sys.stdout.flush()\n if trainer.is_global_zero:\n metrics = trainer.logged_metrics\n infos = f\"Epoch {trainer.current_epoch} metrics: \"\n for k, v in metrics.items():\n value = v\n if isinstance(v, Tensor):\n value = v.item()\n if isinstance(value, float):\n infos += k + f\"={value:.4f} \"\n else:\n infos += k + f\"={value} \"\n if len(metrics) > 0:\n sys.stdout.write(f'{infos}\\n')\n sys.stdout.flush()\n","repo_name":"Audio-WestlakeU/McNet","sub_path":"src/util/lit_progress_bar.py","file_name":"lit_progress_bar.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"62"} +{"seq_id":"4436804148","text":"from random import randrange\nfrom character import Character\n\nclass Shadow(Character):\n def __init__(self, level = 1, health = 1, power = 1):\n self.health = health\n self.power = power\n self.level = level\n\n def attack(self, hero):\n dodge = randrange(5)\n if dodge != 4:\n hero.health -= self.power\n self.health = 1\n print(f'The {self} dodged your attack and did {self.power} to you!')\n if dodge == 4:\n hero.health -= self.power\n print(f'The {self} did {self.power} damage to you.')\n\n def __str__(self):\n return \"Shadow\"","repo_name":"jtessensohn/python-rpg-project","sub_path":"shadow.py","file_name":"shadow.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1737413453","text":"from leginon import imageprocessor\nimport os\n\nclass FileNames(imageprocessor.ImageProcessor):\n\n\tdef processImageList(self, imagelist):\n\t\tself.logger.info('printing filenames as an example')\n\t\tmrc_files = []\n\t\timagepath = self.session['image path']\n\t\tfor imagedata in imagelist:\n\t\t\tmrc_name = imagedata['filename'] + '.mrc'\n\t\t\tfullname = os.path.join(imagepath, mrc_name)\n\t\t\tmrc_files.append(fullname)\n\n\t\tfor mrc_file in mrc_files:\n\t\t\tprint(mrc_file)\n","repo_name":"nysbc/leginon-py3","sub_path":"leginon/imageprocessorexample.py","file_name":"imageprocessorexample.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"27774878311","text":"from PIL import Image\r\nimport os\r\n#디렉토리 탐색 알고리즘\r\nfor root, dirs, files in os.walk(\".\\\\\"):\r\n height = 0 \r\n print(root)\r\n #해당 디렉토리의 모든 파일들을 리턴\r\n for file in files:\r\n #해당 파일의 확장자가 png만 처리\r\n if not file.endswith(\".png\"):\r\n continue\r\n #root는 탐색 디렉토리 file은 파일이름으로 path 해당파일의 전체경로\r\n path = root + os.sep + file\r\n img = 
Image.open(path)\r\n        if img.width != 690:\r\n            continue\r\n        print(path, img.width, img.height)\r\n        height += img.height\r\n    if height == 0:\r\n        continue\r\n    new_height = 0\r\n    new_img = Image.new(\"RGB\", (690, height))  # blank canvas to stitch the strips onto\r\n    for file in files:\r\n        if not file.endswith(\".png\"):\r\n            continue\r\n        path = root + os.sep + file\r\n        img = Image.open(path)\r\n        if img.width != 690:\r\n            continue\r\n        print(path, img.height)\r\n        new_img.paste(img, (0, new_height))\r\n        new_height += img.height\r\n    new_img.save(root + os.sep + \"combined.png\")  # assumed output filename","repo_name":"viabe/crawler_code","sub_path":"darkweb/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35631209006","text":"# 2021 07 16 이분탐색\n# https://www.acmicpc.net/problem/2805\nimport sys; input = sys.stdin.readline\nfrom collections import Counter\nn, m = map(int, input().split())\ntree = Counter(map(int, input().split()))\nstart = 0\nend = max(tree)\nresult = 0\nwhile start <= end:\n    h = (start + end)//2\n    get = 0\n    for i, cnt in tree.items():\n        if i>=h:\n            get += (i-h)*cnt\n    # 자른 나무의 합이 목표보다 적을 때 절단기의 높이를 낮춰야 한다.\n    if get < m:\n        end = h-1\n    # 자른 나무의 합이 충분할때 절단기의 높이를 높인다.\n    else:\n        start = h+1\n        result = h\nprint(result)","repo_name":"minho511/algorithm_solution","sub_path":"baekjoon_python/[2805] 나무 자르기.py","file_name":"[2805] 나무 자르기.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11327742270","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom imutils.video import VideoStream\r\nfrom imutils.video import FPS\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport time\r\nimport cv2\r\n\r\nCLASSES = [\" \", \"30\", \"school\"]\r\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\r\n\r\n# load our serialized model from disk\r\nprint(\"[INFO] loading model...\")\r\nnet = cv2.dnn.readNetFromTensorflow(\"frozen_inference_graph.pb\", \"warning.pbtxt\")\r\n\r\n# initialize the video stream, allow the camera sensor to warmup,\r\n# and initialize the FPS counter\r\nprint(\"[INFO] starting video stream...\")\r\nvs = VideoStream(src=0).start()\r\ntime.sleep(2.0)\r\nfps = FPS().start()\r\n\r\n# loop over the frames from the video stream\r\nwhile True:\r\n    # grab the frame from the threaded video stream and resize it\r\n    # to have a maximum width of 400 pixels\r\n    frame = vs.read()\r\n    frame = imutils.resize(frame, width=400)\r\n\r\n    # grab the frame dimensions and convert it to a blob\r\n    (h, w) = frame.shape[:2]\r\n    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (640, 480)), size=(640, 480), swapRB=True)\r\n\r\n    # pass the blob through the network and obtain the detections and\r\n    # predictions\r\n    net.setInput(blob)\r\n    detections = net.forward()\r\n\r\n    # loop over the detections\r\n\r\n    for i in np.arange(0, detections.shape[2]):\r\n        # extract the confidence (i.e., probability) associated with\r\n        # the prediction\r\n        confidence = detections[0, 0, i, 2]\r\n\r\n        # filter out weak detections by ensuring the `confidence` is\r\n        # greater than the minimum confidence\r\n        if confidence > 0.8:\r\n            # extract the index of the class label from the\r\n            # `detections`, then compute the (x, y)-coordinates of\r\n            # the bounding box for the object\r\n            idx = int(detections[0, 0, i, 1])\r\n            # print(idx)\r\n            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\r\n            (startX, startY, endX, endY) = box.astype(\"int\")\r\n\r\n            # draw the prediction on the frame\r\n            label = \"{}: {:.2f}%\".format(CLASSES[idx], confidence * 
100)\r\n cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)\r\n y = startY - 15 if startY - 15 > 15 else startY + 15\r\n cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\r\n\r\n # show the output frame\r\n cv2.imshow(\"Frame\", frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n # if the `q` key was pressed, break from the loop\r\n if key == ord(\"q\"):\r\n break\r\n\r\n # update the FPS counter\r\n fps.update()\r\n\r\n# stop the timer and display FPS information\r\nfps.stop()\r\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\r\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\r\n\r\n# do a bit of cleanup\r\ncv2.destroyAllWindows()\r\nvs.stop()\r\n\r\n\r\n# Create your views here.\r\n\r\n\r\ndef index(request):\r\n print(\"Hello\")\r\n return render(request, 'WarningSystem/index.html', {})\r\n\r\n\r\ndef print1(request):\r\n speed = request.GET.get('speed')\r\n print(\"PRINT\")\r\n print(speed)\r\n\r\n if speed:\r\n\r\n\r\n return JsonResponse({\r\n 'class' : \"Hello World\"\r\n })\r\n\r\n # return HttpResponse()","repo_name":"odin58/Warning-system","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"944296541","text":"class Book:\n def __init__(self, title=\"\", author=\"\", publisher=\"\", copyright=\"\"):\n self.title = title\n self.author = author\n self.publisher = publisher\n self.copyright = copyright\n\n def __str__(self):\n return (\"Title: %sAuthor: %sPublisher: %sCopyright: %s\" %\n (self.title, self.author, self.publisher, self.copyright))\n\n\nif __name__ == \"__main__\":\n with open(\"books.txt\") as book_list:\n library = []\n for line in book_list:\n title = line\n author = book_list.readline()\n publisher = book_list.readline()\n copyright = book_list.readline()\n library.append(Book(title, author, publisher, copyright))\n print(\"The library has %d books:\" % len(library))\n for books in library:\n print(books)\n","repo_name":"levi-terry/CSCI135","sub_path":"library/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18662105276","text":"import pandas as pd\n\n\ndef rising_temperature(weather: pd.DataFrame) -> pd.DataFrame:\n # Date to datetime\n weather[\"recordDate\"] = pd.to_datetime(weather[\"recordDate\"])\n\n # Sort by date\n weather.sort_values(\"recordDate\", inplace=True)\n\n # Calculate previous day distance in days\n weather[\"prev_day_distance\"] = (\n weather[\"recordDate\"].shift(1) - weather[\"recordDate\"]\n ).dt.days\n\n # Create yesterday's temp column\n weather[\"temperature_yesterday\"] = weather[\"temperature\"].shift(1)\n\n # Check if previous day distance is -1 and temp got higher\n return weather[\n (weather[\"prev_day_distance\"] == -1)\n & (weather[\"temperature\"] > weather[\"temperature_yesterday\"])\n ][[\"id\"]]\n","repo_name":"dvdblk/leetcode","sub_path":"problems/0197-rising-temperature/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"20894003570","text":"import sys\nimport pymod.mc\nimport pymod.collection\n\ndescription = \"Manipulate collections of modules\"\nlevel = \"short\"\nsection = \"collections\"\n\n\n_subcommands = {}\n\n\ndef 
add_avail_command(parser):\n def avail(args):\n s = pymod.collection.avail(terse=args.terse, regex=args.regex)\n sys.stderr.write(s)\n\n p = parser.add_parser(\"avail\", help=\"List available (saved) collections\")\n p.add_argument(\n \"regex\",\n nargs=\"?\",\n metavar=\"regex\",\n help='Highlight available modules matching \"regex\"',\n )\n p.add_argument(\n \"-t\",\n \"--terse\",\n action=\"store_true\",\n default=False,\n help=\"Display output in terse format [default: %(default)s]\",\n )\n _subcommands[\"avail\"] = avail\n\n\ndef add_save_command(parser):\n def save(args):\n return pymod.mc.collection.save(args.name)\n\n p = parser.add_parser(\"save\", help=\"Save the current environment\")\n p.add_argument(\n \"name\",\n nargs=\"?\",\n default=pymod.names.default_user_collection,\n help=\"Name of collection to save\",\n )\n _subcommands[\"save\"] = save\n\n\ndef add_add_to_loaded_collection_command(parser):\n def add_to_loaded_collection(args):\n return pymod.mc.collection.add_to_loaded_collection(args.name)\n\n p = parser.add_parser(\"add\", help=\"Add module to currently loaded collection\")\n p.add_argument(\n \"name\",\n default=pymod.names.default_user_collection,\n help=\"Name of module to add to currently loaded collection\",\n )\n _subcommands[\"add\"] = add_to_loaded_collection\n\n\ndef add_pop_from_loaded_collection_command(parser):\n def pop_from_loaded_collection(args):\n return pymod.mc.collection.pop_from_loaded_collection(args.name)\n\n p = parser.add_parser(\"pop\", help=\"Pop module from currently loaded collection\")\n p.add_argument(\n \"name\",\n default=pymod.names.default_user_collection,\n help=\"Name of module to pop from currently loaded collection\",\n )\n _subcommands[\"pop\"] = pop_from_loaded_collection\n\n\ndef add_show_command(parser):\n def show(args):\n return pymod.mc.collection.show(args.name)\n\n p = parser.add_parser(\n \"show\", help=\"Show actions that would be taken by restoring the collection\"\n )\n p.add_argument(\n \"name\",\n nargs=\"?\",\n default=pymod.names.default_user_collection,\n help=\"Name of collection to show\",\n )\n _subcommands[\"show\"] = show\n\n\ndef add_remove_command(parser):\n def remove(args):\n return pymod.mc.collection.remove(args.name)\n\n p = parser.add_parser(\"remove\", help=\"Remove collection\")\n p.add_argument(\"name\", help=\"Name of collection to remove\")\n _subcommands[\"remove\"] = remove\n\n\ndef add_restore_command(parser):\n def restore(args):\n pymod.mc.collection.restore(args.name)\n pymod.mc.dump()\n\n p = parser.add_parser(\"restore\", help=\"Restore collection\")\n p.add_argument(\n \"name\",\n nargs=\"?\",\n default=pymod.names.default_user_collection,\n help=\"Name of collection to restore\",\n )\n _subcommands[\"restore\"] = restore\n\n\ndef setup_parser(subparser):\n \"\"\"Parser is only constructed so that this prints a nice help\n message with -h. 
\"\"\"\n sp = subparser.add_subparsers(metavar=\"SUBCOMMAND\", dest=\"subcommand\")\n add_avail_command(sp)\n add_save_command(sp)\n add_show_command(sp)\n add_remove_command(sp)\n add_restore_command(sp)\n add_add_to_loaded_collection_command(sp)\n add_pop_from_loaded_collection_command(sp)\n\n\ndef collection(parser, args):\n _subcommands[args.subcommand](args)\n","repo_name":"tjfulle/Modulecmd.py","sub_path":"lib/pymod/pymod/command/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74251885318","text":"\"\"\"SQLAlchemy core support.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Type\n\nfrom muffin_rest.filters import Filter\nfrom muffin_rest.sqlalchemy import SARESTHandler, SARESTOptions\nfrom muffin_rest.sqlalchemy.filters import SAFilter\nfrom sqlalchemy import JSON, Enum, Text\n\nfrom muffin_admin.handler import AdminHandler, AdminOptions\n\nif TYPE_CHECKING:\n import marshmallow as ma\n\n from muffin_admin.types import TRAInfo\n\n\nclass SAAdminOptions(AdminOptions, SARESTOptions):\n \"\"\"Keep SAAdmin options.\"\"\"\n\n def setup(self, cls):\n \"\"\"Auto insert filter by id.\"\"\"\n super(SAAdminOptions, self).setup(cls)\n\n for f in self.filters:\n if isinstance(f, Filter):\n f = f.name # noqa:\n\n if f == \"id\":\n break\n\n else:\n self.filters = [SAFilter(\"id\", field=self.table_pk), *self.filters]\n\n\nclass SAAdminHandler(AdminHandler, SARESTHandler):\n \"\"\"Work with SQLAlchemy Core.\"\"\"\n\n meta_class: Type[SAAdminOptions] = SAAdminOptions\n meta: SAAdminOptions\n\n @classmethod\n def to_ra_field(cls, field: ma.fields.Field, source: str) -> TRAInfo:\n \"\"\"Setup RA fields.\"\"\"\n column = getattr(cls.meta.table.c, field.attribute or source, None)\n refs = dict(cls.meta.ra_refs)\n if column is not None:\n if column.foreign_keys and column.name in refs:\n ref_data = refs[column.name]\n fk = list(column.foreign_keys)[0]\n return \"FKField\", {\n \"source\": source,\n \"refKey\": ref_data.get(\"key\") or fk.column.name,\n \"refSource\": ref_data.get(\"source\") or fk.column.name,\n \"reference\": ref_data.get(\"reference\") or fk.column.table.name,\n }\n\n if isinstance(column.type, JSON):\n return \"JsonField\", {}\n\n return super(SAAdminHandler, cls).to_ra_field(field, source)\n\n @classmethod\n def to_ra_input(cls, field: ma.fields.Field, source: str) -> TRAInfo:\n \"\"\"Setup RA inputs.\"\"\"\n column = getattr(cls.meta.table.c, field.attribute or source, None)\n ra_type, props = super(SAAdminHandler, cls).to_ra_input(field, source)\n refs = dict(cls.meta.ra_refs)\n if column is not None:\n if column.foreign_keys and (source in refs):\n ref_data = refs[source]\n fk = list(column.foreign_keys)[0]\n return \"FKInput\", dict(\n props,\n emptyValue=None if column.nullable else \"\",\n refSource=ref_data.get(\"source\") or fk.column.name,\n refKey=ref_data.get(\"key\") or fk.column.name,\n reference=ref_data.get(\"reference\") or fk.column.table.name,\n )\n\n if isinstance(column.type, Enum):\n return \"SelectInput\", dict(\n props,\n choices=[{\"id\": c.value, \"name\": c.name} for c in column.type.enum_class],\n )\n\n if isinstance(column.type, Text):\n return \"TextInput\", dict(props, multiline=True)\n\n if isinstance(column.type, JSON):\n return \"JsonInput\", props\n\n return ra_type, 
props\n","repo_name":"klen/muffin-admin","sub_path":"muffin_admin/sqlalchemy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"62"} +{"seq_id":"11246205587","text":"from .helpers import GeoFieldWrapper\n\n\ndef Point(x, y):\n return GeoFieldWrapper(\"POINT(%f %f)\" % (x, y))\n\n\ndef Line(*coordinates):\n return GeoFieldWrapper(\n \"LINESTRING(%s)\" % ','.join(\"%f %f\" % point for point in coordinates)\n )\n\n\ndef Polygon(*coordinates_groups):\n try:\n if not isinstance(coordinates_groups[0][0], (tuple, list)):\n coordinates_groups = (coordinates_groups,)\n except Exception:\n pass\n return GeoFieldWrapper(\n \"POLYGON(%s)\" % (\n \",\".join([\n \"(%s)\" % \",\".join(\"%f %f\" % point for point in group)\n for group in coordinates_groups\n ])\n )\n )\n\n\ndef MultiPoint(*points):\n return GeoFieldWrapper(\n \"MULTIPOINT(%s)\" % (\n \",\".join([\n \"(%f %f)\" % point for point in points\n ])\n )\n )\n\n\ndef MultiLine(*lines):\n return GeoFieldWrapper(\n \"MULTILINESTRING(%s)\" % (\n \",\".join([\n \"(%s)\" % \",\".join(\"%f %f\" % point for point in line)\n for line in lines\n ])\n )\n )\n\n\ndef MultiPolygon(*polygons):\n return GeoFieldWrapper(\n \"MULTIPOLYGON(%s)\" % (\n \",\".join([\n \"(%s)\" % (\n \",\".join([\n \"(%s)\" % \",\".join(\"%f %f\" % point for point in group)\n for group in polygon\n ])\n ) for polygon in polygons\n ])\n )\n )\n","repo_name":"emmett-framework/emmett","sub_path":"emmett/orm/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":890,"dataset":"github-code","pt":"62"} +{"seq_id":"73966622278","text":"#!/usr/bin/env python3\n\n# https://leetcode-cn.com/problems/maximum-product-subarray\n# 给定一个整数数组 nums ,找出一个序列中乘积最大的连续子序列(该序列至少包含一个数)。\n#\n# 示例 1:\n# 输入: [2,3,-2,4]\n# 输出: 6\n# 解释: 子数组 [2,3] 有最大乘积 6。\n#\n# 示例 2:\n# 输入: [-2,0,-1]\n# 输出: 0\n# 解释: 结果不能为 2, 因为 [-2,-1] 不是子数组。\n\n\nclass Solution:\n def maxProduct(self, nums: [int]) -> int:\n resMin = nums[0]\n resMax = nums[0]\n minValue = nums[0]\n maxValue = nums[0]\n\n for num in nums[1:]:\n minV = minValue\n maxV = maxValue\n minValue = min(minV * num, maxV * num, num)\n maxValue = max(minV * num, maxV * num, num)\n resMin = min(resMin, minValue)\n resMax = max(resMax, maxValue)\n return resMax\n\n\nprint(Solution().maxProduct([2, 3, -2, 4])) # 6\nprint(Solution().maxProduct([-2, 0, -1]))\n","repo_name":"HeDefine/LeetCodePractice","sub_path":"Q152.乘积最大子序列.py","file_name":"Q152.乘积最大子序列.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74697413316","text":"import requests\nimport json\n\n#prepare for the header info\nuser_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'\nheaders = {}\nheaders['user-agent'] = user_agent\n\n\n#test GET\ngeturl = 'http://httpbin.org/get'\ngetresponse = requests.get(geturl,headers=headers).text\ngetresult = json.dumps('get response is %s' % getresponse)\nprint(getresult)\n\n\n#test POST\nposturl = 'http://httpbin.org/post'\npostresponse = requests.post(geturl,data={}, headers=headers).text\npostresult = json.dumps('post response is %s' % 
postresponse)\nprint(postresult)","repo_name":"Python000-class01/Python000-class01","sub_path":"Week_01/G20200389010046/week01_0046_ex2.py","file_name":"week01_0046_ex2.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"62"} +{"seq_id":"31058451179","text":"import requests\r\n\r\nyear = 2011\r\n\r\nmaximum_goals = 10\r\nmatch_count = 0\r\n\r\nfor j in range(0, maximum_goals):\r\n url = 'https://jsonmock.hackerrank.com/api/football_matches?year='+str(year)+'&team1goals='+str(j)+'&team2goals='+str(j)\r\n response = requests.get(url).json()\r\n match_count += response['total']\r\nprint(match_count)","repo_name":"Pratcode/Rest_Api_Intermediate","sub_path":"Que1-Sol2(main).py","file_name":"Que1-Sol2(main).py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15340678795","text":"from torch.utils.data import Dataset\nimport numpy as np\nimport io\nfrom PIL import Image\nimport os\nimport json\nimport random\nfrom synthesis.utils.misc import instantiate_from_config\n\ndef load_img(filepath):\n img = Image.open(filepath).convert('RGB')\n return img\n\nclass CocoDataset(Dataset):\n def __init__(self, data_root, negative_sample_path ,phase = 'train', im_preprocessor_config=None):\n self.transform = instantiate_from_config(im_preprocessor_config)\n self.root = os.path.join(data_root, phase)\n # input_file = os.path.join(data_root, input_file)\n caption_file = \"captions_\"+phase+\"2014.json\"\n caption_file = os.path.join(data_root, \"annotations\", caption_file)\n\n self.json_file = json.load(open(caption_file, 'r'))\n print(\"length of the dataset is \")\n print(len(self.json_file['annotations']))\n\n # print(\"check json_file:\", self.json_file['annotations'][1])\n # exit()\n\n self.num = len(self.json_file['annotations'])\n self.image_prename = \"COCO_\" + phase + \"2014_\"\n self.folder_path = os.path.join(data_root, phase+'2014', phase+'2014')\n\n\n self.negative_sample_path = negative_sample_path\n self.phase = phase\n if self.phase == 'train' and self.negative_sample_path != None:\n # print(\"negative_sample_path:\", negative_sample_path)\n with open(negative_sample_path, 'r') as f:\n self.extra_img = json.load(f)\n # self.extra_img = os.path.join()\n print(\"negative_sample_path:\", negative_sample_path, len(self.extra_img))\n # print(\"check path:\", self.extra_img[0])\n else:\n self.extra_img = None\n\n\n \n def __len__(self):\n return self.num\n \n def __getitem__(self, index):\n this_item = self.json_file['annotations'][index]\n caption = this_item['caption'].lower()\n # print(\"check data loader:\", this_item, caption)\n image_name = str(this_item['image_id']).zfill(12)\n image_path = os.path.join(self.folder_path, self.image_prename+image_name+'.jpg')\n image = load_img(image_path)\n image = np.array(image).astype(np.uint8)\n image = self.transform(image = image)['image']\n if self.phase == 'train' and self.extra_img != None:\n neg_sample = self.extra_img[index]\n for i in range(len(neg_sample)):\n neg_img_name = str(neg_sample[i]).zfill(12)\n neg_img_path = os.path.join(self.folder_path, self.image_prename+neg_img_name+'.jpg')\n # print(\"neg_img_path:\", i, neg_img_path)\n img = load_img(neg_img_path)\n img = np.array(img).astype(np.uint8)\n img = self.transform(image = img)['image']\n if i == 0:\n neg_img = np.expand_dims(img, axis=0)\n else:\n img = np.expand_dims(img, axis=0)\n neg_img = 
np.concatenate((neg_img, img), axis=0) \n data = {\n 'image': np.transpose(image.astype(np.float32), (2, 0, 1)),\n 'text': caption,\n 'negative_img': np.transpose(neg_img.astype(np.float32), (0, 3, 1, 2)),\n }\n else:\n data = {\n 'image': np.transpose(image.astype(np.float32), (2, 0, 1)),\n 'text': caption,\n } \n\n return data\n","repo_name":"L-YeZhu/CDCD","sub_path":"synthesis/data/mscoco_dataset.py","file_name":"mscoco_dataset.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"62"} +{"seq_id":"41623393901","text":"from twisted.web.server import Site\nfrom twisted.web.resource import Resource\nfrom twisted.internet import reactor, endpoints\nfrom cgi import html\n\nclass FormPage(Resource):\n def render_GET(self, request):\n return (b\"\"\n b\"\"\n b\"
\")\n\n def render_POST(self, request):\n args = request.args[b\"the-field\"][0].decode(\"utf-8\")\n escapedArgs = html.escape(args)\n return (b\"\"\n b\"\"\n b\"You submitted: \" + escapedArgs.encode('utf-8'))\n\nroot = Resource()\nroot.putChild(b\"form\", FormPage())\nfactory = Site(root)\nendpoint = endpoints.TCP4ServerEndpoint(reactor, 8880)\nendpoint.listen(factory)\nreactor.run()","repo_name":"hoovejd/twisted_examples","sub_path":"007_handling_posts.py","file_name":"007_handling_posts.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12039836288","text":"import datetime\n\nfrom decorator import decorator\n\n\nclass CrashAfterException(Exception):\n message = \"This method has stopped working\"\n\n\n@decorator\ndef crash_after(func, date: str = None, *args, **kwargs):\n if date is None:\n return func(*args, **kwargs)\n\n now = datetime.datetime.now()\n _crash_after = datetime.datetime.strptime(date, \"%m/%d/%Y\")\n if _crash_after < now:\n raise CrashAfterException(\n \"This method expired in its current form on {0}\".format(date)\n )\n\n return func(*args, **kwargs)\n","repo_name":"zackkitzmiller/crash-after","sub_path":"crash_after/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38552476288","text":"def insertion_sort(A):\n for i in range(1, len(A)):\n j = i -1\n while A[j] > A[i] and j >= 0:\n temp = A[j]\n A[j] = A[i]\n A[i] = temp\n i -= 1\n j -= 1\n return A\n\n# 요소의 이동이 잦다\n# string이 이미 정렬이 많이 되있는 상태일 때 가장 효율적\nA = [12, 9, 3, 7, 14, 11]\n\nprint(insertion_sort(A))\n\n","repo_name":"YONGJINJO/Python","sub_path":"Algorithms_Unlocked/Chap 3/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24046462982","text":"import random\nfrom Deck import Deck\nfrom Jogador import Jogador\n\nclass Poker:\n \n def contar_pontuacao(self, player, valor_aposta):\n valores = []\n naipes = []\n qtd = [0 for _ in range(14)]\n\n # criando listas para auxiliar na verificacao dos ganhos\n for carta in player.cartas:\n valores.append(carta.valor)\n naipes.append(carta.naipe)\n qtd[carta.valor-2] += 1\n\n # ordenando os valores para ajudar na comparacao\n valores.sort()\n \n if self.isRoyalStraightFlush(valores, naipes):\n return valor_aposta * 200\n \n if self.isStraightFlush(valores, naipes):\n return valor_aposta * 100\n \n if self.isQuadra(valores):\n return valor_aposta * 50\n \n if self.isFullHand(valores):\n return valor_aposta * 20\n \n if self.isFlush(naipes):\n return valor_aposta * 10\n \n if self.isStraight(valores):\n return valor_aposta * 5\n \n if self.isTrinca(qtd):\n return valor_aposta * 2\n \n if self.isDupla(qtd):\n return valor_aposta\n \n return 0\n \n def jogar(self, jogador, deck):\n \n while jogador.saldo > 0:\n print(f'Saldo atual: {jogador.saldo}')\n \n # laco para aceitar um valor valido do usuario ou F para sair\n while True:\n resposta = (input('Digite o valor que quer apostar ou para \"F\" sair: '))\n if resposta == 'F':\n return\n \n # verificando se o valor digitado eh valido\n try:\n vlr_aposta = int(resposta)\n if 0 < vlr_aposta <= jogador.saldo:\n jogador.subtrai_saldo(vlr_aposta)\n break\n else:\n print('Valor invalido!')\n \n # entrada nao eh o caracter F e nem numerico\n 
except ValueError:\n print(f'Entrada nao valida!')\n \n # embaralhando a cada rodada\n deck.embaralha_baralho()\n \n # pegando uma \"mao\" nova na rodada\n jogador.criar_cartas(deck)\n jogador.printa_cartas()\n \n #fazendo ate 3 trocas de cartas\n for i in range(2):\n resposta = input('Digite as cartas que quer trocar(digite enter para nao trocar): ')\n if resposta == '':\n break\n\n trocas = [int(x) for x in resposta.split()]\n jogador.trocar_cartas(trocas, deck)\n jogador.printa_cartas()\n \n \n vlr_ganho = self.contar_pontuacao(jogador, vlr_aposta)\n jogador.adiciona_saldo(vlr_ganho)\n print(f'Parabens vc ganhou {vlr_ganho} !!!')\n \n # resetando as cartas do jogador\n jogador.devolver_cartas(deck)\n \n \n def isRoyalStraightFlush(self, valores, naipes):\n if self.isStraightFlush(valores, naipes) == False:\n return False\n \n # ao ordenar os valores consigo verificar se a listas sao iguais\n return valores == [10, 11, 12, 13, 14]\n \n \n def isStraightFlush(self, valores, naipes):\n # verificando se tenho apenas 1 naipe\n if self.isStraight(valores) == False:\n return False\n \n if self.isFlush(naipes) == False:\n return False\n \n return True\n \n def isQuadra(self, valores):\n # verificando se tenho apenas cartas com dois valores diferentes\n if len(set(valores)) != 2:\n return False\n \n # contando quantas vezes a carta do meio aparece, como esta ordenado, para ser uma quadra,\n # ela devera aparecer extamente 4 vezes\n carta_do_meio = valores[len(valores) // 2]\n contador = 0\n for i in range(len(valores)):\n if valores[i] == carta_do_meio:\n contador += 1\n \n if contador != 4:\n return False\n \n return True\n\n def isFullHand(self, valores):\n # se tenho mais que dois tipos de valores, quer dizer que nao \n # tenho uma trinca e um par\n if len(set(valores)) != 2:\n return False\n \n # como ja verifiquei que nao eh uma quadra anteriormente,\n # eh garantido que se tenho apenas cartas com dois valores\n # diferentes, que tenho um full hand\n return True\n \n \n def isFlush(self, naipes):\n # verificando se tenho apenas um naipe\n return len(set(naipes)) == 1\n \n def isStraight(self, valores):\n #verificando se a carta na posicao i eh igual a proxima carta + 1\n for i in range(len(valores)-1):\n if valores[i]+1 != valores[i+1]:\n return False\n \n return True\n \n def isTrinca(self, qtd):\n return qtd.count(3) == 1\n \n def isDupla(self, qtd):\n return qtd.count(2) == 2\n \n","repo_name":"LeonardoIshida/oop","sub_path":"python/poker/Poker.py","file_name":"Poker.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38100763419","text":"options = [\"dog\", \"deer\", \"deal\"]\n\nclass Tree(object):\n \"Generic tree node.\"\n def __init__(self, name='root', children=None):\n self.name = name\n self.children = dict()\n if children is not None:\n for child in children:\n self.add_child(child)\n def __repr__(self):\n return self.name\n def add_child(self, node):\n assert isinstance(node, Tree)\n if node.name not in self.children:\n self.children[node.name] = Tree(name=node.name)\n def follow(self, path):\n result = self.children\n for p in path:\n if p in result:\n result = result[p].children\n else:\n result = None\n break\n\n\n'''\ndef convert_list_to_tree(options):\n root = Tree()\n for option in options:\n optionTree = Tree()\n for i in option:\n optionTree.name = l\n 
optionTree.add\n'''","repo_name":"Felizolinha/DailyCodingChallenge","sub_path":"11b.py","file_name":"11b.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23554018304","text":"\n#First iterate through the first loop\n\nfor i in range (0 , 10):\n for j in range (0 , 10):\n# we need to make sure we don't print identical no's and identical combination\n\n if (i != j) and (\"{}{}\".format(i,j) < \"{}{}\".format(j,i)) and (\"{}{}\".format(i,j) != '89'):\n print(\"{}{}\".format(i,j), end= \", \")\n# make sure that the last number is not printed with comma\n elif(\"{}{}\".format(i,j) == '89'):\n print(\"{}{}\".format(i,j))\n","repo_name":"yilae-scene/alx_python","sub_path":"python-hello_world/6-print_comb3.py","file_name":"6-print_comb3.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16366927338","text":"import logging\n\nfrom django_scopes import scope, scopes_disabled\n\nfrom pretalx.celery_app import app\nfrom pretalx.event.models import Event\n\nLOGGER = logging.getLogger(__name__)\n\n\n@app.task()\ndef export_schedule_html(*, event_id: int, make_zip=True):\n from django.core.management import call_command\n\n with scopes_disabled():\n event = (\n Event.objects.prefetch_related(\"submissions\").filter(pk=event_id).first()\n )\n if not event:\n LOGGER.error(f\"Could not find Event ID {event_id} for export.\")\n return\n\n with scope(event=event):\n if not event.current_schedule:\n LOGGER.error(\n f\"Event {event.slug} could not be exported: it has no schedule.\"\n )\n return\n\n cmd = [\"export_schedule_html\", event.slug]\n if make_zip:\n cmd.append(\"--zip\")\n call_command(*cmd)\n","repo_name":"pretalx/pretalx","sub_path":"src/pretalx/agenda/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":592,"dataset":"github-code","pt":"62"} +{"seq_id":"40554810231","text":"#to print a fibonacci series\r\nm=0\r\nn=1\r\nnum=int(input(\"enter the number till fibonacci numbers has to be found\"))\r\nprint(m,n,sep=\",\",end=\",\")\r\nfor i in range(0,num):\r\n fab=m+n\r\n m=n\r\n n=fab\r\n print(fab,end=\",\")\r\n\r\n\r\n \r\n","repo_name":"RANJITHp07/MyCaptain-python-assignment","sub_path":"fibonaccci series.py","file_name":"fibonaccci series.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2845281878","text":"\"\"\"\nGiven an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.\n\nYour algorithm's runtime complexity must be in the order of O(log n).\n\nIf the target is not found in the array, return [-1, -1].\n\nExample 1:\n\nInput: nums = [5,7,7,8,8,10], target = 8\nOutput: [3,4]\nExample 2:\n\nInput: nums = [5,7,7,8,8,10], target = 6\nOutput: [-1,-1]\n\n\"\"\"\nclass Solution(object):\n def searchRange(self, nums, target):\n left = 0\n right = len(nums) - 1\n start = end = 0\n while left <= right:\n mid = (left+right)//2\n if nums[mid] == target:\n start = end = mid\n while start>= 1 and nums[start-1] == target: #注意:index不能out of range\n start -= 1\n while end<= len(nums)-2 and nums[end+1] == target: #注意:index不能out of range\n end += 1\n return [start, end]\n elif nums[mid] < target:\n left = mid + 1\n elif nums[mid] > target:\n right = mid - 1\n 
return [-1, -1]\n\nobj = Solution()\nnums = [5,7,7,8,8,10]\nprint(obj.searchRange(nums, 8))\n","repo_name":"lixuanhong/LeetCode","sub_path":"FirstLastElSortedArray.py","file_name":"FirstLastElSortedArray.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30025275818","text":"from django.forms import ModelForm,widgets,modelformset_factory\nfrom django import forms\nfrom .models import List,Task\n\nclass ListForm(ModelForm):\n class Meta:\n model = List\n fields = ['title']\n labels = {'title':''}\n\n def __init__(self, *args, **kwargs):\n super(ListForm,self).__init__(*args, **kwargs)\n\n for name,field in self.fields.items():\n field.widget.attrs.update({'onChange':'submit();',\n 'style':'border:none;max-width:200px;',\n 'class':'bg-light',\n 'placeholder':'Enter list name...',})\n\n\n#To use widgets use exactly \"from django import forms\"\nTaskFormSet = modelformset_factory(\n Task, fields=('completed','name'),\n labels={'completed':'completed','name':'name'},\n widgets={\n 'completed': forms.CheckboxInput(attrs={\n 'onChange':'submit();',\n 'class':'faChkRnd',\n }),\n 'name':forms.Textarea(attrs={\n 'onChange':'submit();',\n # 'oninput':'this.style.height = \"\";this.style.height = this.scrollHeight + 3+\"px\"',\n 'rows':'2',\n 'class':'get-striked',\n 'placeholder':'+ Add new task...'\n }) \n },\n )\n \nclass ShareForm(forms.Form):\n coowner = forms.CharField(label='', max_length=100)\n\n def __init__(self, *args, **kwargs):\n super(ShareForm,self).__init__(*args, **kwargs)\n\n for name,field in self.fields.items():\n field.widget.attrs.update({ 'class':'form-control',\n 'placeholder':'Enter username or email of ListMaker...'})","repo_name":"JacekHordyj/sharedo","sub_path":"lists/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70315060678","text":"from caltechdata_api import caltechdata_edit\nfrom caltechdata_api import caltechdata_write\nfrom datacite import DataCiteRESTClient\nimport os, csv, json, argparse, subprocess, glob, datetime, requests, copy\nfrom upload_files import upload_files\n\n# Switch for test or production\nproduction = True\n# Location where TCCON metadata application puts its files\nmetadata_path = \"/var/www/tccon-metadata/\"\n# TCCON Site Info File Name\nsite_info_fname = \"site_info.json\"\n# DOI Metatata Location\ndoi_metadata = \"/var/www/tccon-metadata/doi-metadata/\"\n# Data File Location\ndata_location = \"/data/tccon/3a-std-public/\"\n\ntoken = os.environ[\"RDMTOK\"]\npassword = os.environ[\"DATACITE\"]\n\nparser = argparse.ArgumentParser(description=\"Upload a new TCCON site to CaltechDATA\")\nparser.add_argument(\n \"sid\",\n metavar=\"ID\",\n type=str,\n nargs=\"+\",\n help=\"The TCCON two letter Site ID (e.g. 
pa for park falls)\",\n)\nargs = parser.parse_args()\n\n# For each new site release\nfor skey in args.sid:\n # Get data file\n sitef = glob.glob(f\"{data_location}{skey}*.nc\")\n if len(sitef) != 1:\n print(f\"Cannot find public file for site {skey} in {data_location}\")\n exit()\n else:\n sitef = sitef[0]\n\n # Prep metadata\n site_file = open(metadata_path + site_info_fname, \"r\")\n tccon_sites = json.load(site_file)\n site_info = tccon_sites[skey]\n site_name = site_info[\"long_name\"]\n site_doi = site_info[\"data_doi\"]\n version = site_info[\"data_revision\"]\n location = tccon_sites[skey][\"location\"]\n # Get contact information from form \"name \"\n site_contact = site_info[\"contact\"]\n split_contact = site_contact.split(\"<\")\n contact_name = split_contact[0]\n contact_email = split_contact[1].split(\">\")[0]\n\n # Get Metadata for DOI\n meta_file = open(f\"{doi_metadata}{skey}_{site_name}.json\", \"r\")\n metadata = json.load(meta_file)\n\n # Dates\n today = datetime.date.today().isoformat()\n sfname = sitef.split(\"3a-std-public/\")[1]\n cred = (\n sfname[2:6]\n + \"-\"\n + sfname[6:8]\n + \"-\"\n + sfname[8:10]\n + \"/\"\n + sfname[11:15]\n + \"-\"\n + sfname[15:17]\n + \"-\"\n + sfname[17:19]\n )\n metadata[\"dates\"] = [\n {\"dateType\": \"Collected\", \"date\": cred},\n {\"dateType\": \"Updated\", \"date\": today},\n {\"dateType\": \"Created\", \"date\": today},\n ]\n metadata[\"publicationDate\"] = today\n year = today.split(\"-\")[0]\n metadata[\"publicationYear\"] = year\n\n # Standard cleanup\n metadata.pop(\"__last_modified__\")\n metadata[\"fundingReferences\"] = metadata.pop(\"FundingReference\")\n metadata[\"identifiers\"] = [{\"identifierType\": \"DOI\", \"identifier\": site_doi}]\n metadata[\"publisher\"] = \"CaltechDATA\"\n metadata[\"types\"] = {\"resourceTypeGeneral\": \"Dataset\", \"resourceType\": \"Dataset\"}\n metadata[\"schemaVersion\"] = \"http://datacite.org/schema/kernel-4\"\n metadata[\"version\"] = version\n metadata[\"descriptions\"] = [\n {\n \"descriptionType\": \"Abstract\",\n \"description\": \"\"\"The Total Carbon Column Observing Network (TCCON) is\n a network of ground-based Fourier Transform Spectrometers that record direct\n solar absorption spectra of the atmosphere in the near-infrared. From these\n spectra, accurate and precise column-averaged abundances of atmospheric\n constituents including CO2, CH4, N2O, HF, CO, H2O, and HDO, are retrieved. 
This\n is the GGG2020 data release of observations from the TCCON station at\n \"\"\"\n + location,\n }\n ]\n metadata[\"subjects\"] = [\n {\"subject\": \"atmospheric trace gases\"},\n {\"subject\": \"CO2\"},\n {\"subject\": \"CH4\"},\n {\"subject\": \"CO\"},\n {\"subject\": \"N2O\"},\n {\"subject\": \"column-averaged dry-air mole fractions\"},\n {\"subject\": \"remote sensing\"},\n {\"subject\": \"FTIR spectroscopy\"},\n {\"subject\": \"TCCON\"},\n ]\n for cont in metadata[\"contributors\"]:\n if cont[\"contributorType\"] == \"HostingInstitution\":\n cont[\"nameType\"] = \"Organizational\"\n if cont[\"contributorType\"] == \"ResearchGroup\":\n cont[\"nameType\"] = \"Organizational\"\n if cont[\"contributorType\"] == \"ContactPerson\":\n cont[\"contributorEmail\"] = contact_email\n\n license_url = (\n f\"https://renc.osn.xsede.org/ini210004tommorrell/{site_doi}/LICENSE.txt\"\n )\n metadata[\"rightsList\"] = [\n {\"rightsUri\": license_url, \"rights\": \"TCCON Data License\"}\n ]\n\n # Generate README file\n outf = open(\"README.txt\", \"w\")\n subprocess.run(\n [\"./create_readme_contents_tccon-data\", sitef], check=True, stdout=outf\n )\n\n # Generate new license\n lic_f = open(\"license-start.txt\", \"r\")\n lic_t = open(\"license-end.txt\", \"r\")\n lic = lic_f.read()\n cite = site_info[\"data_reference\"]\n lic = lic + cite\n lic = lic + \"\\n\\n\" + lic_t.read()\n outf = open(\"LICENSE.txt\", \"w\")\n outf.write(lic)\n outf.close()\n\n # Files to be uploaded\n files = [\"README.txt\", \"LICENSE.txt\", sitef]\n\n doi = metadata[\"identifiers\"][0][\"identifier\"]\n\n community = \"2dc56d1f-b31b-4b57-9e4a-835f751ae1e3\"\n\n file_links = upload_files(files, doi)\n\n print(json.dumps(metadata))\n\n response = caltechdata_write(\n metadata,\n token,\n [],\n production,\n schema=\"43\",\n publish=True,\n file_links=file_links,\n community=community,\n )\n\n print(response)\n rec_id = response\n\n if production == False:\n doi = \"10.33569/TCCON\"\n url = \"https://cd-sandbox.tind.io/records/\"\n datacite = DataCiteRESTClient(\n username=\"CALTECH.LIBRARY\",\n password=password,\n prefix=\"10.33569\",\n test_mode=True,\n )\n else:\n url = \"https://data.caltech.edu/records/\"\n datacite = DataCiteRESTClient(\n username=\"CALTECH.LIBRARY\", password=password, prefix=\"10.14291\"\n )\n\n # Strip contributor emails\n for c in metadata[\"contributors\"]:\n if \"contributorEmail\" in c:\n c.pop(\"contributorEmail\")\n if \"publicationDate\" in metadata:\n metadata.pop(\"publicationDate\")\n\n doi = datacite.public_doi(metadata, url + str(rec_id), doi=doi)\n print(doi)\n\n # Update sites file\n infile = open(\"/data/tccon/site_ids.csv\")\n site_ids = csv.reader(infile)\n outstr = site_name + \",\" + rec_id + \",\" + version + \"\\n\"\n for row in site_ids:\n outstr = outstr + \",\".join(row) + \"\\n\"\n infile.close()\n\n if production == True:\n os.rename(\"/data/tccon/site_ids.csv\", \"/data/tccon/old_site_ids.csv\")\n out_id = open(\"/data/tccon/site_ids.csv\", \"w\")\n out_id.write(outstr)\n out_id.close()\n\n # Update site list - assumes new sites are in alphabetical order\n\n # Generate site text\n for t in metadata[\"titles\"]:\n if \"titleType\" not in t:\n title = t[\"title\"].split(\"from\")[1].split(\",\")[0].strip()\n split = cred.split(\"/\")\n first = split[0]\n second = split[1]\n outsites = f\"{title} [{site_name}],https://doi.org/{doi},{first},{second}\\n\"\n\n existing = open(\"/data/tccon/sites.csv\", \"r\")\n sites = csv.reader(existing)\n outstr = \"\"\n included = False\n for 
row in sites:\n if row[0][0] > outsites[0] and included == False:\n outstr = outstr + outsites\n outstr = outstr + \",\".join(row) + \"\\n\"\n included = True\n else:\n outstr = outstr + \",\".join(row) + \"\\n\"\n os.rename(\"/data/tccon/sites.csv\", \"/data/tccon/old_sites.csv\")\n outsites = open(\"/data/tccon/sites.csv\", \"w\")\n outsites.write(outstr)\n outsites.close()\n","repo_name":"caltechlibrary/tccon-caltechdata","sub_path":"create_site.py","file_name":"create_site.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15956860952","text":"import jax.numpy as jnp\nfrom core.distribution import Distribution\nimport matplotlib.pyplot as plt\nfrom jax.experimental.ode import odeint\nfrom utils import divergence_fn\nimport numpy as np\nfrom PIL import Image\nimport jax\nimport seaborn as sns\nfrom matplotlib import animation\nfrom typing import List\n\nbatch_size_plot = 1000\n\n\n# def plot_result(net, params, init_distribution, target_potential, T):\n# bar_f = lambda _x, _t, _params: net.apply(_params, _t, _x) - target_potential.gradient(_x)\n# # compute x(T) by solve IVP (I) & compute the actor loss\n# # ================ Forward ===================\n# x_0 = init_distribution.sample(batch_size_plot)\n# states_0 = [x_0]\n#\n# def ode_func1(states, t):\n# x = states[0]\n# bar_f_t_theta = lambda _x: bar_f(_x, t, params)\n# dx = bar_f_t_theta(x)\n#\n# return [dx]\n#\n# tspace = jnp.array((0., T))\n# result_forward = odeint(ode_func1, states_0, tspace, atol=tolerance, rtol=tolerance)\n# x_T = result_forward[0][1]\n# # ================ Forward ===================\n#\n# print(jnp.mean(x_T, axis=(0,)))\n# # print(x_T.shape)\n# plt.scatter(x_T[:, 0], x_T[:, 1])\n# plt.savefig('Gaussian_to_Gaussian.png')\n# plt.show()\n\n\n\n\n\n# def plt_density_2d(prior_logdensity, bar_f, end_T=T, npts=256, LOW=-6, HIGH=6,\n# gif_subroutine=False):\n# side = jnp.linspace(LOW, HIGH, npts)\n# xx, yy = jnp.meshgrid(side, side)\n# x = jnp.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])\n# states_T = [x]\n#\n# def ode_func1(states, t):\n# t = end_T - t\n# x = states[0]\n# dx = bar_f(x, t)\n# return [-dx]\n#\n# tspace = jnp.array((0., end_T))\n# result_backward = odeint(ode_func1, states_T, tspace, atol=tolerance, rtol=tolerance)\n# x_0 = result_backward[0][1]\n#\n# log_p0x_0 = prior_logdensity(x_0)\n# states_0 = [x_0, log_p0x_0]\n#\n# def ode_func2(states, t):\n# x = states[0]\n# dx = bar_f(x, t)\n#\n# bar_f_t = lambda _x: bar_f(_x, t)\n# div_bar_f_t = lambda _x: divergence_fn(bar_f_t, _x)\n# dlog_ptx_t = - div_bar_f_t(x)\n# return [dx, dlog_ptx_t]\n#\n# tspace = jnp.array((0., end_T))\n# result_forward = odeint(ode_func2, states_0, tspace, atol=tolerance, rtol=tolerance)\n# x_T = result_forward[0][1]\n# log_pTx_T = result_forward[1][1]\n#\n#\n# pTx_T = jnp.exp(log_pTx_T).reshape(npts, npts)\n#\n# if not gif_subroutine:\n# print(\"numerical error %.5f\" % (jnp.mean(jnp.sum((x_T - x) ** 2, axis=(1,)))))\n# # ax = plt.gca()\n# plt.imshow(pTx_T)\n# plt.savefig('Gaussian_to_Gaussian.png')\n# plt.show()\n#\n# print(f\"The total mass between [{LOW, HIGH}]^2 is {((HIGH - LOW) / npts) ** 2 * jnp.sum(pTx_T)}\")\n# else:\n# return pTx_T\n#\n# def _plt_density_2d(prior_logdensity, target_potential, net, params, end_T=T, npts=256, LOW=-6, HIGH=6,\n# gif_subroutine=False):\n# bar_f = lambda _x, _t: net.apply(params, _t, _x) - target_potential.gradient(_x)\n# side = jnp.linspace(LOW, HIGH, npts)\n# xx, yy = jnp.meshgrid(side, 
side)\n# x = jnp.hstack([xx.reshape(-1, 1), jnp.flip(yy.reshape(-1, 1))])\n# states_T = [x]\n#\n# def ode_func1(states, t):\n# t = end_T - t\n# x = states[0]\n# dx = bar_f(x, t)\n# return [-dx]\n#\n# tspace = jnp.array((0., end_T))\n# result_backward = odeint(ode_func1, states_T, tspace, atol=tolerance, rtol=tolerance)\n# x_0 = result_backward[0][1]\n#\n# log_p0x_0 = prior_logdensity(x_0)\n# states_0 = [x_0, log_p0x_0]\n#\n# def ode_func2(states, t):\n# x = states[0]\n# dx = bar_f(x, t)\n#\n# bar_f_t = lambda _x: bar_f(_x, t)\n# div_bar_f_t = lambda _x: divergence_fn(bar_f_t, _x)\n# dlog_ptx_t = - div_bar_f_t(x)\n# return [dx, dlog_ptx_t]\n#\n# tspace = jnp.array((0., end_T))\n# result_forward = odeint(ode_func2, states_0, tspace, atol=tolerance, rtol=tolerance)\n# x_T = result_forward[0][1]\n# log_pTx_T = result_forward[1][1]\n#\n#\n# pTx_T = jnp.exp(log_pTx_T).reshape(npts, npts)\n#\n# if not gif_subroutine:\n# print(\"numerical error %.5f\" % (jnp.mean(jnp.sum((x_T - x) ** 2, axis=(1,)))))\n# # ax = plt.gca()\n# plt.imshow(pTx_T)\n# plt.savefig('Gaussian_to_Gaussian.png')\n# plt.show()\n#\n# print(f\"The total mass between [{LOW, HIGH}]^2 is {((HIGH - LOW) / npts) ** 2 * jnp.sum(pTx_T)}\")\n# else:\n# return pTx_T\n\n # Compute the total mass in the region\n\n\n# plt_density_2d_jit = jax.jit(_plt_density_2d, static_argnums=(0, 1, 2, 4, 5, 6, 7, 8))\n\n\n\n\n\n\n\n# def plt_density_1d(prior_logdensity, bar_f, npts=256, LOW=-6, HIGH=6):\n# x = jnp.linspace(LOW, HIGH, npts)[:, None]\n#\n# states_T = [x]\n#\n# def ode_func1(states, t):\n# t = T - t\n# x = states[0]\n# dx = bar_f(x, t)\n# return [-dx]\n#\n# tspace = jnp.array((0., T))\n# result_backward = odeint(ode_func1, states_T, tspace, atol=tolerance, rtol=tolerance)\n# x_0 = result_backward[0][1]\n#\n# log_p0x_0 = prior_logdensity(x_0)\n# states_0 = [x_0, log_p0x_0]\n#\n# def ode_func2(states, t):\n# x = states[0]\n# dx = bar_f(x, t)\n#\n# bar_f_t = lambda _x: bar_f(_x, t)\n# div_bar_f_t = lambda _x: divergence_fn(bar_f_t, _x)\n# dlog_ptx_t = - div_bar_f_t(x)\n# return [dx, dlog_ptx_t]\n#\n# tspace = jnp.array((0., T))\n# result_forward = odeint(ode_func2, states_0, tspace, atol=tolerance, rtol=tolerance)\n# x_T = result_forward[0][1]\n# log_pTx_T = result_forward[1][1]\n#\n# pTx_T = jnp.exp(log_pTx_T)\n# print(\"numerical error %.5f\" % (jnp.mean(jnp.sum((x_T - x) ** 2, axis=(1,)))))\n# # ax = plt.gca()\n# # plt.imshow(pTx_T)\n# plt.plot(x, pTx_T)\n# plt.savefig('Gaussian_to_Gaussian.png')\n# plt.show()\n\n\n# def plt_density_2d_new(initial_distribution: Distribution, bar_f, n_samples = 10000, end_T=T, n_frames = 100, LOW=-6, HIGH=6):\n# data = initial_distribution.sample(n_samples)\n# states_0 = [data]\n#\n# def ode_func1(states, t):\n# x = states[0]\n# dx = bar_f(x, t)\n# return [dx]\n#\n# tspace = jnp.linspace(0, end_T, n_frames)\n#\n# result_forward = odeint(ode_func1, states_0, tspace, atol=tolerance, rtol=tolerance)\n#\n# fig, ax = plt.subplots(figsize=(6, 6))\n#\n# def animate(num):\n# state = result_forward[0][num]\n# x, y = state[:, 0], state[:, 1]\n# ax.clear()\n# sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n# sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n# sns.kdeplot(x=x, y=y, levels=5, color=\"w\", linewidths=1)\n# ax.set_xlim(-10, 10)\n# ax.set_ylim(-10, 10)\n#\n# anim = animation.FuncAnimation(fig, animate, frames=len(result_forward[0]), blit=False)\n# fig.tight_layout()\n# anim.save('plot/contour.gif', writer='imagemagick', fps=5)\n# plt.show()\n\n\ndef plot_velocity_field_2d(args, f_velocity, 
interval=50):\n\n x, y = jnp.linspace(-args.plot_domain_size, args.plot_domain_size, num=41), jnp.linspace(-args.plot_domain_size, args.plot_domain_size, num=41)\n xx, yy = jnp.meshgrid(x, y)\n grid_points = jnp.stack([jnp.reshape(xx, (-1)), jnp.reshape(yy, (-1))], axis=1)\n\n\n velocity_0 = f_velocity(grid_points, 0.)\n\n fig, ax = plt.subplots()\n Q = ax.quiver(grid_points[:, 0], grid_points[:, 1], velocity_0[:, 0], velocity_0[:, 1], pivot='mid', color='r', units='inches')\n ax.set_xlim(jnp.min(grid_points[:, 0]), jnp.max(grid_points[:, 0]))\n ax.set_ylim(jnp.min(grid_points[:, 1]), jnp.max(grid_points[:, 1]))\n\n frames = int(args.total_evolving_time / interval * 1000)\n def update_quiver(num, Q):\n velocity = f_velocity(grid_points, num * interval / 1000.)\n Q.set_UVC(velocity[:, 0], velocity[:, 1])\n return Q\n\n anim = animation.FuncAnimation(fig, update_quiver, fargs=(Q,), frames=frames, interval=interval, blit=False)\n fig.tight_layout()\n file_name = f\"{args.plot_save_directory}/{args.PDE}/{args.total_evolving_time}_{args.diffusion_coefficient}_velocity.gif\"\n anim.save(file_name, writer='imagemagick', fps=2)\n # plt.show()\n plt.close(fig)\n\ndef plot_density_contour_2d(args, density_data: List[jnp.ndarray]):\n fig, ax = plt.subplots(figsize=(6, 6))\n pal = sns.dark_palette(\"navy\", as_cmap=True)\n\n def animate(num):\n state = density_data[num]\n x, y = state[:, 0], state[:, 1]\n ax.clear()\n # sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n # sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n sns.kdeplot(x=x, y=y, levels=10, color=\"w\", linewidths=1, cmap=pal)\n ax.set_xlim(-args.plot_domain_size, args.plot_domain_size)\n ax.set_ylim(-args.plot_domain_size, args.plot_domain_size)\n\n anim = animation.FuncAnimation(fig, animate, frames=len(density_data), blit=False)\n fig.tight_layout()\n file_name = f\"{args.plot_save_directory}/{args.PDE}/{args.total_evolving_time}_{args.diffusion_coefficient}_density_contour.gif\"\n anim.save(file_name, writer='imagemagick', fps=2)\n # plt.show()\n plt.close(fig)\n\ndef plot_trajectory_2d(args, trajectories: List[jnp.ndarray], plot_multiple=1.1):\n fig, ax = plt.subplots(figsize=(6, 6))\n colors = ['red', 'green', 'blue', 'yellow', 'magenta']\n # plot the trajectory\n for i, trajectory in enumerate(trajectories):\n assert trajectory.shape[1] == 2 # the data should be 2D\n plt.quiver(trajectory[:-1, 0], trajectory[:-1, 1],\n trajectory[1:, 0]-trajectory[:-1, 0], trajectory[1:, 1]-trajectory[:-1, 1],\n scale_units='xy', angles='xy', scale=1, color=colors[i % len(colors)])\n\n # mark the start and end points of every trajectory with scatter\n ## gather the start and end points\n start_points = jnp.stack([trajectory[0, :] for trajectory in trajectories], axis=0)\n plt.scatter(start_points[:, 0], start_points[:, 1], marker='o', linewidths=.5, color='b')\n end_points = jnp.stack([trajectory[-1, :] for trajectory in trajectories], axis=0)\n plt.scatter(end_points[:, 0], end_points[:, 1], marker='v', linewidths=.5, color='b')\n\n # particle may leave the domain of plot. 
increase the x/y limit to include them in the plot\n ax.set_xlim(-args.plot_domain_size * plot_multiple, args.plot_domain_size * plot_multiple)\n ax.set_ylim(-args.plot_domain_size * plot_multiple, args.plot_domain_size * plot_multiple)\n\n fig.tight_layout()\n file_name = f\"{args.plot_save_directory}/{args.PDE}/{args.total_evolving_time}_{args.diffusion_coefficient}_trajecotry.png\"\n plt.savefig(file_name, dpi=600)\n plt.close(fig)","repo_name":"shenzebang/Fokker-Planck-Self-consistency-jax","sub_path":"plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"40386208932","text":"# Fails 235.py\n# Autors: Artis Erglis\n\nfrom PythonMagick import Image\n\n# Izgatavojam jaunu objektu - bilde\nbilde = Image(\"16x16\", \"#3A0CC1\")\n\n# Izgatavojam mainiigos x un y\nx=y=0\n\nfor a in range(0,5):\n bilde.pixelColor(8,a,\"#0CC170\")\nfor a in range(11,16):\n bilde.pixelColor(8,a,\"#0CC170\")\nfor a in range(6,10):\n bilde.pixelColor(a,5,\"#E64634\")\nfor a in range(6,10):\n bilde.pixelColor(a,11,\"#E64634\")\nfor a in range(5,12):\n bilde.pixelColor(6,a,\"#E64634\")\nfor a in range(5,12):\n bilde.pixelColor(10,a,\"#E64634\")\n\n# 16x16 pixles palielina lidz 200x200\nbilde.scale(\"200x200\")\n\n# Objektu 'bilde' ieraksta failaa\nbilde.write(\"235.png\")\n","repo_name":"easydors/datormaciba","sub_path":"public_html/darbi2/LD23/235.py","file_name":"235.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"40033549940","text":"n = int(input())\n\nfor i in range(n):\n s = input().replace(\" \", \"\")\n\n res = 0\n for j in s:\n res += ord(j) - 64\n \n if res == 100:\n print(\"PERFECT LIFE\")\n else:\n print(res)\n","repo_name":"8azelnut/BOJ","sub_path":"Bronze_2/15351.py","file_name":"15351.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7879692269","text":"#!/usr/bin/env python3\n# coding: utf-8\nfrom urllib import request\nimport urllib.request\nimport urllib.parse\nimport http.cookiejar\nimport json\nimport datetime\nimport sqlite3\nimport hashlib\nimport re\nimport time\n\n\ndef get_content(rank_name_group = '资讯',rank_name = '时事',date = '2016/04/11'):\n\n start = date\n end = date\n\n # url = \"http://www.newrank.cn/public/info/hot.html?period=day\"\n url_xhr = \"http://www.newrank.cn/xdnphb/list/day/article\"\n req = urllib.request.Request(url_xhr)\n\n # deal with headers\n ori_headers = {\n 'Host': 'www.newrank.cn',\n 'Connection': 'keep-alive',\n 'Content-Length': '148',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Origin': 'http://www.newrank.cn',\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'DNT': '1',\n 'Referer': 'http://www.newrank.cn/public/info/hot.html?period=day',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'\n }\n\n # set nance (0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f)9 of 16\n nonce = '012345678'\n\n # set xyz /*! 
xdnphb linux-grunt-xdnphb-copyright 2016-03-22 */\n appBase = '/xdnphb'\n urlBase = appBase+'/'\n xyz_str = urlBase + 'list/day/article?AppKey=joker&end=%s&rank_name=%s&rank_name_group=%s&start=%s' % (end, rank_name, rank_name_group, start)\n xyz = hashlib.md5((xyz_str+'&nonce='+nonce).encode()).hexdigest()\n\n # deal with form data\n form_data = urllib.parse.urlencode({\n 'end': end,\n 'rank_name': rank_name,\n 'rank_name_group': rank_name_group,\n 'start': start,\n 'nonce': nonce,\n 'xyz': xyz\n }).encode()\n\n # add headers to req\n for key, value in ori_headers.items():\n req.add_header(key, value)\n\n # deal with cookies\n cj = http.cookiejar.CookieJar()\n pro = urllib.request.HTTPCookieProcessor(cj)\n\n # set proxy\n# proxy_support = request.ProxyHandler({'http':'http://27.24.158.155:84'})\n# opener = urllib.request.build_opener(proxy_support, pro)\n opener = urllib.request.build_opener(pro)\n\n op = opener.open(req, form_data)\n data = op.read().decode(\"UTF-8\") # \n\n ori_content = json.loads(data)\n inner_content = ori_content['value']\n print('正在获取 '+date.__str__()+' 的 ' + rank_name_group +' 分类下的 '+rank_name+' 数据')\n return inner_content\n\n\ndef get_date():\n url_xhr = \"http://www.newrank.cn/xdnphb/list/getDate\"\n req = urllib.request.Request(url_xhr)\n\n # deal with headers\n ori_headers = {\n 'Host': 'www.newrank.cn',\n 'Connection': 'keep-alive',\n 'Content-Length': '148',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Origin': 'http://www.newrank.cn',\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'DNT': '1',\n 'Referer': 'http://www.newrank.cn/public/info/hot.html?period=day',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'\n }\n\n # set nance (0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f)9 of 16\n nonce = '012345678'\n\n # set xyz /*! 
xdnphb linux-grunt-xdnphb-copyright 2016-03-22 */\n appBase = '/xdnphb'\n urlBase = appBase+'/'\n xyz_str = urlBase + 'list/getDate?AppKey=joker'\n xyz = hashlib.md5((xyz_str+'&nonce='+nonce).encode()).hexdigest()\n\n # deal with form data\n form_data = urllib.parse.urlencode({\n 'nonce': nonce,\n 'xyz': xyz\n }).encode()\n\n # add headers to req\n for key, value in ori_headers.items():\n req.add_header(key, value)\n\n # deal with cookies\n cj = http.cookiejar.CookieJar()\n pro = urllib.request.HTTPCookieProcessor(cj)\n opener = urllib.request.build_opener(pro)\n\n op = opener.open(req, form_data, timeout=2)\n data = op.read().decode(\"UTF-8\") # \n\n ori_content = json.loads(data)\n inner_content = ori_content['value']['WEIXIN_CAL_DAY'][:10]\n return inner_content\n\n\ndef store_to_db(content,table_name):\n\n for x in range(len(content)):\n if content[x].get('summary',-1) == -1:\n content[x]['summary'] = None\n\n # sql_create, sql_insert\n sc = ''''''\n sii = \"\"\n i = 0\n sort_content = sorted(content[0].items(), key=lambda d: d[0])\n count = len(sort_content)\n while i < count:\n sc += '''%s TEXT,''' % sort_content[i][0]\n sii += \"content[index]['%s'],\" % sort_content[i][0]\n i += 1\n\n si = sii[0:(len(sii)-1)]\n\n sc = sc[0:(len(sc)-1)]\n s = \"?,\"*count\n s = s[0:(len(s)-1)]\n sc = re.sub(\",id TEXT\",\",id TEXT PRIMARY KEY NOT NULL\",sc)\n sql_create = ''' CREATE TABLE IF NOT EXISTS ''' + table_name + ''' (''' + '''%s''' % sc + ''')'''\n sql_insert = \"INSERT INTO \"+table_name+\" VALUES (\" + s + \")\"\n\n commit_count = 0\n\n conn = sqlite3.connect(table_name+\".db\")\n\n cur = conn.cursor()\n\n cur.execute(sql_create)\n\n L = []\n IDList = cur.execute(\"SELECT ID FROM \"+table_name)\n for row in IDList:\n L.append(row[0])\n\n for index in range(len(content)):\n uid_exist = 1 # uid existed\n if content[index]['id'] in L:\n uid_exist = 1\n break\n else:\n uid_exist = 0\n if len(cur.fetchall()) == 0:\n uid_exist = 0\n if uid_exist == 1:\n print(\"exist\")\n else:\n\n cur.execute(sql_insert,tuple(eval(si)))\n commit_count += 1\n # Save (commit) the changes\n conn.commit()\n print(\"新增 \"+str(commit_count)+\" 条数据\")\n conn.close()\n return table_name\n\n\ndef get_rownum_from_db(table_name):\n conn = sqlite3.connect(table_name+\".db\")\n cur = conn.cursor()\n cur.execute(\"SELECT count(*) FROM \"+table_name)\n total = cur.fetchone()\n conn.close()\n return total[0]\n\n\ndef main():\n getDate = get_date()\n getDate = datetime.datetime.strptime(getDate, \"%Y-%m-%d\").date()\n i = 0\n num = 7\n table_name = 'Info_hot_day'\n rank_name_group = '资讯'\n file_name = rank_name_group+'.txt'\n L = ['时事','民生','财富','科技','创业','汽车','楼市','职场','教育','学术','政务','企业']\n\n # 创建文件\n with open(file_name,'a+',encoding='utf-8') as f:\n f.close()\n while i < num:\n date = (getDate + datetime.timedelta(days=-i))\n with open(file_name,'r+',encoding='utf-8') as f:\n s = f.read()\n if str(date) in s:\n print(str(date)+'日的 '+rank_name_group+' 数据已抓取')\n f.close()\n else:\n for rank_name in L:\n time.sleep(3)\n content = get_content(rank_name_group,rank_name,date)\n store_to_db(content,table_name)\n f.write(str(date)+'\\n')\n f.close()\n i += 1\n\n rownum = get_rownum_from_db(table_name)\n print('数据库中共有 '+str(rownum)+' 条数据')\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"cheersberry/Info_hot_day","sub_path":"Info_hot_day.py","file_name":"Info_hot_day.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} 
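Editor's note: the newrank.cn crawler in the record above derives its "xyz" form parameter by MD5-hashing the request path plus query string with a fixed nonce appended. Below is a minimal sketch of just that signing step, assuming the same path, AppKey, and nonce values quoted in the record (they are values observed in that snippet, not a documented API):

import hashlib

def sign_xhr(path, params, nonce="012345678"):
    # Join the parameters in the same alphabetical key order the crawler uses,
    # append the nonce, and MD5 the whole string to obtain the "xyz" token.
    query = "&".join("%s=%s" % (k, params[k]) for k in sorted(params))
    xyz = hashlib.md5(("%s?%s&nonce=%s" % (path, query, nonce)).encode()).hexdigest()
    return dict(params, nonce=nonce, xyz=xyz)

# Mirrors the crawler's /xdnphb/list/getDate call:
form_fields = sign_xhr("/xdnphb/list/getDate", {"AppKey": "joker"})

The returned dict carries the same fields the crawler urlencodes into its POST body.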
+{"seq_id":"9099437639","text":"# Find the smallest positive integer that does not occur in a given sequence.\n\ndef Solution(A):\n setA = set(A)\n possetA = [i for i in setA if i > 0]\n if 1 not in possetA or not possetA:\n return 1\n else:\n maxA = max(possetA)\n sumofposA = sum(possetA)\n if sumofposA == (maxA * (maxA + 1))//2:\n return maxA + 1\n else:\n return (maxA * (maxA + 1))//2 - sumofposA\n\n\nA = [1, 3, 6, 4, 1, 2]\nprint(Solution(A))\nprint(Solution([1,2,3]))\nprint(Solution([-1,-3,-5,-1]))","repo_name":"thainguyentran/CodilityLessons","sub_path":"L4_CountingElements/MissingInteger.py","file_name":"MissingInteger.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"32776266187","text":"import json\nimport logging\n\nfrom chimera.backend.server import BaseChimeraServer, BaseConnectedClient\n\nLOGGER = logging.getLogger(\"chimera.messaging\")\n\n\nclass FakeConnectedClient(BaseConnectedClient):\n\n def __init__(self, name, notification_callback=None):\n super().__init__()\n self.name = name\n self._responses = []\n self._notifications = []\n\n async def _send_msg(self, msg):\n msg_str = json.dumps(msg)\n LOGGER.debug(f\"Server -> {self.name} | {msg_str}\")\n if msg[\"type\"] == \"response\":\n self._responses.append(msg)\n elif msg[\"type\"] == \"notification\":\n self._notifications.append(msg)\n\n @property\n def responses(self):\n while len(self._responses) > 0:\n m = self._responses.pop(0)\n yield m\n\n @property\n def num_responses(self):\n return len(self._responses)\n\n @property\n def notifications(self):\n while len(self._notifications) > 0:\n m = self._notifications.pop(0)\n yield m\n\n @property\n def num_notifications(self):\n return len(self._notifications)\n\n\nclass FakeChimeraServer(BaseChimeraServer):\n\n def __init__(self):\n super().__init__()\n self.clients = []\n\n async def start(self):\n pass\n\n async def stop(self):\n pass\n\n def create_client(self, name=\"Client\"):\n client = FakeConnectedClient(name)\n self.clients.append(client)\n return client\n\n async def fake_send_message(self, client, message):\n LOGGER.debug(f\"{client.name} -> Server | {message}\")\n await self._process_message(client, message)\n","repo_name":"uchicago-cs/chimera","sub_path":"src/chimera/backend/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19045330752","text":"day=[\"MON\",\"TUE\",\"WED\",\"THU\",\"FRI\",\"SAT\",\"SUN\"]\na = input()\nb = input()\nc = input()\nd = input()\nlength = min(len(a),len(b))\n\nfor i in range(length):\n if a[i] == b[i]:\n if a[i]>='A' and a[i]<='G':\n dd = day[ord(a[i])-ord('A')]\n position = i\n break\nfor i in range(position+1,length):\n if a[i] == b[i]:\n if a[i] >='A' and a[i]<='N':\n hour = ord(a[i])-ord('A')+10\n break\n elif a[i]>='0' and a[i]<='9':\n hour = ord(a[i])-ord('0')\n break\nlength = min(len(c),len(d))\nfor i in range(length):\n if c[i] == d[i]:\n if c[i]>='a' and c[i] <='z':\n minute = i\n break\n elif c[i]>='A' and c[i]<='Z':\n minute = i\n break\nprint(\"%s %02d:%02d\"%(dd,hour,minute))","repo_name":"drawAgirl/PAT","sub_path":"Python3/1061.py","file_name":"1061.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"14247169748","text":"import email \nimport imaplib \nimport ctypes \nimport 
getpass\nmail = imaplib.IMAP4_SSL('imap.gmail.com',993) \nunm = 'your email' \npwd= 'your password' \nmail.login(unm,pwd) \nmail.select('INBOX') \ndef loop(): \n mail.select('INBOX') \n n=0\n (retcode,messages)=mail.search(None,'(ALL)')\n if retcode == 'OK' : \n for num in messages[0].split():\n n=n+1 \n typ,data= mail.fetch(num,'(RFC822)') \n raw_email = data[0][1] \n raw_email_string = raw_email.decode('utf-8') \n email_message = email.message_from_string(raw_email_string) \n for respone_part in data: \n if isinstance (respone_part,tuple): \n original = (email.message_from_string(respone_part[1])) \n d1 = original['From'] \n print ('\\n\\n\\nFrom : '+d1) \n data = original['Subject'] \n print ('Subject : '+data) \n typ, data = mail.store(num,'+FLAGS','\\\\Seen') \n for part in email_message.walk(): \n if part.get_content_type() == \"text/plain\": \n body = part.get_payload(decode=True) \n print (\"Body :\"+body) \n f=open(\"Email content.txt\",\"a\") \n f.write(d1) \n f.write(str(data)) \n f.write(body) \n f.close() \nif __name__ == '__main__': \n try:\n while True: \n loop() \n finally: \n print(\"Thanks for using this service\")\n","repo_name":"vvicky30/python","sub_path":"email_acess.py","file_name":"email_acess.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"72400017797","text":"import wx\nimport tab1_setup as t1\nimport tab2_crypto as t2\nimport tab3_engine as t3\nimport tab4_policy as t4\nimport tab5_attest as t5\nimport tab6_cloud as t6\nimport misc_dialogs as misc\nimport shell_util as exec_cmd\nimport images as img\nimport subprocess\nimport wx.lib.inspection\n\nclass MainFrame(wx.Frame):\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=\"Main Window\", style=(wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)))\n self.SetBackgroundColour(wx.WHITE)\n # Set Font for frame, so all buttons will inherit this, so it saves time\n main_menu_font = wx.Font(16, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n self.SetFont(main_menu_font)\n\n # Create all the button widgets first\n self.button1 = wx.Button(self, -1, 'Setup and Basic Features')\n self.button2 = wx.Button(self, -1, 'Cryptographic Functions')\n self.button3 = wx.Button(self, -1, 'OpenSSL-Engine')\n self.button4 = wx.Button(self, -1, 'Data Sealing with Policy')\n self.button5 = wx.Button(self, -1, 'Attestation')\n self.button6 = wx.Button(self, -1, 'AWS: IOT Core')\n # Title screen widget setup\n # \"\\xe2\\x84\\xa2\" represents the Trademark symbol in UTF-8 for Python 2.x, will not display properly on Windows (or Python 3.x)\n title_screen = wx.StaticText(self, -1, style=wx.ALIGN_CENTER, label=\"OPTIGA\"+ u\"\\u1d40\\u1d39\"+\" TPM 2.0 Explorer\")\n font = wx.Font(30, wx.ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\n title_screen.SetFont(font)\n # TPM Image\n tpm_image = wx.Image('../images/tpm_slb_9670.png', wx.BITMAP_TYPE_PNG)\n tpm_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tpm_image))\n # ~ tpm_image = wx.StaticBitmap(self, wx.ID_ANY, img.tpm_slb_9670.getBitmap())\n\n # IFX Logo\n ifx_image = wx.Image('../images/250px-Infineon-Logo.png', wx.BITMAP_TYPE_PNG)\n ifx_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(ifx_image))\n # ~ ifx_image = wx.StaticBitmap(self, wx.ID_ANY, img._250px_Infineon_Logo.getBitmap()) \n \n # Setup logo\n tab1_image = wx.Image('../images/setup.png', wx.BITMAP_TYPE_PNG)\n tab1_image = wx.StaticBitmap(self, wx.ID_ANY, 
wx.Bitmap(tab1_image))\n # ~ tab1_image = wx.StaticBitmap(self, wx.ID_ANY, img.setup.getBitmap())\n\n # Crypto logo\n tab2_image = wx.Image('../images/crypto.png', wx.BITMAP_TYPE_PNG)\n tab2_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tab2_image))\n # ~ tab2_image = wx.StaticBitmap(self, wx.ID_ANY, img.crypto.getBitmap())\n\n # Engine logo\n tab3_image = wx.Image('../images/engine.png', wx.BITMAP_TYPE_PNG)\n tab3_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tab3_image))\n # ~ tab3_image = wx.StaticBitmap(self, wx.ID_ANY, img.engine.getBitmap())\n\n # Cloud logo\n tab6_image = wx.Image('../images/cloud.png', wx.BITMAP_TYPE_PNG)\n tab6_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tab6_image))\n # ~ tab6_image = wx.StaticBitmap(self, wx.ID_ANY, img.cloud.getBitmap())\n \n # Attestation logo\n tab5_image = wx.Image('../images/attest.png', wx.BITMAP_TYPE_PNG)\n tab5_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tab5_image))\n # ~ tab5_image = wx.StaticBitmap(self, wx.ID_ANY, img.attest.getBitmap())\n \n # Policy logo\n tab4_image = wx.Image('../images/policy.png', wx.BITMAP_TYPE_PNG)\n tab4_image = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(tab4_image))\n # ~ tab4_image = wx.StaticBitmap(self, wx.ID_ANY, img.policy.getBitmap())\n\n # declare the sizers\n mainsizer = wx.BoxSizer(wx.VERTICAL)\n horisizer = wx.BoxSizer(wx.HORIZONTAL)\n horisizer2 = wx.BoxSizer(wx.HORIZONTAL)\n gdsizer = wx.GridSizer(rows=4, cols=3, vgap=0, hgap=5)\n \n # add the widgets to the sizers (add row by row)\n horisizer.AddSpacer(25)\n horisizer.Add(tpm_image, 0, wx.TOP, 17)\n horisizer.AddSpacer(175)\n horisizer.Add(title_screen, 0, wx.ALIGN_CENTRE)\n horisizer.AddSpacer(145)\n horisizer.Add(ifx_image, 0, wx.TOP, 10)\n \n horisizer2.AddSpacer(1278)\n\n gdsizer.Add(tab1_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n gdsizer.Add(tab2_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n gdsizer.Add(tab3_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n\n gdsizer.Add(self.button1, 1, wx.EXPAND | wx.ALL, 30)\n gdsizer.Add(self.button2, 1, wx.EXPAND | wx.ALL, 30)\n gdsizer.Add(self.button3, 1, wx.EXPAND | wx.ALL, 30)\n\n gdsizer.Add(tab4_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n gdsizer.Add(tab5_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n gdsizer.Add(tab6_image, 0, wx.ALIGN_CENTRE | wx.TOP, 5)\n\n gdsizer.Add(self.button4, 1, wx.EXPAND | wx.ALL, 30)\n gdsizer.Add(self.button5, 1, wx.EXPAND | wx.ALL, 30)\n gdsizer.Add(self.button6, 1, wx.EXPAND | wx.ALL, 30)\n\n mainsizer.Add(horisizer, 0, wx.EXPAND | wx.TOP, 20)\n mainsizer.Add(horisizer2)\n mainsizer.Add(-1, 31)\n mainsizer.Add(gdsizer, 1, wx.EXPAND)\n \n # Bind events\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button1)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button2)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button3)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button4)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button5)\n self.Bind(wx.EVT_BUTTON, self.OnButtonClick, self.button6)\n\n # Set tooltips\n self.button1.SetToolTip(wx.ToolTip(\"Take ownership here.\"))\n self.button2.SetToolTip(wx.ToolTip(\"Hashing, Encryption, Decryption, Verification & Signing\"))\n self.button3.SetToolTip(wx.ToolTip(\"Using TPM and OpenSSL to establish a client-server connection\"))\n self.button4.SetToolTip(wx.ToolTip(\"Making use of policies to seal and unseal objects\"))\n self.button5.SetToolTip(wx.ToolTip(\"Using endorsement key hierarchies to prove/attest\"))\n self.button6.SetToolTip(wx.ToolTip(\"Example 
use-case with AWS\"))\n\n self.SetSizer(mainsizer)\n mainsizer.Fit(self)\n# self.Show(True)\n \n self.Centre()\n self.Check_IFX_TPM()\n \n def Check_IFX_TPM(self):\n cmd =\" ls /dev/tpm0\"\n ps_command = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n command_output = ps_command.stdout.read()\n retcode = ps_command.wait()\n if( command_output.decode() != \"/dev/tpm0\\n\"):\n misc.Not_IFX_TPM_Dlg(self, \"TPM Device Not Found\").ShowModal()\n self.Disable_Buttons() \n return\n \n cmd =\" tpm2_getcap properties-fixed | grep -A2 'MANUFACTURER' | grep value | grep -Eo '[A-Z]*'\"\n ps_command = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n command_output = ps_command.stdout.read()\n \n retcode = ps_command.wait()\n if (not \"IFX\" in command_output.decode()):\n misc.Not_IFX_TPM_Dlg(self, \"Insert Infineon IRIDIUM Module\").ShowModal()\n self.Disable_Buttons() \n return\n \n def Disable_Buttons(self):\n self.button1.Disable()\n self.button2.Disable()\n self.button3.Disable()\n self.button4.Disable()\n self.button5.Disable()\n self.button6.Disable()\n \n def OnCloseWindow(self, evt):\n self.Destroy()\n\n # Technically this can be split into 6 different functions but I prefer it this way\n # The EngineDlg is required as the functions in tab3 and tab6 require the ownerAuth of the TPM to be set to NULL.\n def OnButtonClick(self, evt):\n event_obj = evt.GetEventObject()\n if (event_obj == self.FindWindowByLabel(label='Setup and Basic Features')):\n self.activetab = t1.Tab1Frame(self, \"Basic\")\n elif (event_obj == self.FindWindowByLabel(label='Cryptographic Functions')):\n self.activetab = t2.Tab2Frame(self, \"Crypto\")\n elif (event_obj == self.FindWindowByLabel(label='OpenSSL-Engine')):\n #~ if (misc.EngineDlg(self, \"Warning!\").ShowModal() == -1):\n #~ return\n self.activetab = t3.Tab3Frame(self, \"Engine\")\n elif (event_obj == self.FindWindowByLabel(label='Data Sealing with Policy')):\n self.activetab = t4.Tab4Frame(self, \"Data Sealing with Policy\")\n elif (event_obj == self.FindWindowByLabel(label='Attestation')):\n self.activetab = t5.Tab5Frame(self, \"Attest\")\n elif (event_obj == self.FindWindowByLabel(label='AWS: IOT Core')):\n #~ if (misc.EngineDlg(self, \"Warning!\").ShowModal() == -1):\n #~ return\n self.activetab = t6.Tab6Frame(self, 'Cloud')\n else:\n return\n self.Hide()\n\n\nclass Main(wx.App):\n def __init__(self, redirect=False, filename=None):\n wx.App.__init__(self, redirect, filename)\n dlg = MainFrame(None, title=\"Main\")\n self.SetTopWindow(dlg)\n dlg.Centre()\n# wx.lib.inspection.InspectionTool().Show()\n dlg.Show()\n\n\n# Always executes as this is the main file anyway\n# Note: This changes the working directory to /working_space, thus all created objects will be there\n# Navigation always starts from the /working_space folder.\nif __name__ == \"__main__\":\n exec_cmd.checkDir()\n app = Main() \n app.MainLoop()\n","repo_name":"Infineon/optiga-tpm-explorer","sub_path":"Python_TPM20_GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"62"} +{"seq_id":"24543316835","text":"\"\"\"\nThis module contains functions to randomly draw FP or planet system parameters.\n\nCreated on Wed Feb 17 17:23:07 2021\n\n@author: rodrigo\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\n# import pandas as pd\n'''\nfrom . import core as c\nfrom . import constants as cts\nfrom . 
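# ---- [editor's note] -------------------------------------------------------
# Check_IFX_TPM() in the main.py record above hand-rolls Popen / stdout.read()
# / wait() twice. Assuming Python 3.5+, subprocess.run() expresses the same
# device check more compactly (command string copied from the record):
import subprocess

def tpm_device_present():
    cp = subprocess.run("ls /dev/tpm0", shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return cp.stdout.decode() == "/dev/tpm0\n"
# -----------------------------------------------------------------------------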
import parameters as p\nfrom . import utils as u\n'''\nimport core as c\nimport constants as cts\nimport parameters as p\nimport utils as u\n\n\n\nhomedir = os.getenv('HOME')\nmldir = os.path.join(homedir, 'EXOML', 'TESS-pastis')\n\n\ndef draw_parameters(params, scenario, nsimu=1, **kwargs):\n \"\"\"\n Draw the parameters for a set of target (TIC) star.\n\n The draw is conditional on the parameter sets, representing the TIC star.\n\n :param iterable params: shape (nparams, size), list of parameters of TIC\n star. Is the second dimension is > 1, then nsimu draws are performed for\n each parameter set.\n\n :param str scenario: defines the scenario for which to draw parameters\n\n :param int nsimu: size of parameter set to draw\n\n The remaining kwargs are passed to the _draw_parameters functions\n \"\"\"\n ticstar = c.TargetStarParameters(params)\n ticstar.draw()\n\n # Draw parameters and return input dict for pastis\n if scenario.lower() in ['pla', 'planet']:\n planet = _draw_parameters_pla(ticstar, **kwargs)\n\n # Flag non-transiting planets\n flag = conservative_transit_flag([ticstar, planet])\n\n # Construct planet dict for pastis\n # Add tree-like structure\n planetdict = {'star1': 'Target1', 'planet1': 'Planet1'}\n\n input_dict = {'Target1': ticstar.to_pastis(flag),\n 'Planet1': planet.to_pastis(flag),\n 'PlanSys1': planetdict}\n\n elif scenario.lower() == 'beb':\n beb_params = _draw_parameters_beb(ticstar, **kwargs)\n\n # Flag non-transiting planets\n flag = conservative_transit_flag(beb_params)\n\n # Construct binary dict for pastis\n binarydict = beb_params[-1].to_pastis(flag)\n # Add tree-like structure\n binarydict.update({'star1': 'Blend1', 'star2': 'Blend2'})\n\n # binarydict['P'] = beb_params[1].period\n input_dict = {'Target1': ticstar.to_pastis(flag),\n 'Blend1': beb_params[0].to_pastis(flag),\n 'Blend2': beb_params[1].to_pastis(flag),\n 'IsoBinary1': binarydict,\n }\n\n elif scenario.lower() == 'btp':\n # First, create planetary system\n plansys_params = _draw_parameters_bkgplansys(ticstar, **kwargs)\n\n # Flag non-transiting planets\n flag = conservative_transit_flag(plansys_params)\n\n # Construct planetary system dict for pastis\n plansysdict = {'star1': 'Blend1', 'planet1': 'Planet1'}\n\n # binarydict['P'] = beb_params[1].period\n input_dict = {'Target1': ticstar.to_pastis(flag),\n 'Blend1': plansys_params[0].to_pastis(flag),\n 'Planet1': plansys_params[1].to_pastis(flag),\n 'PlanSys1': plansysdict,\n }\n\n elif scenario.lower() == 'triple':\n bbinary_params = _draw_parameters_boundbinary(ticstar, **kwargs)\n\n # Flag non-transiting planets\n flag = conservative_transit_flag(bbinary_params)\n\n # Construct binary dict for pastis\n binarydict = bbinary_params[-1].to_pastis(flag)\n # Add tree-like structure\n binarydict.update({'star1': 'Blend1', 'star2': 'Blend2'})\n\n # binarydict['P'] = beb_params[1].period\n hierch_orbit = c.OrbitParameters(orbittype='triple')\n hierch_orbit.draw(sum(flag))\n\n tripledict = hierch_orbit.to_pastis()\n tripledict.update({'object1': 'Target1',\n 'object2': 'IsoBinary1'})\n\n\n input_dict = {'Target1': ticstar.to_pastis(flag),\n 'Blend1': bbinary_params[0].to_pastis(flag),\n 'Blend2': bbinary_params[1].to_pastis(flag),\n 'IsoBinary1': binarydict,\n 'Triple1': tripledict\n }\n\n elif scenario.lower() == 'pib':\n # First, create planetary system\n plansys_params = _draw_parameters_boundplansys(ticstar, **kwargs)\n\n # Flag non-transiting planets\n flag = conservative_transit_flag(plansys_params)\n\n # Construct planetary system dict for 
pastis\n plansysdict = {'star1': 'Blend1', 'planet1': 'Planet1'}\n\n # Draw orbital parameters for triple\n hierch_orbit = c.OrbitParameters(orbittype='triple')\n hierch_orbit.draw(sum(flag))\n \n # Construct dictionary of hierarchichal orbit\n tripledict = hierch_orbit.to_pastis()\n tripledict.update({'object1': 'Target1',\n 'object2': 'PlanSys1'})\n\n # binarydict['P'] = beb_params[1].period\n input_dict = {'Target1': ticstar.to_pastis(flag),\n 'Blend1': plansys_params[0].to_pastis(flag),\n 'Planet1': plansys_params[1].to_pastis(flag),\n 'PlanSys1': plansysdict,\n 'Triple1': tripledict\n }\n\n elif scenario.lower() == 'eb':\n # Draw parameters for secondary\n # need to add attribute to Target star\n ticstar.minmass = 0.0\n secondary_params = _draw_parameters_secondary(ticstar, **kwargs)\n\n # Build full binary\n eb_params = [ticstar, *secondary_params]\n\n # Flag non-transiting binaries\n flag = conservative_transit_flag(eb_params)\n\n # Construct binary dict for pastis\n binarydict = eb_params[-1].to_pastis(flag)\n\n # Update orbital parameters with values from secondary\n # This is because the qBinary class in PASTIS is not\n # written the same way as IsoBinary, for example; only\n # one star is required here\n binarydict.update(eb_params[1].to_pastis(flag))\n\n # Add tree-like structure\n # Only primary required in qBinary\n binarydict.update({'star1': 'Target1'})#, 'star2': 'Blend2'})\n\n # binarydict['P'] = beb_params[1].period\n input_dict = {'Target1': ticstar.to_pastis(flag),\n # 'Blend2': eb_params[1].to_pastis(flag),\n 'qBinary1': binarydict,\n }\n\n return input_dict, flag\n\n\ndef _draw_parameters_pla(ticstar, **kwargs):\n \"\"\"Draw parameters for the Planet scenario.\"\"\"\n \n # Minimum planetary radius\n minimum_radius = kwargs.pop('minradius', p.MIN_PLA_RADIUS)\n\n # Instatiate planetary parameters\n planet = c.PlanetParameters(minradius=minimum_radius, **kwargs)\n # Draw parameters\n planet.draw(len(ticstar), **kwargs)\n\n return planet\n\n\ndef _draw_parameters_beb(ticstar, **kwargs):\n \"\"\"\n Draw parameters for the BEB scenario.\n\n This is done by drawing a background star and build the binary, much like\n the pastis object builder.\n \"\"\"\n\n maxdist = kwargs.pop('maxdist', p.MAX_DIST)\n\n # Build primary\n bkg_primary = c.BackgroundStarParameters(ticstar, minmass=0.5,\n maxdist=maxdist)\n\n # TODO: this could be replaced with PrimaryBkgParameters (should be the same)\n\n # Draw parameters for primary\n bkg_primary.draw()\n\n # Build secondary\n bkg_secondary = c.SecondaryBkgParameters(bkg_primary)\n bkg_secondary.draw()\n\n # Draw orbit\n orbit = c.OrbitParameters(orbittype='binary', **kwargs)\n orbit.draw(len(ticstar), **kwargs)\n\n return [bkg_primary, bkg_secondary, orbit]\n\n\ndef _draw_parameters_bkgplansys(ticstar, **kwargs):\n \"\"\"\n Draw parameters for a planetary system blended to a Target star.\n\n This is done by drawing a background star and build the system, much like\n the pastis object builder.\n \"\"\"\n\n maxdist = kwargs.pop('maxdist', p.MAX_DIST)\n\n # Build planet host\n planet_host = c.BackgroundStarParameters(ticstar, minmass=0.5,\n maxdist=maxdist)\n # Draw parameters for planet host\n planet_host.draw()\n\n # Build planet\n minimum_radius = kwargs.pop('min_radius', p.MIN_DILUTED_PLANET_RADIUS)\n planet = _draw_parameters_pla(planet_host, minradius=minimum_radius, **kwargs)\n planet.draw(len(planet_host), **kwargs)\n\n # Draw orbit\n # orbit = c.OrbitParameters(orbittype='planet')\n # orbit.draw(len(ticstar))\n\n return [planet_host, 
planet]\n\n\ndef _draw_parameters_boundbinary(ticstar, **kwargs):\n \"\"\"\n Draw parameters for the binary bound to a Target star.\n\n This is done by drawing a background star and build the binary, much like\n the pastis object builder.\n \"\"\"\n # Build primary\n binary_primary = c.BoundPrimaryParameters(ticstar, minmass=0.5)\n # Draw parameters for primary\n binary_primary.draw()\n\n # Build secondary\n binary_secondary = c.SecondaryBkgParameters(binary_primary)\n binary_secondary.draw()\n\n # Draw orbit\n orbit = c.OrbitParameters(orbittype='binary', **kwargs)\n orbit.draw(len(ticstar), **kwargs)\n\n return [binary_primary, binary_secondary, orbit]\n\n\ndef _draw_parameters_boundplansys(ticstar, **kwargs):\n \"\"\"\n Draw parameters for a planetary system bound to a Target star.\n\n This is done by drawing a bound star and build the system, much like\n the pastis object builder.\n \"\"\"\n # Build planet host\n planet_host = c.BoundPrimaryParameters(ticstar, minmass=0.5)\n # Draw parameters for planet host\n planet_host.draw()\n\n # Build planet\n minimum_radius = kwargs.pop('min_radius', p.MIN_DILUTED_PLANET_RADIUS)\n planet = _draw_parameters_pla(planet_host, minradius=minimum_radius, **kwargs)\n planet.draw(len(planet_host), **kwargs)\n\n # Draw orbit\n # orbit = c.OrbitParameters(orbittype='planet')\n # orbit.draw(len(ticstar))\n\n return [planet_host, planet]\n\n\ndef _draw_parameters_secondary(ticstar, **kwargs):\n \"\"\"\n Draw parameters for a secondary star bound to the main target star.\n \"\"\"\n if not ticstar.drawn:\n ticstar.draw()\n\n # Because SecondaryStarParameters require ticstar to have a mass\n # attribute, we will do that...\n\n binary_secondary = c.SecondaryStarParameters(ticstar)\n binary_secondary.draw()\n\n # Draw orbit\n orbit = c.OrbitParameters(orbittype='binary', **kwargs)\n orbit.draw(len(ticstar), **kwargs)\n\n orbit.q = binary_secondary.q\n\n return [binary_secondary, orbit]\n\n\ndef conservative_transit_flag(params):\n \"\"\"\n Flag systems according to whether they transit or not.\n\n Use a conservative approach when selecting stellar masses and radii.\n The objective at this point is performing a cut without having to buld the\n actual pastis objects.\n\n :param (Parameter class) params: instance of the parameter Class\n \"\"\"\n #TODO merge as many if conditions as possible\n # Check which class input belongs to.\n \n # Target + planet\n if (isinstance(params[0], c.TargetStarParameters) and\n isinstance(params[1], c.PlanetParameters)):\n # do something planety\n print('Checking parameters for planetary system')\n\n # Get masses\n mass2 = params[1].mass_mearth * cts.GMearth / cts.GMsun\n radius2_au = params[1].radius_rearth * cts.Rearth / cts.au\n\n # To be concervative, choose the smallest reasonable mass\n # and the largest possible radius\n # This will make the planet orbit closer to a larger star\n mass1 = 0.1\n radius1_au = 10.0 * cts.Rsun / cts.au\n\n # Define object containing orbital parameters\n orbit_params = params[1]\n\n # background star + secondary (BEB)\n elif (isinstance(params[0], c.BackgroundStarParameters) and\n isinstance(params[1], c.SecondaryBkgParameters)):\n # do something BEB\n print('Checking parameters for BEB system')\n\n assert len(params) > 2, \"Missing parameter object for the orbit\"\n\n # Get masses\n mass1 = params[0].mass\n mass2 = params[1].mass\n\n # Get radii\n # Again, to be conservative, choose LARGE radius\n radius1_au = 10.0 * cts.Rsun / cts.au\n radius2_au = 10.0 * cts.Rsun / cts.au\n\n # Define object 
containing orbital parameters\n orbit_params = params[2]\n\n # Triple condition\n elif (isinstance(params[0], c.BoundPrimaryParameters) and\n isinstance(params[1], c.SecondaryBkgParameters)):\n # do something triple\n print('Checking parameters for Triple system')\n\n assert len(params) > 2, \"Missing parameter object for the orbit\"\n\n # Get masses\n mass1 = params[0].mass\n mass2 = params[1].mass\n\n # Get radii\n # Again, to be conservative, choose LARGE radius\n radius1_au = 10.0 * cts.Rsun / cts.au\n radius2_au = 10.0 * cts.Rsun / cts.au\n\n # Define object containing orbital parameters\n orbit_params = params[2]\n\n # PIB condition\n elif (isinstance(params[0], c.BoundPrimaryParameters) and\n isinstance(params[1], c.PlanetParameters)):\n \n # do something planety\n print('Checking parameters for PiB system')\n\n # Get stellar mass\n mass1 = params[0].mass\n # get stellar radius\n # Again, to be conservative, choose LARGE radius\n radius1_au = 10.0 * cts.Rsun / cts.au\n \n # Get planet mass and orbital distance\n mass2 = params[1].mass_mearth * cts.GMearth / cts.GMsun\n radius2_au = params[1].radius_rearth * cts.Rearth / cts.au\n\n orbit_params = params[1] \n\n # EB Condition\n elif (isinstance(params[0], c.TargetStarParameters) and\n isinstance(params[1], c.SecondaryStarParameters)):\n\n # do something triple\n print('Checking parameters for EB system')\n\n # To be conservative, choose the smallest reasonable masses\n # and the largest possible radii\n # This will make the stars orbit closer and being larger\n mass1 = 0.1\n radius1_au = 5.0 * cts.Rsun / cts.au\n mass2 = 0.1\n radius2_au = 5.0 * cts.Rsun / cts.au\n\n # Define object containing orbital parameters\n orbit_params = params[2]\n\n # BTP condition\n elif (isinstance(params[0], c.BackgroundStarParameters) and\n isinstance(params[1], c.PlanetParameters)):\n # do something planety\n print('Checking parameters for BTP system')\n\n # Get stellar mass\n mass1 = params[0].mass\n # get stellar radius\n # Again, to be conservative, choose LARGE radius\n radius1_au = 10.0 * cts.Rsun / cts.au\n \n # Get planet mass and orbital distance\n mass2 = params[1].mass_mearth * cts.GMearth / cts.GMsun\n radius2_au = params[1].radius_rearth * cts.Rearth / cts.au\n\n orbit_params = params[1]\n\n # Get relevant orbital parameters\n periods = orbit_params.period\n ecc = orbit_params.ecc\n omega_deg = orbit_params.omega_deg\n incl_rad = orbit_params.incl_rad\n\n # Compute separation at inferior conjunction\n sma_au = u.sma(periods, mass1, mass2)\n r0 = u.r_infconj(ecc, omega_deg, sma_au / radius1_au)\n\n # compute impact parameter\n b = r0 * np.cos(incl_rad)\n\n # Return condition of transit\n return b <= 1 + radius2_au/radius1_au\n","repo_name":"exord/pastisML-tess","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":15294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"42179225834","text":"import json\nfrom json import JSONDecodeError\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.template import Context, TemplateDoesNotExist\nfrom django.template.loader import render_to_string\n\nfrom notifications.models import Activity, Notification\nfrom notifications.tasks import send_activity_notification\n\n\n@receiver(post_save, sender=Activity)\ndef send_notification(sender, instance, created, **kwargs):\n if created:\n\n context = Context(instance.data)\n try:\n alert = 
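# ---- [editor's note] -------------------------------------------------------
# conservative_transit_flag() above boils down to: impact parameter
# b = r0*cos(i) <= 1 + R2/R1, with r0 the separation at inferior conjunction
# in units of the primary radius. u.sma() and u.r_infconj() are this repo's
# own helpers; a self-contained sketch assuming the standard expressions
# (Kepler's third law in AU/Msun/yr units, r = a(1-e^2)/(1+e*sin(omega))):
import numpy as np

def transits(period_days, m1_msun, m2_msun, r1_au, r2_au,
             ecc, omega_deg, incl_rad):
    a_au = ((m1_msun + m2_msun) * (period_days / 365.25) ** 2) ** (1.0 / 3.0)
    r0 = a_au * (1 - ecc ** 2) / (1 + ecc * np.sin(np.radians(omega_deg)))
    b = (r0 / r1_au) * np.cos(incl_rad)
    return b <= 1 + r2_au / r1_au
# -----------------------------------------------------------------------------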
render_to_string(\"notifications/push/{0}.txt\".format(instance.code), context)\n # Alert *must not* contain newlines\n alert = ''.join(alert.splitlines())\n\n data_str = render_to_string(\"notifications/push/{0}.json\".format(instance.code), context)\n data = json.loads(data_str)\n except (TemplateDoesNotExist, JSONDecodeError):\n send_activity_notification.delay(instance.id, None)\n return\n\n notification = Notification.objects.create(\n user=instance.notify_to,\n code=instance.code,\n message=alert,\n data=data,\n )\n send_activity_notification.delay(instance.id, notification.id)\n","repo_name":"skshivammahajan/DRFChat","sub_path":"coonect/notifications/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15350107340","text":"\"\"\"\nThis module contains the Hash table data structure which\nimplements K MurmurHash3 hash functions, for existence check.\n\"\"\"\n\nfrom random import randint as random_number\nfrom mmh3 import hash as murmurhash3\n\n\nclass HashTable:\n \"\"\"\n Hash table data structure which implements K MurmurHash3 hash functions, for existence check.\n \"\"\"\n def __init__(self, k, m):\n \"\"\"\n Constructor for hash table which initialize the hash table.\n It construct the following properties:\n * Table consisting list of M zero values at first.\n * Randomized seed which is used for creating K different hash functions.\n * K different hash functions which is created by utility function `__initialize_hash_functions`\n\n :param k: Number of MurmurHash3 functions which the hash table will use.\n :param m: Size of the hash table elements.\n \"\"\"\n self.__table = [0] * m\n self.__seed = random_number(1, 4096)\n self.__hash_functions = []\n self.__initialize_hash_functions(k)\n\n def __initialize_hash_functions(self, k):\n \"\"\"\n Initialize K MurmurHash3 hash functions for the hash table.\n\n Each function will be different from each other, by using the multiplication of the randomized seed of\n the hash table with the index of the hash table.\n (under the assumption the K functions hashes in uniquely fashion)\n\n The MurmurHash3 functions then modulo by the table length, because the return value of\n the hash functions need to be in range [0, table length].\n\n :param k: Number of hash functions to initialize.\n \"\"\"\n for index in range(1, k + 1):\n self.__hash_functions.append(\n lambda value: murmurhash3(value, self.__seed * index, False) % len(self.__table)\n )\n\n def insert(self, value):\n \"\"\"\n Insert a given value into the hash table.\n For each of the hash functions, calculate the hashed key using given value\n and turn on the hashed key index in the internal table list.\n\n :param value: Value to insert into the hash table\n \"\"\"\n\n # Value considered to be exists in the hash table if all the hashed keys returned\n # by all the hashed functions with the given value, are turned on.\n for index in range(len(self.__hash_functions)):\n hashed_key = self.__hash_functions[index](value)\n self.__table[hashed_key] = 1\n\n def check_existence(self, value):\n \"\"\"\n Check existence of value in the hash table.\n The existence check determined by checking if all hash functions hashed keys are turned on\n in the internal table list.\n If all hashed keys turned on, the value is in the table, Otherwise it is not.\n\n :param value: Value to check if exists in the hash table.\n :return: True if the hash table contains 
the given value, Otherwise False.\n \"\"\"\n\n # Checking for each of the hash functions, if one of them returning key\n # which is turned off -> means the value not in the internal table -> not in\n # the hash table.\n for index in range(len(self.__hash_functions)):\n if self.__table[self.__hash_functions[index](value)] == 0:\n return False\n return True\n","repo_name":"shakedmanes/MurmurHash3_HashTable","sub_path":"hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"42966276801","text":"# -*- coding: utf-8 -*-\nimport shutil, argparse, graphviz, os, subprocess\n\n# Applications that are mandatory\n# None will be replaced by system path to application\nAPPLICATIONS = {'nm':None}\n\n#=====================================================================\n# FUNCTIONS\n#=====================================================================\n\ndef application_check():\n # Cheks if applications are installed\n mustbeinstalled = []\n for i in APPLICATIONS:\n r = shutil.which(i)\n if r == None:\n mustbeinstalled.append(i)\n else:\n APPLICATIONS[i] = r\n if len(mustbeinstalled) > 0:\n raise Exception(f\"Applications that must be installed: {mustbeinstalled}\")\n\ndef parse_nm_output(fname, arg_stdout, ismain = True):\n # Parsing nm output\n symbol_dic = {}\n a = str(arg_stdout, encoding='utf-8')\n for e in a.split('\\n'):\n e2 = e.split()\n if len(e2) == 4 and e2[2] == 'T':\n if ismain:\n symbol_dic[e2[3]] = int(e2[1], base=16)\n else:\n symbol_dic[e2[3]] = [int(e2[1], base=16), fname]\n\n return symbol_dic\n\ndef link_symbols(main_dic, objdic_vector):\n # links symbol from main_dic to object file vector\n link = {}\n for symbol_name in main_dic:\n\n link[symbol_name] = []\n size = main_dic[symbol_name]\n\n for objdic in objdic_vector:\n if symbol_name in list(objdic.keys()):\n size2 = objdic[symbol_name][0]\n if size == size2:\n link[symbol_name].append(objdic[symbol_name][1])\n return link\n\ndef do_graph(mainbinary, linksymb_list):\n # graphviz graph creation\n dot = graphviz.Digraph(comment=os.path.basename(mainbinary))\n\n num = 0\n for symbol_key in ls:\n startnum = num\n\n dot.node(f'A{num}', f'{symbol_key}\\n{num}')\n num += 1\n\n if len(linksymb_list[symbol_key]) == 1:\n obj_file_name = linksymb_list[symbol_key][0]\n fname = os.path.basename(obj_file_name)\n dot.node(f'B{num}', f'{fname}\\n{num}')\n dot.edge(f'A{startnum}', f'B{num}')\n num += 1\n\n elif len(linksymb_list[symbol_key]) > 0:\n dot.node(f'M{startnum}', f'multi\\n{startnum}')\n dot.edge(f'A{startnum}', f'M{startnum}')\n\n for obj_file_name in linksymb_list[symbol_key]:\n fname = os.path.basename(obj_file_name)\n dot.node(f'B{num}', f'{fname}\\n{num}')\n dot.edge(f'M{startnum}', f'B{num}')\n num += 1\n\n else:\n dot.node(f'U{startnum}', f'?')\n dot.edge(f'A{startnum}', f'U{startnum}')\n\n return dot\n\n#=====================================================================\n# MAIN\n#=====================================================================\nparser = argparse.ArgumentParser(prog=\"pylibgraph\",\n description=\"python nm symbols grapher\")\nparser.add_argument('mainbinary', help='main binary to check')\nparser.add_argument('objdir', help='parent directory to look for object files')\n\nargs = parser.parse_args()\n\napplication_check()\n\n# arg verification\nif not os.path.isfile(args.mainbinary):\n raise Exception(f'binary {args.mainbinary} must exist')\n\nif not 
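# ---- [editor's note] -------------------------------------------------------
# The lambdas built in __initialize_hash_functions() above all close over the
# SAME loop variable, so by the time any of them runs, index == k for every
# entry and the K "different" hash functions are identical -- the table
# degenerates into a 1-hash Bloom filter. The usual fix is to freeze the loop
# value with a default argument:
#     self.__hash_functions.append(
#         lambda value, i=index: murmurhash3(value, self.__seed * i, False)
#                                % len(self.__table))
# (As designed, this structure is a Bloom filter: check_existence() can
# return false positives but never false negatives.)
# -----------------------------------------------------------------------------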
os.path.isdir(args.objdir):\n raise Exception(f'directory {args.objdir} must exist')\n\n# look for object files\nobj_files = []\nfor r, d, f in os.walk(args.objdir):\n for e in f:\n _, ext = os.path.splitext(e)\n if ext.lower() == '.o':\n obj_files.append(os.path.join(r,e))\n\n# nm for main binary\ncp = subprocess.run(args=[APPLICATIONS['nm'], '-S', args.mainbinary],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nif cp.returncode != 0:\n raise Exception(f'nm command failed for {args.mainbinary}\\nERROR: {cp.stderr}')\nmain_dic = parse_nm_output(args.mainbinary, cp.stdout)\n\n# nm for object files\nobjdic_vector = []\nfor e in obj_files:\n cp = subprocess.run(args=[APPLICATIONS['nm'], '-S', e],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if cp.returncode != 0:\n raise Exception(f'nm command failed for {e}\\nERROR: {cp.stderr}')\n e_dic = parse_nm_output(e, cp.stdout, ismain = False)\n objdic_vector.append(e_dic)\n\n# link main to objects\nls = link_symbols(main_dic, objdic_vector)\n\n# create graphviz\ndot = do_graph(args.mainbinary, ls)\n\ndot.render(f'mainbinary.gv', view=True)\n","repo_name":"gil390/pylibgraph","sub_path":"pylibgraph.py","file_name":"pylibgraph.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37072753106","text":"#!/usr/bin/env python3\n\n'''\nThis tool connects to a MQTT broker, subscribes to messages from iot/position,\nand computes if the device is moving or not from the acceleration values\ncontained in the MQTT messages received.\nIt is intended to allow rapid iteration and testing of different values\nof T and G.\n'''\n\nimport paho.mqtt.client as mqtt\nimport sys\nimport json\nimport traceback\nfrom math import *\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(\"iot/position/#\")\n\n\nold_modulo = 0\nold_alfa = 0\nold_gamma = 0\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n global old_alfa, old_gamma, old_modulo\n \n try:\n data = json.loads(msg.payload.decode('utf-8'))\n accel = json.loads('[' + data['last_accel'] + ']')\n \n gravity = 99\n modulo = abs(accel[0]**2 + accel[1]**2 + accel[2]**2 - gravity**2)\n alfa = atan2(accel[1], accel[0])\n gamma = atan2(-accel[0], sqrt(accel[1]**2 + accel[2]**2))\n delta_alfa = abs(alfa - old_alfa)\n delta_gamma = abs(gamma - old_gamma)\n delta_modulo = abs(modulo - old_modulo)\n \n #moving = (modulo >= 1000 or delta_modulo >= 500 or delta_alfa >= pi/12 or delta_gamma >= pi/12)\n moving = (modulo >= 1000 or delta_modulo >= 500)\n \n print('{:<20} {:>8} {:>8} {:>5.2f} {:>5.2f} {:>5.2f} {:>5.2f} is moving? 
{}'.format(\n str(accel), \n modulo, delta_modulo, alfa, delta_alfa, gamma, delta_gamma, \n moving))\n \n old_alfa = alfa\n old_gamma = gamma\n old_modulo = modulo\n except:\n traceback.print_exc()\n\n\nif len(sys.argv) < 2:\n print(\"usage:\", sys.argv[0], \"\");\n exit(0);\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(sys.argv[1], 1883, 60)\n\nclient.loop_forever()\n","repo_name":"marcobacis/mw_iot_person_detection","sub_path":"tools/movement-condition-test.py","file_name":"movement-condition-test.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28101683719","text":"import Reversi\nimport matplotlib.pyplot as plt\nimport Graphics\n\n\nclass Play:\n def __init__(self, board_size: int, agent1=None, agent2 = None):\n \n self.board_size = board_size\n self.agent1 = agent1\n self.agent2 = agent2\n\n \n\n fig = plt.figure(figsize = (7.2,7.2))\n ax = fig.add_subplot(111)\n self.game = Reversi.ReversiGame(board_size)\n\n self.graphics = Graphics.Graphics(self,ax, self.game)\n\n plt.show()\n\n\n \n def update(self, pos):\n if not self.game.isEnd():\n self.game.make_move(pos)\n self.graphics.update_board(self.game)\n turn = self.game.get_turn()\n if not self.game.isEnd() and turn == 1 and self.agent1!=None:\n move = self.agent1.get_move(Reversi.ReversiGame(copy = self.game))\n self.game.make_move(move)\n self.graphics.update_board(self.game)\n if not self.game.isEnd() and turn == -1 and self.agent2!=None:\n move = self.agent2.get_move(Reversi.ReversiGame(copy = self.game))\n self.game.make_move(move)\n self.graphics.update_board(self.game)\n\n \n \n","repo_name":"asaphc/Reversi_Othello_graphic_AI","sub_path":"Reversi/Play.py","file_name":"Play.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70629908357","text":"\"\"\"\nAnalyzers package methods that are shared between evaluators and trainers.\n\"\"\"\n\nfrom typing import Dict, Tuple, Union\n\nimport torch\n\nfrom dev_misc import FT, g, get_tensor\nfrom dev_misc.devlib import get_length_mask\nfrom dev_misc.devlib.named_tensor import NoName\nfrom dev_misc.trainlib import Metric, Metrics\nfrom xib.data_loader import ContinuousIpaBatch\nfrom xib.ipa import should_include\nfrom xib.model.decipher_model import DecipherModel, DecipherModelReturn\nfrom xib.model.extract_model import ExtractModelReturn\nfrom xib.model.lm_model import AdaptLMReturn, Cat\n\n\nclass LMAnalyzer:\n\n def analyze(self, scores: Dict[Cat, FT], return_scores: bool = False) -> Union[Metrics, Tuple[Metrics, Dict[Cat, FT]]]:\n metrics = Metrics()\n total_loss = 0.0\n total_weight = 0.0\n for name, (losses, weights) in scores.items():\n if should_include(g.feat_groups, name):\n loss = (losses * weights).sum()\n weight = weights.sum()\n total_loss += loss\n total_weight += weight\n loss = Metric(f'loss_{name.snake}', loss, weight)\n metrics += loss\n metrics += Metric('loss', total_loss, total_weight)\n if return_scores:\n return metrics, scores\n else:\n return metrics\n\n\nclass AdaptLMAnalyzer(LMAnalyzer):\n\n def analyze(self, ret: AdaptLMReturn) -> Metrics:\n metrics, scores = super().analyze(ret.distr, return_scores=True)\n # if g.use_moe:\n # # prior = get_tensor([g.prior_value, 1.0 - g.prior_value]).squeeze(dim=0)\n # # lp = ret.gate_log_probs\n # # kld = lp.exp() * (lp - prior.log())\n\n # # kld.\n # lp = 
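# ---- [editor's note] -------------------------------------------------------
# The active condition in movement-condition-test.py above uses only the
# magnitude term |ax^2 + ay^2 + az^2 - g^2| and its sample-to-sample delta;
# the atan2 tilt angles are computed and printed but commented out of the
# decision. A pure-function sketch of the criterion as it stands:
def is_moving(accel, prev_modulo, gravity=99, t_abs=1000, t_delta=500):
    modulo = abs(accel[0] ** 2 + accel[1] ** 2 + accel[2] ** 2 - gravity ** 2)
    return (modulo >= t_abs or abs(modulo - prev_modulo) >= t_delta), modulo
# -----------------------------------------------------------------------------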
ret.gate_log_probs\n # _p = lp.exp().sum(0) / lp.exp().sum()\n # prior = get_tensor([g.prior_value, 1.0 - g.prior_value]).squeeze(dim=0)\n # kld = _p * (_p.log() - prior.log())\n\n # bs = lp.size('batch')\n # kld = Metric('kld', kld.sum() * bs, bs)\n # metrics += kld\n\n # # sparsity.\n # _p = lp.exp()\n # with NoName(_p):\n # # sparsity = torch.nn.functional.softmin(torch.stack([_p, 1.0 - _p], dim=-1), dim=-1)\n # sparsity = torch.min(_p, 1.0 - _p)\n # sparsity = Metric('sparsity', sparsity.sum(), bs)\n # metrics += sparsity\n\n # metrics.rename('loss', 'ce_loss')\n # metrics += Metric('loss', metrics.ce_loss.total, bs)\n # # metrics += Metric('loss', metrics.ce_loss.total + kld.total, bs)\n # # metrics += Metric('loss', metrics.ce_loss.total + kld.total + sparsity.total, bs)\n\n if g.use_moe:\n metrics = Metrics()\n metrics_noise, scores_noise = super().analyze(ret.distr_noise, return_scores=True)\n total_loss = 0.0\n total_weight = 0.0\n cnt = 0\n prob_cnt = 0\n\n # gate_log_probs = ret.gate_logits.log_softmax(dim=-1)\n\n all_scores = [s for _, (s, _) in scores.items()]\n all_weights = [w for _, (_, w) in scores.items()]\n weight = all_weights[0]\n\n sum_scores = torch.stack(all_scores, new_name='stacked').sum(dim='stacked')\n batch_probs = ret.gate_logits.log_softmax(dim=-1).exp()[:, 0] * weight # + (-999.9) * (1.0 - weight))\n # batch_probs = (ret.gate_logits[:, 0] + (-999.9) * (1.0 - weight)).log_softmax(dim='batch').exp()\n bs = batch_probs.size('batch')\n total = int(g.prior_value * weight.sum())\n diff_loss = ((batch_probs.sum() - total) ** 2).sum()\n diff_loss = Metric('diff_loss', diff_loss, bs)\n loss = (sum_scores * batch_probs).sum()\n loss = Metric('loss', loss + diff_loss.total, bs)\n\n metrics += diff_loss\n metrics += loss\n\n # for name in scores:\n # s, w = scores[name]\n # sn, _ = scores_noise[name]\n # all_score = torch.stack([s, sn], new_name='expert')\n # probs = gate_log_probs.exp()\n # loss = ((all_score * probs) * w.align_as(all_score)).sum()\n # cnt += ((all_score[:, 0] < all_score[:, 1]) * w).sum()\n # prob_cnt += ((probs[:, 0] > probs[:, 1]) * w).sum()\n # weight = w.sum()\n # total_loss += loss\n # total_weight += weight\n # loss = Metric(f'loss_{name.snake}', loss, weight)\n # metrics += loss\n\n # # kld.\n # lp = gate_log_probs\n # _p = lp.exp().sum(0) / lp.exp().sum()\n # prior = get_tensor([g.prior_value, 1.0 - g.prior_value]).squeeze(dim=0)\n # kld = _p * (_p.log() - prior.log())\n\n # bs = lp.size('batch')\n # kld = Metric('kld', kld.sum() * bs, bs)\n # metrics += kld\n\n # metrics += Metric('loss', total_loss, total_weight)\n # metrics += Metric('loss', total_loss + kld.total, total_weight)\n\n # print('cnt', cnt / total_weight)\n # print('prob', prob_cnt / total_weight)\n return metrics\n else:\n return metrics\n\n\ndef _compute_utility(logits: FT, sample_scores: FT) -> FT:\n sample_log_probs = logits.log_softmax(dim='sample')\n utility = (sample_log_probs.exp() * sample_scores).sum()\n return utility\n\n\nclass DecipherAnalyzer:\n\n def analyze(self, model_ret: DecipherModelReturn, batch: ContinuousIpaBatch) -> Metrics:\n if g.supervised:\n return self._analyze_supervised(model_ret, batch)\n else:\n return self._analyze_unsupervised(model_ret, batch)\n\n def _analyze_supervised(self, model_ret: DecipherModelReturn, batch: ContinuousIpaBatch) -> Metrics:\n metrics = Metrics()\n if g.train_phi:\n sample_scores = model_ret.scores.phi_score\n sample_scores = sample_scores.align_to('batch', 'sample')\n gold_log_probs = 
sample_scores.log_softmax(dim='sample')[:, 0]\n total_loss = Metric('total_loss', -gold_log_probs.sum(), batch.batch_size)\n else:\n target_log_probs = model_ret.probs.label_log_probs.gather('label', batch.gold_tag_seqs)\n weight = (~batch.source_padding).float().align_as(target_log_probs)\n total_loss = (target_log_probs * weight).sum()\n total_loss = Metric('total_loss', -total_loss, weight.sum())\n metrics += total_loss\n return metrics\n\n def _analyze_unsupervised(self, model_ret: DecipherModelReturn, batch: ContinuousIpaBatch) -> Metrics:\n metrics = Metrics()\n # TODO(j_luo) Check the sample scores for hyps that are dummies (i.e., the length of the segment is too small to get beam_size hyps).\n is_unique = model_ret.packed_words.is_unique\n modified_logits = model_ret.probs.sample_log_probs * g.concentration + (~is_unique).float() * (-999.9)\n sample_scores = model_ret.scores.phi_score\n ptb_sample_scores = model_ret.ptb_scores.phi_score\n duplicates = model_ret.duplicates\n with NoName(ptb_sample_scores):\n ptb_sample_scores[duplicates] = -999.9\n bs = sample_scores.size('batch')\n ptb_sample_scores = ptb_sample_scores.unflatten('batch', [('batch', bs), ('contrast', g.n_times * 2)])\n sample_scores = sample_scores.align_as(ptb_sample_scores)\n all_scores = torch.cat([sample_scores, ptb_sample_scores], dim='contrast')\n all_probs = all_scores.log_softmax(dim='contrast').exp()\n sample_probs = all_probs.align_to(..., 'contrast')[..., 0]\n utility = _compute_utility(modified_logits, sample_probs)\n total_loss = Metric('total_loss', -utility, batch.batch_size)\n metrics += total_loss\n\n return metrics\n\n\nclass ExtractAnalyzer:\n\n def analyze(self, model_ret: ExtractModelReturn, batch: ContinuousIpaBatch) -> Metrics:\n metrics = Metrics()\n metrics += Metric('ll', model_ret.best_matched_ll.sum(), batch.batch_size)\n\n almt = model_ret.alignment\n if almt is not None:\n reg = ((almt.sum(dim=0) - 1.0) ** 2).sum()\n metrics += Metric('reg', reg, batch.batch_size)\n return metrics\n","repo_name":"j-luo93/xib","sub_path":"xib/training/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"34152911437","text":"class Solution:\n def canJump(self, nums):\n bestJumper,i = nums[0],0\n # if bestJump is lesser than index, you couldn't make it the index already\n while i=len(nums)-1\n","repo_name":"YounghoonKwon/leets-get-it","sub_path":"201123-Leet55-JumpGame/soungkook.py","file_name":"soungkook.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"9587013589","text":"#!/usr/bin/env python\r\n\r\n\"\"\"\r\nDownload the pdfs in a list of links to Springer free books\r\n\r\nUsage -\r\n\r\n The script should be placed in a folder and in the same folder\r\n should be placed a file named \"Books_List.txt\" where should \r\n be placed a list of links to Springer books - one link per row.\r\n\r\n The script should be executed with no arguments and the rdfs will\r\n be downloaded in subfolder \"downloaded\"\r\n\r\n If the link leads to a page with no button for pdf download the \r\n link will be stored in a file \"skipped_url_list.txt\"\r\n\r\nRequires - requests >= 2.20.0\r\n beautifulsoup >= 4.9.0\r\n pandas >= 0.23.4\r\n\r\nDownload and install using\r\n \r\n pip install requests\r\n pip install beautifulsoup4\r\n pip install pandas \r\n\"\"\"\r\n\r\n__author__= 
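# ---- [editor's note] -------------------------------------------------------
# _compute_utility() in the analyzer.py record above is the expected sample
# score under the model's sample distribution, U = sum_s softmax(logits)_s *
# score_s, differentiable w.r.t. the logits. The original uses named tensors;
# a plain-tensor sketch of the same quantity:
import torch

def expected_score(logits, scores):
    # logits, scores: (batch, n_samples)
    return (logits.softmax(dim=-1) * scores).sum()
# -----------------------------------------------------------------------------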
'hmhristov '\r\n__license__= 'MIT'\r\n__version__= '1.0.0'\r\n\r\nimport requests \r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport os\r\n\r\ndef get_pdf_subpage (header_page_url):\r\n \r\n page = requests.get(header_page_url)\r\n soup_header_page = BeautifulSoup(page.text, 'html.parser' )\r\n current_link = ''\r\n url = ''\r\n page_title = soup_header_page.find('h1').text\r\n for i in (' ', ':', '/', '?', ',', '.', '-', '!', ';'):\r\n page_title = page_title.replace(i,'')\r\n\r\n book_year=soup_header_page.find('span', class_=\"bibliographic-information__value\", id=\"copyright-info\").text[-4:]\r\n\r\n for link in soup_header_page.find_all('a'):\r\n current_link = link.get('href')\r\n title = link.get('title')\r\n if current_link.endswith('pdf') and str(title) == 'Download this book in PDF format':\r\n url = 'https://link.springer.com' + current_link\r\n break \r\n \r\n return (url, book_year + '_Book_' + page_title)\r\n\r\nif __name__=='__main__':\r\n\r\n base_dir = os.getcwd()\r\n\r\n bookslist_file_name = base_dir + r\"\\Books_List.txt\"\r\n print(bookslist_file_name)\r\n # bookslist_file_name = bookslist_file_name.replace('\\\\','/')\r\n \r\n df_books = pd.read_csv(bookslist_file_name, header=None, names=['Link'])\r\n check_list_file = open(base_dir + r'\\skipped_url_list.txt','a+')\r\n\r\n books_folder = base_dir + r'\\downloaded'\r\n\r\n if not os.path.exists(books_folder):\r\n os.makedirs(books_folder)\r\n\r\n books_folder = books_folder + '\\\\'\r\n\r\n for my_url in df_books['Link']:\r\n\r\n download_attributes = get_pdf_subpage(my_url)\r\n url_row = my_url + ' ' + download_attributes[0] + ' ' + download_attributes[1]\r\n\r\n if download_attributes[0]:\r\n # The button for PDF download is found\r\n myfile = requests.get(download_attributes[0])\r\n print('downloading ' + download_attributes[1])\r\n open(books_folder + download_attributes[1] +'.pdf', 'wb').write(myfile.content) \r\n else:\r\n print('Can not download ' + url_row) \r\n check_list_file.write(url_row + '\\n')\r\n","repo_name":"h111359/python","sub_path":"stand_alone/springer_pdf_scrape.py","file_name":"springer_pdf_scrape.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40889128739","text":"from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, decoders\nfrom transformers import PreTrainedTokenizerFast\n\nall_tokens = [\"House\", \"Commute\", \"Office\", \"Store_Daily\", \"Go_School\", \"School\", \"Back_Home\",\n \"Shopping_Daily\", \"Shopping_Nondaily\", \"Store_Nondaily\", \"Go_Eat\", \"Socializing\",\n \"Go_Recreational_Facility\", \"Pickup_Drop_Off\", \"Go_Sightseeing\", \"Tourist_Spot\",\n \"Private_Movement\", \"Private_Space\", \"Delivering\", \"Business_Place\", \"Attend_Meeting\",\n \"Go_Occupation\", \"Go_Agricultural_Work\", \"Natural_Area\", \"Go_Other_Business\",\n \"Go_Exercise\", \"Pitch\", \"Volunteering\", \"Public_Space\", \"Welcoming\", \"[UNK]\", \"[PAD]\", \"[EOS]\"]\nspecial_tokens = [\"[UNK]\", \"[PAD]\", \"[EOS]\"]\n\nvocab = {token: i for i, token in enumerate(all_tokens)}\nmodel = models.WordLevel(vocab=vocab, unk_token=\"[UNK]\")\n\ntokenizer = Tokenizer(model)\ntokenizer.normalizer = normalizers.Sequence([normalizers.NFD(), normalizers.StripAccents()])\ntokenizer.pre_tokenizer = pre_tokenizers.Whitespace()\ntokenizer.decoder = 
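# ---- [editor's note] -------------------------------------------------------
# springer_pdf_scrape.py above never checks the HTTP status and buffers each
# PDF fully in memory (requests.get(...).content). A hedged sketch of a more
# defensive download step, streaming to disk with the same requests API:
import requests

def download_pdf(url, dest_path):
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()  # fail loudly on 4xx/5xx instead of saving HTML
        with open(dest_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 16):
                f.write(chunk)
# -----------------------------------------------------------------------------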
decoders.WordPiece()\ntokenizer.add_special_tokens(special_tokens)\ntokenizer.save(\"/home/ubuntu/Documents/Tokenizer/trip_chain_tokenizer.json\")\n\n# # Test tokenizer\n# tokenizer = PreTrainedTokenizerFast(tokenizer_file=\"/home/ubuntu/Documents/Tokenizer/trip_chain_tokenizer.json\")\n# tokenizer.pad_token = \"[PAD]\"\n# tokenizer.eos_token = \"[EOS]\"\n#\n# encoded = tokenizer.encode(\"House Store_Daily Back_Home Pickup_Drop_Off [EOS]\")\n# print(encoded)\n#\n# decoded = tokenizer.decode(encoded)\n# print(decoded)\n","repo_name":"ZinniAZKY/ActivityChain","sub_path":"TokenizerGeneration.py","file_name":"TokenizerGeneration.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3439492191","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.utils.crypto import get_random_string \ndef random(request):\n\tif not 'count' in request.session:\n\t\trequest.session = 0\n\telse:\n\t\trequest.session['count'] += 1\n\n\tword = get_random_string(length=5)\n\tcontext = {\"random\": word,\n\t\t'counter': request.session['count']\n\t}\n\n\treturn render(request, 'random_word/randword.html', context)\n\ndef reset(request):\n\trequest.session['count'] = 0\n\treturn redirect('/random')\n# Create your views here.\n","repo_name":"thinhle93/Django-","sub_path":"time_display/apps/random_word/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26153553440","text":"import cv2\n\n\ndef video_display(video):\n while not video.stop.is_set():\n try:\n if video.new_frame.wait(1):\n cv2.imshow('Video', video.current_frame)\n video.new_frame.clear()\n if cv2.waitKey(1) == ord('q'):\n video.stop.set()\n except:\n video.stop.set()\n raise\n\n cv2.destroyAllWindows()\n print('Exiting show thread')\n","repo_name":"psiexperiment/psivideo","sub_path":"psivideo/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"29055840168","text":"# coding=gbk\n\"\"\"\n * User: ²ÌÕýÁú\n * Date: 2018/5/21\n * Time: 11:25\n * Description: ²âÊÔÄ£¿é\n\"\"\"\nimport unittest\n\n\nfrom jingtum_python_lib.logger import logger\nfrom jingtum_python_lib.remote import Remote\n\n\nclass OfferTest(unittest.TestCase):\n @staticmethod\n def test_build_offer_create():\n remote = Remote(local_sign=True)\n options = {\n 'type': 'Sell',\n 'account': 'jB7rxgh43ncbTX4WeMoeadiGMfmfqY2xLZ',\n 'taker_gets': {\n 'value': '0.01',\n 'currency': '8100000036000020160622201606300120000002',\n 'issuer': 'jBciDE8Q3uJjf111VeiUNM775AMKHEbBLS'\n },\n 'taker_pays': {\n 'value': '1',\n 'currency': 'SWT',\n 'issuer': ''\n }\n }\n\n if not isinstance(remote.connect(), Exception):\n tx = remote.build_offer_create_tx(options)\n tx.set_secret('sn37nYrQ6KPJvTFmaBYokS3FjXUWd')\n s = tx.submit()\n result = remote.parse_payment(s)\n logger.info(result)\n\n @staticmethod\n def test_build_offer_cancel():\n remote = Remote(local_sign=True)\n options = {\n 'sequence': 1777,\n 'account': 'jB7rxgh43ncbTX4WeMoeadiGMfmfqY2xLZ',\n }\n\n if not isinstance(remote.connect(), Exception):\n tx = remote.build_offer_cancel_tx(options)\n tx.set_secret('sn37nYrQ6KPJvTFmaBYokS3FjXUWd')\n s = tx.submit()\n result=remote.parse_payment(s)\n logger.info(result)\n\n\nif __name__ == '__main__':\n 
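# ---- [editor's note] -------------------------------------------------------
# In the random_word views.py record above, the first-visit branch does
# request.session = 0, which clobbers the session object itself, and the
# later request.session['count'] lookup then fails with a TypeError. The
# intended logic is presumably (imports as in that record):
#     def random(request):
#         if 'count' not in request.session:
#             request.session['count'] = 0
#         else:
#             request.session['count'] += 1
#         ...
# i.e. initialise the 'count' key, not the session attribute.
# -----------------------------------------------------------------------------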
unittest.main()\n","repo_name":"swtcpro/jingtum-lib-python","sub_path":"test/test_417_418_offer.py","file_name":"test_417_418_offer.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"62"} +{"seq_id":"70361060357","text":"from typing_extensions import Final\n\nPREFIX = 'CPyPy_' # type: Final # Python wrappers\nNATIVE_PREFIX = 'CPyDef_' # type: Final # Native functions etc.\nDUNDER_PREFIX = 'CPyDunder_' # type: Final # Wrappers for exposing dunder methods to the API\nREG_PREFIX = 'cpy_r_' # type: Final # Registers\nSTATIC_PREFIX = 'CPyStatic_' # type: Final # Static variables (for literals etc.)\nTYPE_PREFIX = 'CPyType_' # type: Final # Type object struct\nMODULE_PREFIX = 'CPyModule_' # type: Final # Cached modules\nATTR_PREFIX = '_' # type: Final # Attributes\n\nENV_ATTR_NAME = '__mypyc_env__' # type: Final\nNEXT_LABEL_ATTR_NAME = '__mypyc_next_label__' # type: Final\nTEMP_ATTR_NAME = '__mypyc_temp__' # type: Final\nLAMBDA_NAME = '__mypyc_lambda__' # type: Final\nPROPSET_PREFIX = '__mypyc_setter__' # type: Final\nSELF_NAME = '__mypyc_self__' # type: Final\nINT_PREFIX = '__tmp_literal_int_' # type: Final\n\n# Max short int we accept as a literal is based on 32-bit platforms,\n# so that we can just always emit the same code.\nMAX_LITERAL_SHORT_INT = (1 << 30) - 1 # type: Final\n\nTOP_LEVEL_NAME = '__top_level__' # type: Final # Special function representing module top level\n\n# Maximal number of subclasses for a class to trigger fast path in isinstance() checks.\nFAST_ISINSTANCE_MAX_SUBCLASSES = 2 # type: Final\n\n\ndef decorator_helper_name(func_name: str) -> str:\n return '__mypyc_{}_decorator_helper__'.format(func_name)\n\n\ndef shared_lib_name(group_name: str) -> str:\n \"\"\"Given a group name, return the actual name of its extension module.\n\n (This just adds a suffix to the final component.)\n \"\"\"\n return '{}__mypyc'.format(group_name)\n","repo_name":"EngineerSpock/Python-from-Zero-to-Hero","sub_path":"10-Что нового в Python 3.8/Python_3_8/venv/Lib/site-packages/mypyc/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"62"} +{"seq_id":"72028202756","text":"from unittest import TestCase\nfrom uuid import uuid4\n\nfrom arbeitszeit.use_cases.get_plan_details import GetPlanDetailsUseCase\nfrom tests.data_generators import CompanyGenerator, PlanGenerator\n\nfrom .dependency_injection import get_dependency_injector\n\n\nclass UseCaseTests(TestCase):\n def setUp(self) -> None:\n self.injector = get_dependency_injector()\n self.plan_generator = self.injector.get(PlanGenerator)\n self.company_generator = self.injector.get(CompanyGenerator)\n self.use_case = self.injector.get(GetPlanDetailsUseCase)\n self.company = self.company_generator.create_company_record()\n\n def test_that_none_is_returned_when_plan_does_not_exist(self) -> None:\n request = GetPlanDetailsUseCase.Request(uuid4())\n self.assertFalse(self.use_case.get_plan_details(request))\n\n def test_plan_details_is_returned_when_plan_exists(self):\n plan = self.plan_generator.create_plan()\n request = GetPlanDetailsUseCase.Request(plan.id)\n 
self.assertTrue(self.use_case.get_plan_details(request))\n","repo_name":"arbeitszeit/arbeitszeitapp","sub_path":"tests/use_cases/test_get_plan_details.py","file_name":"test_get_plan_details.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"62"} +{"seq_id":"8398609247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nimport torch\nimport glob\nimport numpy as np\nimport cv2\nimport math\nimport matplotlib.pyplot as plt\nimport os\nfrom imutils import face_utils\nimport argparse\nimport dlib\nfrom function.arch_sfsnet import SfsNetPipeline\nfrom PIL import Image, ImageDraw\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom function.color_harmonize import main as harmonize\nfrom function.skin_change import main as race\n\n\n# In[2]:\n\n\ndef lambertian_attenuation(n) :\n#%a = [.8862; 1.0233; .4954];\n pi = 3.14\n a = pi*np.array([1,2/3,0.25])\n if n > 3:\n print('didnt record more than 3 attenuations')\n o = a[0:n]\n return o\n\n\n# In[3]:\n\n\ndef normal_harmonics(N, att):\n # % Return the harmonics evaluated at surface normals N, attenuated by att.\n # % Normals can be scaled surface normals, in which case value of each\n # % harmonic at each point is scaled by albedo.\n # % Harmonics written as polynomials\n # % 0,0 1/sqrt(4*pi)\n # % 1,0 z*sqrt(3/(4*pi))\n # % 1,1e x*sqrt(3/(4*pi))\n # % 1,1o y*sqrt(3/(4*pi))\n # % 2,0 (2*z.^2 - x.^2 - y.^2)/2 * sqrt(5/(4*pi))\n # % 2,1e x*z * 3*sqrt(5/(12*pi))\n # % 2,1o y*z * 3*sqrt(5/(12*pi))\n # % 2,2e (x.^2-y.^2) * 3*sqrt(5/(48*pi))\n # % 2,2o x*y * 3*sqrt(5/(12*pi))\n pi = 3.14\n xs = (N[:,0])\n ys = (N[:,1])\n zs = (N[:,2])\n a = np.sqrt(pow(xs,2)+pow(ys,2)+pow(zs,2))\n #denom = (a==0) + a\n #x = xs./a; y = ys./a; z = zs./a;\n x = xs / a\n y = ys / a\n z = zs / a\n\n x2 = np.multiply(x,x)\n y2 = np.multiply(y,y)\n z2 = np.multiply(z,z)\n xy = np.multiply(x,y)\n xz = np.multiply(x,z)\n yz = np.multiply(y,z)\n \n H1 = att[0]*(1/np.sqrt(4*pi)) * a\n H2 = att[1]*(np.sqrt(3/(4*pi))) * zs\n H3 = att[1]*(np.sqrt(3/(4*pi))) * xs\n H4 = att[1]*(np.sqrt(3/(4*pi))) * ys\n H5 = att[2]*(1/2)*(np.sqrt(5/(4*pi))) * (np.multiply((2*z2 - x2 - y2) , a))\n H6 = att[2]*(3*np.sqrt(5/(12*pi))) * (np.multiply(xz , a))\n H7 = att[2]*(3*np.sqrt(5/(12*pi))) * (np.multiply(yz , a))\n H8 = att[2]*(3*np.sqrt(5/(48*pi))) * (np.multiply((x2 - y2) , a))\n H9 = att[2]*(3*np.sqrt(5/(12*pi))) *(np.multiply(xy , a))\n H = [H1,H2,H3,H4,H5,H6,H7,H8,H9]\n return H\n\ndef get_shading(N, L):\n c1 = 0.8862269254527579\n c2 = 1.0233267079464883\n c3 = 0.24770795610037571\n c4 = 0.8580855308097834\n c5 = 0.4290427654048917\n\n nx = N[:, 1, :, :]\n ny = N[:, 0, :, :]\n nz = N[:, 2, :, :]\n b, c, h, w = N.shape\n Y1 = c1 * torch.ones(b, h, w)\n Y2 = c2 * nz\n Y3 = c2 * nx\n Y4 = c2 * ny\n Y5 = c3 * (2 * nz * nz - nx * nx - ny * ny)\n Y6 = c4 * nx * nz\n Y7 = c4 * ny * nz\n Y8 = c5 * (nx * nx - ny * ny)\n Y9 = c4 * nx * ny\n\n L = L.type(torch.float)\n sh = torch.split(L, 9, dim=1)\n assert(c == len(sh))\n shading = torch.zeros(b, c, h, w)\n \n if torch.cuda.is_available():\n Y1 = Y1.cuda()\n shading = shading.cuda()\n for j in range(c):\n l = sh[j]\n # Scale to 'h x w' dim\n l = l.repeat(1, h*w).view(b, h, w, 9)\n # Convert l into 'batch size', 'Index SH', 'h', 'w'\n l = l.permute([0, 3, 1, 2])\n # Generate shading\n shading[:, j, :, :] = Y1 * l[:, 0] + Y2 * l[:, 1] + Y3 * l[:, 2] + \\\n Y4 * l[:, 3] + Y5 * l[:, 4] + Y6 * l[:, 5] + \\\n Y7 * l[:, 6] + Y8 * l[:, 7] + Y9 * l[:, 
8]\n\n return shading\n\n\n# In[4]:\n\n\ndef create_shading_recon(n_out2,al_out2,light_out):\n M=n_out2.shape[0]\n N=n_out2.shape[1]\n No1=np.reshape(n_out2,(M*N,3),order='F')\n #tex1=al_out2.transpose(2,1,0).reshape(M*M,3)\n \n la = lambertian_attenuation(3)\n HN1 = normal_harmonics(No1, la)\n \n HN1 = np.array(HN1).transpose()\n HS1r=np.dot(HN1,light_out[0:9])\n HS1g=np.dot(HN1,light_out[9:18]) \n HS1b=np.dot(HN1,light_out[18:27]) \n HS1r=np.reshape(HS1r,[1,M ,N],order='F')\n HS1g=np.reshape(HS1g,[1,M ,N],order='F') \n HS1b=np.reshape(HS1b,[1,M ,N],order='F') \n HS1=np.concatenate((HS1r,HS1g,HS1b),axis = 0)\n HS1=np.moveaxis(HS1, 0, -1)\n Tex1=np.multiply(al_out2,HS1)\n \n IRen0=Tex1\n Shd=HS1*(200/255) #200 is added instead of 255 so that not to scale the shading to all white\n Ishd0=Shd\n return IRen0,Ishd0\n\n\n# ## Face Detection\n\n\ndef getmaskcoord(shape, dtype=\"int\"):\n # initialize the list of (x, y)-coordinates\n jawline = []\n #get the jawline\n for i in range(0, 27):\n if(i>16):\n jawline.append((shape.part(i).x, shape.part(i).y-10))\n else:\n jawline.append((shape.part(i).x, shape.part(i).y))\n if(i==21):\n jawline.append((shape.part(27).x, shape.part(19).y))\n# jawline.append((shape.part(17).x, shape.part(19).y-5))\n# jawline.append((shape.part(26).x, shape.part(19).y-5))\n jawline[17:]=np.flip(jawline,axis=0)[0:11]\n jawline=np.asarray(jawline,dtype=dtype)\n #get eyes\n eyes = []\n for i in range(36,48):\n eyes.append((shape.part(i).x, shape.part(i).y))\n \n eyes=np.asarray(eyes,dtype=dtype)\n #get mouth\n mouth = []\n #gigi hilang\n for i in range(48,59):\n #gigi muncul\n #for i in range(48,68):\n mouth.append((shape.part(i).x, shape.part(i).y))\n \n mouth=np.asarray(mouth,dtype=dtype)\n # return the list of (x, y)-coordinates\n return jawline,eyes,mouth\n\n\n# In[7]:\n\n\ndef getmask(img,jawline):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n imArray = np.asarray(img)\n # create mask\n polygon = jawline.flatten().tolist()\n maskIm = Image.new('L', (imArray.shape[1], imArray.shape[0]), 0)\n ImageDraw.Draw(maskIm).polygon(polygon, fill='white')\n #ImageDraw.Draw(maskIm).polygon(polygon, outline=(1))\n # draw eyes\n # righteyes=eyes[0:6].flatten().tolist()\n # ImageDraw.Draw(maskIm).polygon(righteyes, fill='black')\n # lefteyes=eyes[6:].flatten().tolist()\n # ImageDraw.Draw(maskIm).polygon(lefteyes, fill='black')\n # # draw mouth\n # mouth=mouth.flatten().tolist()\n # ImageDraw.Draw(maskIm).polygon(mouth, fill='black')\n \n mask = np.array(maskIm)\n return mask\n\n\n# In[8]:\n\n\ndef getface(img,jawline,eyes,mouth):\n im=img.copy()\n img = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)\n imArray = np.asarray(img)\n # create mask\n maskIm = Image.new('L', (imArray.shape[1], imArray.shape[0]), color=255)\n #create jawline\n jaw = jawline.flatten().tolist()\n ImageDraw.Draw(maskIm).polygon(jaw, fill='black')\n #draw eyes\n righteyes=eyes[0:6].flatten().tolist()\n ImageDraw.Draw(maskIm).polygon(righteyes, fill='white')\n lefteyes=eyes[6:].flatten().tolist()\n ImageDraw.Draw(maskIm).polygon(lefteyes, fill='white')\n # draw mouth\n mouth=mouth.flatten().tolist()\n ImageDraw.Draw(maskIm).polygon(mouth, fill='white')\n mask = np.array(maskIm)\n cutmask = cv2.bitwise_or(im,im,mask=mask)\n return mask\n\n\n# In[9]:\n\n\ndef createmask (image):\n predictor_path = './dlib/shape_predictor_68_face_landmarks.dat'\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(predictor_path) \n \n # if isinstance(image_path, str): \n # img = dlib.load_rgb_image(image_path) \n # 
else :\n img= np.array(image)\n rects = detector(img, 1)\n #for (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = predictor(img, rects[0])\n jawline,eyes,mouth = getmaskcoord(shape)\n face_mask = getmask(img,jawline)\n facepart_mask= getface(img,jawline,eyes,mouth)\n \n return face_mask,facepart_mask\n\n#main part\ndef main(img, mask ,template = None, degree = None , skin = None):\n M = 256\n P = 256\n L = 256\n \n net = SfsNetPipeline()\n if torch.cuda.is_available():\n net = nn.DataParallel(net)\n net = net.cuda()\n net.eval()\n checkpoints=torch.load('./model/sfs_net_model_9.pkl')\n net.load_state_dict(checkpoints['model_state_dict'],strict = False)\n\n if isinstance(img, str): \n img=cv2.imread(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img,(M,M))\n face_mask,_=createmask(img)\n img = np.float32(img)/255\n #facepart_mask=cv2.cvtColor(facepart_mask, cv2.COLOR_GRAY2RGB)\n face_mask = cv2.cvtColor(face_mask, cv2.COLOR_GRAY2RGB)\n face_mask=np.float32(face_mask)/255 \n #facepart_mask=np.float32(facepart_mask)/255 \n\n face_mask=cv2.resize(face_mask,(P,L))\n #facepart_mask=cv2.resize(facepart_mask,(P,L))\n\n #create mask for input image\n #facepart=np.multiply(img,facepart_mask) + np.multiply((1-facepart_mask),np.ones((L,P,3)))\n if (mask == True):\n img = np.multiply(img,face_mask)\n transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((P,L)),\n transforms.ToTensor()\n ])\n img *=255\n img = np.uint8(img)\n im_input = transform(img)\n im_input = im_input[np.newaxis,:,:,:]\n a=im_input[0].view(3,256,256).permute(1,2,0)\n n, a, l, sh,t,a_new,sh_new,t_new = net(im_input)\n al_out = a.detach().cpu().numpy()\n\n #change to h,w,c\n al_out = np.squeeze(al_out, 0)\n al_out=np.moveaxis(al_out, 0, -1)\n\n #normalize\n #n_out = (n_out + 1) / 2\n\n\n #%creates reconstruction and shading image\n al_out*=255\n al_out[al_out>255]= 255\n al_out[al_out<0]=0\n al_out = np.uint8(al_out)\n\n\n # #changeskin part\n # if skin is not None :\n # pref = 0\n # if skin == \"Caucasian\":\n # pref = 1\n # al_out = race(al_out, pref = pref)\n\n # # mask=cv2.resize(mask,(P,L))\n # # mask2=cv2.resize(mask2,(P,L))\n # #uncomment untuk melakukan color harmonize\n # if template is not None :\n # al_out = harmonize(al_out,degree = degree, template= template)\n\n al_out=transform(al_out).cuda()\n al_out = al_out[np.newaxis,:]\n\n\n Ishd=get_shading(n,l) \n\n # Ishd = Ishd.detach().cpu().numpy()\n # Ishd = np.squeeze(Ishd, 0)\n # Ishd = np.moveaxis(Ishd, 0, -1)\n # Ishd *= 255\n # Ishd = np.clip (Ishd,0,255)\n # Ishd = np.uint8(Ishd)\n # Ishd = cv2.cvtColor(Ishd, cv2.COLOR_RGB2GRAY)\n # Ishd = cv2.cvtColor(Ishd, cv2.COLOR_GRAY2RGB)\n # Ishd = transform(Ishd).cuda()\n # Ishd = Ishd[np.newaxis,:]\n\n # a=Ishd[0].view(3,256,256).permute(1,2,0)\n # a=a.detach().cpu().numpy()\n # b=sh[0].view(3,256,256).permute(1,2,0)\n # b=b.detach().cpu().numpy()\n # fig=plt.figure(figsize=(3, 3))\n # fig.add_subplot(131)\n # plt.imshow(a)\n # fig.add_subplot(132)\n # plt.imshow(b) \n # plt.show()\n\n #replace albedo\n # al_out = cv2.imread(\"/home/cgal/al.png\")\n # al_out = cv2.cvtColor(al_out, cv2.COLOR_BGR2RGB)\n # al_out = transform(al_out).cuda()\n # al_out = al_out[np.newaxis,:]\n\n\n Irec = al_out*Ishd\n\n im_input = im_input.detach().cpu().numpy()\n n_out = n.detach().cpu().numpy()\n light_out = l.detach().cpu().numpy()\n Irec = Irec.detach().cpu().numpy()\n Ishd 
= Ishd.detach().cpu().numpy()\n al_out = al_out.detach().cpu().numpy()\n\n\n al_out = np.squeeze(al_out, 0)\n al_out=np.moveaxis(al_out, 0, -1)\n n_out = np.squeeze(n_out, 0)\n n_out=np.moveaxis(n_out, 0, -1)\n light_out = np.moveaxis(light_out, 0, -1)\n Ishd = np.squeeze(Ishd, 0)\n Ishd=np.moveaxis(Ishd, 0, -1)\n Irec = np.squeeze(Irec, 0)\n Irec=np.moveaxis(Irec, 0, -1)\n \n im_input = np.squeeze(im_input, 0)\n im_input=np.moveaxis(im_input, 0, -1)\n im_input = cv2.cvtColor(im_input, cv2.COLOR_BGR2RGB)\n \n\n n_out = (n_out + 1) / 2\n\n image=im_input\n # normal = np.multiply(n_out,face_mask)+ np.multiply((1-face_mask),np.ones((L,P,3)))\n # albedo=np.multiply(al_out,face_mask)+ np.multiply((1-face_mask),np.ones((L,P,3)))\n # shading = np.multiply(Ishd,face_mask)+ np.multiply((1-face_mask),np.ones((L,P,3)))\n # recon=np.multiply(Irec,face_mask)+ np.multiply((1-face_mask),np.ones((L,P,3)))\n normal = n_out.copy()\n albedo=al_out.copy()\n shading = Ishd.copy()\n recon=Irec.copy()\n return image,normal,albedo,shading,recon\n\n\n\n\n\n","repo_name":"RichardoTiono/Original_Face_Recovery","sub_path":"GUI/function/sfsnet.py","file_name":"sfsnet.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3419040124","text":"from iofog.microservices.client import Client\nfrom iofog.microservices.exception import IoFogException\nfrom iofog.microservices.iomessage import IoMessage\nfrom iofog.microservices.listener import *\nimport json\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport os\nimport threading\nimport shutil\nfrom datetime import datetime\n\n\ncurrent_config = None\nlock = threading.Lock()\n\nmodel = None\nclass_names=['!', '(', ')', '+', ',', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '=', '[', ']', 'a', 'alpha', 'ascii_124', 'b', 'beta', 'c', 'cos', 'd', 'delta', 'div', 'e', 'exists', 'f', 'forall', 'forward_slash', 'g', 'gamma', 'geq', 'gt', 'h', 'i', 'in', 'infty', 'int', 'j', 'k', 'l', 'lambda', 'ldots', 'leq', 'lim', 'log', 'lt', 'm', 'mu', 'n', 'neq', 'o', 'p', 'phi', 'pi', 'pm', 'prime', 'q', 'r', 'rightarrow', 's', 'sigma', 'sin', 'sqrt', 'sum', 't', 'tan', 'theta', 'u', 'v', 'w', 'x', 'y', 'z', '{', '}']\n\nmodel_path = '/app/myvol/models'\ndata_path='/app/myvol/current'\nstorage_path='/app/myvol/storage'\nmodel_storage_path='/app/myvol/storage/models'\n\nfile_count = 0\n\n\n\ntry:\n client = Client()\nexcept IoFogException as e:\n print(e)\n \n#lists files in path\ndef list_file(path):\n files = []\n for file in os.listdir(path):\n current_path=os.path.join(path, file)\n files.append(current_path)\n return files\n \n#get latest file according to modification time\ndef get_last(models):\n time = os.stat(models[0]).st_mtime\n newer = models[0]\n for model in models:\n file_time = os.stat(model).st_mtime\n if time is None or time 0:\n try:\n config = client.get_config()\n break\n except IoFogException as ex:\n attempt_limit -= 1\n print(ex)\n \n if attempt_limit == 0:\n print('Config update failed :(')\n return\n\n lock.acquire()\n global current_config\n current_config = config\n lock.release()\n \ndef update_model():\n try:\n lock.acquire()\n global model\n models = list_file(model_path)\n \n if models:\n \n model_name = get_last(models)\n model = tf.keras.models.load_model(model_name)\n \n file = open(\"/app/myvol/model_log.txt\", \"a\")\n now = datetime.now()\n file.write(\"{}, using: {}\\n\".format(now.strftime(\"%d/%m/%Y %H:%M:%S\"), model_name))\n 
file.close()\n \n lock.release()\n \n for m in models:\n move(m,model_storage_path)\n \n except IOError as e:\n print(e)\n \ndef predict(file_path):\n\n try:\n \n if model is not None:\n\n #image load and prediction\n img = cv2.imread(file_path)\n \n img = cv2.resize(img, (45,45), interpolation = cv2.INTER_AREA)\n img = img.reshape(1, 45, 45, 3)\n\n pred = model.predict(img)\n res = class_names[np.argmax(pred)]\n \n #moves image to storage\n move(file_path, storage_path)\n \n #append result to result file\n file = open(\"/app/myvol/res.txt\", \"a\")\n file.write('{}\\n'.format(res))\n file.close()\n \n #prepare ioMesagge and send result\n msg=IoMessage()\n msg.infotype='application/json'\n msg.infoformat='text/utf-8'\n msg.contentdata=json.dumps({\"res\":res})\n msg.contextdata=\"\"\n\n client.post_message_via_socket(msg)\n \n except IoFogException as e:\n print(e)\n\nclass ControlListener(IoFogControlWsListener):\n def on_control_signal(self):\n update_config()\n\n\nclass MessageListener(IoFogMessageWsListener):\n def on_receipt(self, message_id, timestamp):\n print ('Receipt: {} {}'.format(message_id, timestamp))\n \n def on_message(self, msg):\n print ('Received message')\n update_model()\n\n\nupdate_config()\nupdate_model()\nclient.establish_message_ws_connection(MessageListener())\nclient.establish_control_ws_connection(ControlListener())\n\n#lists file in path\n#if any, calls predict\nwhile True:\n files = list_file(data_path)\n for file in files:\n predict(file)","repo_name":"fmonducci/iofog-project","sub_path":"python-test-v2/sender/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25758763823","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport argparse\n\nimport tensorflow as tf\n\n\ndef main(args):\n checkpoint_dir = args.input\n checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)\n\n with tf.Session() as sess:\n new_vars = []\n for var_name, var_shape in tf.train.list_variables(checkpoint_dir):\n var = tf.train.load_variable(checkpoint_dir, var_name)\n if var_name.startswith('sbert_classifier/ebert_classifier/') or \\\n var_name.endswith('adam_m') or var_name.endswith('adam_v'):\n continue\n\n # Set the new name\n new_name = var_name.replace('sbert_classifier/', 'ebert_classifier/')\n\n if var_name == new_name:\n print('same var name, skip {}'.format(var_name))\n else:\n print('{} will be renamed to {}'.format(var_name, new_name))\n if not args.dry_run:\n # Rename the variable\n new_var = tf.Variable(var, name=new_name)\n new_vars.append(new_var)\n\n if not args.dry_run:\n # Save the variables\n saver = tf.train.Saver(new_vars)\n sess.run(tf.global_variables_initializer())\n saver.save(sess, checkpoint.model_checkpoint_path,\n write_meta_graph=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=str, help='original checkpoint')\n parser.add_argument(\"-dr\", \"--dry_run\", action='store_true',\n help=\"dry run renaming\")\n main(parser.parse_args())\n","repo_name":"StonyBrookNLP/deformer","sub_path":"tools/process_ckpt.py","file_name":"process_ckpt.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"62"} +{"seq_id":"16185306605","text":"import load_references, load_illumina_data, load_rnaseq_data\nfrom microarray.process import aggregate_by_probe_set\nimport numpy as np\nfrom scipy 
import stats, cluster\nimport collections\nimport pandas as pd\n\nMB_GROUPS = (\n ('WNT', ('WIF1', 'TNC', 'GAD1', 'DKK2', 'EMX2'),),\n ('SHH', ('PDLIM3', 'EYA1', 'HHIP', 'ATOH1', 'SFRP1'),),\n ('Group C', ('IMPG2', 'GABRA5', 'EYS', 'NRL', 'MAB21L2', 'NPR3'),), # EYS = EGFL11\n ('Group D', ('KCNA1', 'EOMES', 'KHDRBS2', 'RBM24', 'UNC5D', 'OAS1')),\n)\n\nREF_GROUPS = (\n ('WNT', ('WIF1', 'TNC', 'GAD1', 'DKK2', 'EMX2'),),\n ('SHH', ('PDLIM3', 'EYA1', 'HHIP', 'ATOH1', 'SFRP1'),),\n ('Group C', ('IMPG2', 'GABRA5', 'EGFL11', 'NRL', 'MAB21L2', 'NPR3'),), # EYS = EGFL11\n ('Group D', ('KCNA1', 'EOMES', 'KHDRBS2', 'RBM24', 'UNC5D', 'OAS1')),\n)\n\n\ndef annotate_by_MB_group(df):\n \"\"\"\n Annotate the supplied dataframe by adding a new column, 'mb_group', that lists the MB group the gene is in\n or null if none.\n :param df:\n :return:\n \"\"\"\n res = df.copy()\n res['mb_group'] = [np.nan] * len(df.index)\n\n for grp, arr in MB_GROUPS:\n res.loc[res.index.isin(arr), 'mb_group'] = grp\n\n return res\n\n\ndef plot_microarray_mb_gene_expression(mb_samples=('ICb1299-III', 'ICb1299-IV')):\n \"\"\"\n Produce 2 publications:\n 1) bar chart subplots showing the absolute normed intensity values for the MB-implicated genes in both\n healthy and MB samples.\n 2) bar chart subplots showing the log2 fold change in those same genes\n :param mb_samples: Iterable with the sample names to use in the MB data\n :return:\n \"\"\"\n from plotting import bar\n plt = bar.plt\n METHOD = 'median'\n HEALTHY_SAMPLE_NAMES = [\n 'NT-1197',\n 'NCb-1',\n 'NCb-2',\n 'A911105',\n 'A508112',\n 'A508285',\n ]\n HEALTHY_SAMPLE_NAMES += [t + '-R' for t in HEALTHY_SAMPLE_NAMES]\n\n # load full microarray data\n marray_data = load_illumina_data.load_normed_microarray_data(pval=0.01)\n # replace null with zero\n marray_data.fillna(value=0., inplace=True)\n # load probe set definitions\n probe_set = load_illumina_data.load_illumina_array_library()\n marray_ann = load_illumina_data.add_gene_symbol_column(marray_data, probe_set)\n marray_by_gene = aggregate_by_probe_set(marray_ann, method=METHOD)\n mb_sample_names = list(mb_samples) + [t + '-R' for t in mb_samples]\n\n # pick out samples and aggregate\n mb = marray_by_gene.loc[:, mb_sample_names].mean(axis=1)\n he = marray_by_gene.loc[:, HEALTHY_SAMPLE_NAMES].mean(axis=1)\n\n # add MB group column\n marray_by_gene = annotate_by_MB_group(marray_by_gene)\n\n mb_grouped = dict(\n [(grp, mb.loc[marray_by_gene.mb_group == grp]) for grp, arr in MB_GROUPS]\n )\n he_grouped = dict(\n [(grp, he.loc[marray_by_gene.mb_group == grp]) for grp, arr in MB_GROUPS]\n )\n\n # figure 1: absolute TPM\n\n data = collections.OrderedDict([\n (grp, [he_grouped[grp], mb_grouped[grp]]) for grp, _ in REF_GROUPS\n ])\n fig, axs = bar.multi_grouped_bar_chart(data, xlabel_coords=(0.5, -.21))\n\n axs[-1].legend(['Healthy cerebellum', 'MB'])\n axs[0].set_ylabel('Normed intensity')\n ylim = list(axs[-1].get_ylim())\n ylim[0] = -1e-6\n axs[-1].set_ylim(ylim)\n plt.subplots_adjust(left=0.1, right=0.99, bottom=0.2, top=0.95, wspace=0.08, hspace=0.)\n\n # figure 2: log2 fold change\n\n LOG_MIN = -7\n LOG_MAX = 7\n log_fold_diff = {}\n for grp in mb_grouped:\n t = np.log2(mb_grouped[grp] / he_grouped[grp])\n log_fold_diff[grp] = t\n\n data = collections.OrderedDict([\n (grp, [log_fold_diff[grp]]) for grp, _ in REF_GROUPS\n ])\n fig, axs = bar.multi_grouped_bar_chart(data, xlabel_coords=(0.5, -.21), ylim=[LOG_MIN, LOG_MAX], colours=['gray'])\n\n axs[0].set_ylabel('Log2 fold change')\n plt.subplots_adjust(left=0.1, right=0.99, 
bottom=0.2, top=0.95, wspace=0.08, hspace=0.)\n\n\ndef plot_rna_seq_mb_gene_expression():\n \"\"\"\n Produce 2 publications:\n 1) bar chart subplots showing the absolute TPM values for RNA-Seq in the MB-implicated genes in both\n healthy and MB samples.\n 2) bar chart subplots showing the log2 fold change in those same genes\n :return:\n \"\"\"\n from plotting import bar\n plt = bar.plt\n # load 2015 RNA-Seq gene activity counts\n # ignore the confidence intervals, which are wrong\n rna_z = load_rnaseq_data.load_rnaseq_cufflinks_gene_count_data(unit='tpm')\n\n # load Allen RNA-Seq cerebellum activity TPM\n _, rna_a, meta = load_references.load_cerebellum_rnaseq_reference_data()\n\n # annotate both by MB group\n rna_z['mb_group'] = [np.nan] * len(rna_z.index)\n rna_a['mb_group'] = [np.nan] * len(rna_a.index)\n\n for grp, arr in MB_GROUPS:\n rna_z.loc[rna_z.index.isin(arr), 'mb_group'] = grp\n rna_a.loc[rna_a.index.isin(arr), 'mb_group'] = grp\n\n rna_z_mean = rna_z.mean(axis=1)\n rna_a_mean = rna_a.mean(axis=1)\n\n rna_a_grouped = dict(\n [(grp, rna_a_mean.loc[rna_a.mb_group == grp]) for grp, arr in MB_GROUPS]\n )\n rna_z_grouped = dict(\n [(grp, rna_z_mean.loc[rna_z.mb_group == grp]) for grp, arr in MB_GROUPS]\n )\n\n # figure 1: absolute TPM\n\n data = collections.OrderedDict([\n (grp, [rna_a_grouped[grp], rna_z_grouped[grp]]) for grp, _ in REF_GROUPS\n ])\n fig, axs = bar.multi_grouped_bar_chart(data, xlabel_coords=(0.5, -.21))\n\n axs[-1].legend(['Healthy cerebellum', 'MB'])\n axs[0].set_ylabel('TPM')\n ylim = list(axs[-1].get_ylim())\n ylim[0] = -1e-6\n axs[-1].set_ylim(ylim)\n plt.subplots_adjust(left=0.1, right=0.99, bottom=0.2, top=0.95, wspace=0.08, hspace=0.)\n\n # figure 2: log2 fold change\n\n LOG_MIN = -7\n LOG_MAX = 7\n log_fold_diff = {}\n for grp in rna_z_grouped:\n t = np.log2(rna_z_grouped[grp] / rna_a_grouped[grp])\n log_fold_diff[grp] = t\n\n data = collections.OrderedDict([\n (grp, [log_fold_diff[grp]]) for grp, _ in REF_GROUPS\n ])\n fig, axs = bar.multi_grouped_bar_chart(data, xlabel_coords=(0.5, -.21), ylim=[LOG_MIN, LOG_MAX], colours=['gray'])\n\n axs[0].set_ylabel('Log2 fold change')\n plt.subplots_adjust(left=0.1, right=0.99, bottom=0.2, top=0.95, wspace=0.08, hspace=0.)\n\n\ndef comparison_healthy_vs_mb_microarray_northcott_genes():\n from matplotlib import rc, pyplot as plt, gridspec as gridspec\n import seaborn as sns\n plt.interactive(True)\n sns.set_style('white')\n\n METHOD = 'median'\n HEALTHY_SAMPLE_NAMES = [\n 'NT1197',\n 'NCb1',\n 'NCb2',\n 'A911105',\n 'A508112',\n 'A508285',\n ]\n SAMPLE_GROUPS = (\n ('WNT', ('Pt1140', 'ICb1140-II', 'ICb1140-III', 'Pt1192', 'ICb1192-I', 'ICb1192-III', 'ICb1192-V')),\n ('SSH', ('Pt1338', 'ICb1338-I', 'ICb1338-III', 'ICb984-I', 'ICb984-III', 'ICb984-V')),\n ('Group C', (\n 'ICb1197-I',\n 'ICb1197-III',\n 'Pt1494',\n 'ICb1494-I',\n 'ICb1494-III',\n 'ICb1494-V',\n 'Pt1572',\n 'ICb1572-I',\n 'ICb1572-III',\n 'ICb1572-V',\n 'Pt1595',\n 'ICb1595-I',\n 'ICb1595-III',\n )),\n ('Group D', (\n 'Pt1078',\n 'ICb1078-I',\n 'ICb1078-III',\n 'ICb1078-V',\n 'Pt1299',\n 'ICb1299-I',\n 'ICb1299-III',\n 'ICb1299-IV',\n 'Pt1487',\n 'ICb1487-I',\n 'ICb1487-III',\n )),\n )\n\n all_northcott_patients = []\n for grp, arr in SAMPLE_GROUPS:\n all_northcott_patients.extend(arr)\n\n # load full microarray data\n marray_data = load_illumina_data.load_normed_microarray_data(pval=0.05)\n\n probe_set = load_illumina_data.load_illumina_array_library()\n marray_ann = load_illumina_data.add_gene_symbol_column(marray_data, probe_set)\n marray_by_gene = 
load_illumina_data.aggregate_by_probe_set(marray_ann, method=METHOD)\n\n all_mb_genes = []\n for _, arr in REF_GROUPS:\n all_mb_genes.extend(arr)\n\n # standardised scores by gene\n marray_by_gene_stand = (\n marray_by_gene.subtract(marray_by_gene.mean(axis=1), axis=0)\n .divide(marray_by_gene.std(axis=1), axis=0)\n )\n\n\n\n\n # take mean over repeats\n for sn in load_illumina_data.SAMPLE_NAMES:\n marray_by_gene.loc[:, sn] = marray_by_gene.loc[:, [sn, sn + '-R']].mean(axis=1)\n marray_by_gene_stand.loc[:, sn] = marray_by_gene_stand.loc[:, [sn, sn + '-R']].mean(axis=1)\n marray_by_gene = marray_by_gene.loc[:, load_illumina_data.SAMPLE_NAMES]\n marray_by_gene_stand = marray_by_gene_stand.loc[:, load_illumina_data.SAMPLE_NAMES]\n\n VMAX = 15000\n\n # v1: absolute values\n\n fig = plt.figure(figsize=[5, 8])\n gs = gridspec.GridSpec(2, len(REF_GROUPS),\n height_ratios=[1, 12],\n width_ratios=[len(arr) for _, arr in REF_GROUPS])\n gs.update(\n left=0.2,\n right=0.95,\n top=0.95,\n bottom=0.15,\n wspace=0.,\n hspace=0.1)\n cbar_kws = {\"orientation\": \"horizontal\"}\n\n for i, (grp, arr) in enumerate(REF_GROUPS):\n ax = fig.add_subplot(gs[1:, i])\n if i == (len(REF_GROUPS) - 1):\n cbar = True\n cbar_ax = fig.add_subplot(gs[0, :])\n else:\n cbar = False\n cbar_ax = None\n sns.heatmap(\n marray_by_gene.loc[arr, all_northcott_patients + HEALTHY_SAMPLE_NAMES].transpose(),\n ax=ax,\n vmin=0,\n vmax=VMAX,\n square=True,\n cmap='Reds',\n cbar=cbar,\n cbar_ax=cbar_ax,\n cbar_kws=cbar_kws\n )\n ax.set_xticklabels(arr, rotation=90)\n if i == 0:\n plt.yticks(rotation=0)\n else:\n ax.set_yticklabels([])\n ax.set_xlabel(grp)\n ax.xaxis.set_label_coords(.5, -.15)\n cbar_ax.set_title('$\\log_2$(Normalised intensity)')\n\n fig.savefig(\"marray_all_samples_mb_gene_activity_heatmap.png\", dpi=200)\n fig.savefig(\"marray_all_samples_mb_gene_activity_heatmap.pdf\", dpi=200)\n\n # v2: log2(absolute) values\n\n fig = plt.figure(figsize=[5, 8])\n gs = gridspec.GridSpec(2, len(REF_GROUPS),\n height_ratios=[1, 12],\n width_ratios=[len(arr) for _, arr in REF_GROUPS])\n gs.update(\n left=0.2,\n right=0.95,\n top=0.95,\n bottom=0.15,\n wspace=0.,\n hspace=0.1)\n cbar_kws = {\"orientation\": \"horizontal\"}\n\n for i, (grp, arr) in enumerate(REF_GROUPS):\n ax = fig.add_subplot(gs[1:, i])\n if i == (len(REF_GROUPS) - 1):\n cbar = True\n cbar_ax = fig.add_subplot(gs[0, :])\n else:\n cbar = False\n cbar_ax = None\n sns.heatmap(\n np.log2(marray_by_gene.loc[arr, all_northcott_patients + HEALTHY_SAMPLE_NAMES].transpose()),\n ax=ax,\n vmin=0,\n vmax=np.ceil(np.log2(VMAX)),\n square=True,\n cmap='Reds',\n cbar=cbar,\n cbar_ax=cbar_ax,\n cbar_kws=cbar_kws\n )\n ax.set_xticklabels(arr, rotation=90)\n if i == 0:\n plt.yticks(rotation=0)\n else:\n ax.set_yticklabels([])\n ax.set_xlabel(grp)\n ax.xaxis.set_label_coords(.5, -.15)\n cbar_ax.set_title('$\\log_2$(Normalised intensity)')\n\n fig.savefig(\"marray_all_samples_mb_gene_log_activity_heatmap.png\", dpi=200)\n fig.savefig(\"marray_all_samples_mb_gene_log_activity_heatmap.pdf\", dpi=200)\n\n # v3: standardised by score values\n\n fig = plt.figure(figsize=[5, 8])\n gs = gridspec.GridSpec(2, len(REF_GROUPS),\n height_ratios=[1, 12],\n width_ratios=[len(arr) for _, arr in REF_GROUPS])\n gs.update(\n left=0.2,\n right=0.95,\n top=0.95,\n bottom=0.15,\n wspace=0.,\n hspace=0.1)\n cbar_kws = {\"orientation\": \"horizontal\"}\n\n for i, (grp, arr) in enumerate(REF_GROUPS):\n ax = fig.add_subplot(gs[1:, i])\n if i == (len(REF_GROUPS) - 1):\n cbar = True\n cbar_ax = fig.add_subplot(gs[0, 
:])\n else:\n cbar = False\n cbar_ax = None\n sns.heatmap(\n marray_by_gene_stand.loc[arr, all_northcott_patients + HEALTHY_SAMPLE_NAMES].transpose(),\n ax=ax,\n vmin=-1,\n vmax=1.,\n square=True,\n cmap='RdBu_r',\n cbar=cbar,\n cbar_ax=cbar_ax,\n cbar_kws=cbar_kws\n )\n ax.set_xticklabels(arr, rotation=90)\n if i == 0:\n plt.yticks(rotation=0)\n else:\n ax.set_yticklabels([])\n ax.set_xlabel(grp)\n ax.xaxis.set_label_coords(.5, -.15)\n cbar_ax.set_title('Standardised score by gene')\n\n fig.savefig(\"marray_all_samples_mb_standardised_by_gene_activity_heatmap.png\", dpi=200)\n fig.savefig(\"marray_all_samples_mb_standardised_by_gene_activity_heatmap.pdf\", dpi=200)\n\n\nif __name__ == '__main__':\n\n PVAL_MIN = 0.01\n from matplotlib import pyplot as plt\n plt.interactive(True)\n\n expr, meta = load_references.load_cerebellum_microarray_reference_data()\n # convert to by-gene data\n ref_gene_expr = load_references.microarray_gene_markers(expr)\n\n marray_data_normed = load_illumina_data.load_normed_microarray_data(pval=PVAL_MIN)\n cat = load_illumina_data.load_illumina_array_library()\n # convert to by-gene data\n m1 = load_illumina_data.convert_microarray_to_gene_activity(marray_data_normed, cat)\n\n # find intersecting gene set\n gene_set = m1.index.intersection(ref_gene_expr.index)\n\n m2 = m1.copy()\n m2 /= m2.sum(axis=0)\n\n # m2 = m1.loc[gene_set]\n # m2 /= m2.sum(axis=0)\n\n ref = ref_gene_expr.loc[gene_set]\n ref_mean = ref.mean(axis=1)\n ref_mean /= ref_mean.sum()\n\n # get groups of genes\n fig, axs = plt.subplots(ncols=len(MB_GROUPS), sharey=True, figsize=(10, 6))\n\n for i in range(len(MB_GROUPS)):\n grp, mb_arr = MB_GROUPS[i]\n _, ref_arr = REF_GROUPS[i]\n ax = axs[i]\n baseline = ref_mean[ref_mean.index.isin(ref_arr)]\n topline = m1[m1.index.isin(mb_arr)]\n\n ax = axs[i]\n topline.transpose().boxplot(ax=ax, rot=90)\n ax.set_xlabel(grp)\n ax.xaxis.set_label_coords(0.5, -.21)\n if i == 0:\n ax.set_ylabel('Normalised intensity')\n\n plt.subplots_adjust(left=0.1, right=0.99, bottom=0.2, top=0.99, wspace=0.05, hspace=0.)\n\n # # excise gene IDs\n # ex = expr.copy()\n # ex = ex.ix[:, 2:]\n\n\n\n # create triangular matrix of lin regression results\n # n = ex.columns.size\n # ex.columns = pd.Int64Index(range(n))\n\n # res = pd.DataFrame(columns=['i', 'j', 'linreg_intercept', 'linreg_slope', 'linreg_rsq', 'p_wilcox'])\n # for i in range(n):\n # for j in range(i + 1, n):\n # a = all_expr[[i, j]].dropna(axis=0)\n # a0 = a[i]\n # a1 = a[j]\n # lr = stats.linregress(a0, a1)\n # _, pwilcox = stats.wilcoxon(a0, a1)\n # this_row = pd.Series(data=(i, j, lr.intercept, lr.slope, lr.rvalue ** 2, pwilcox), index=res.columns)\n # res = res.append(this_row, ignore_index=True)\n #\n\n # with open('cerebellum_pairwise_comparison.pkl', 'rb') as f:\n # res = dill.load(f)\n\n # cluster using Rsq as distance\n # Z = cluster.hierarchy.linkage(res.linreg_rsq) # Z is a linkage matrix\n # dendro = cluster.hierarchy.dendrogram(Z, orientation='left')\n","repo_name":"gaberosser/qmul-bioinf","sub_path":"scripts/comparison_rnaseq_microarray/comparisons.py","file_name":"comparisons.py","file_ext":"py","file_size_in_byte":15583,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"21958237506","text":"from .api_test_case import APITestCase\n\n\nclass OfficeHoursTestCase(APITestCase):\n\n def assert_ui_notification(self, response, success, notification,\n office_hour=None):\n data = response.data\n detail = notification if notification else \"\"\n header = 
self.ui_header(success, office_hour)\n self.assertTrue(all([\n data['success'] == success,\n data['header'] == header,\n data['detail'] == detail\n ]), msg='Notification data was not as expected')\n\n def ui_header(self, success, office_hour):\n if success:\n return self.success_header.format(mentor_name(office_hour))\n else:\n return self.fail_header\n\n\ndef mentor_name(office_hour):\n if office_hour and office_hour.mentor:\n return office_hour.mentor.full_name()\n return None\n","repo_name":"masschallenge/impact-api","sub_path":"web/impact/impact/tests/office_hours_test_case.py","file_name":"office_hours_test_case.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"41257007834","text":"# -*- coding: utf-8 -*-\n\n\ndef main():\n from collections import defaultdict\n import sys\n\n input = sys.stdin.readline\n\n h, w, n = map(int, input().split())\n d = defaultdict(list)\n ids = defaultdict(int)\n\n # See:\n # https://www.youtube.com/watch?v=KMsEXLe_8go\n for i in range(n):\n ri, ci, ai = map(int, input().split())\n d[-ai].append((ri, ci)) # multiply ai by -1 so the keys sort in descending order\n ids[(ri, ci)] = i # record the input index of coordinate (ri, ci)\n \n row_max = defaultdict(int)\n col_max = defaultdict(int)\n ans = [0] * n\n \n for key in sorted(d.keys()):\n # to handle equal ai values, read the move counts first, then update them\n # read\n for ri, ci in d[key]:\n now = max(row_max[ri], col_max[ci])\n ans[ids[(ri, ci)]] = now\n\n # update\n for ri, ci in d[key]:\n now = ans[ids[(ri, ci)]]\n row_max[ri] = max(row_max[ri], now + 1)\n col_max[ci] = max(col_max[ci], now + 1)\n \n print(*ans, sep=\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KATO-Hiro/AtCoder","sub_path":"AtCoder_Virtual_Contest/mayocorn20230504/f/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"25006475565","text":"\"\"\"Urls for api endpoints for fn_portal\n\n+ projects\n+ /sams\n+ /catcnts\n+ /biosamples\n\n+ //\n\n+ /////\n\n\"\"\"\n\n\nfrom django.urls import path, re_path\n\nfrom ..views import ( # readonly endpoints:; FN011ViewSet,; CRUD Endpoints:\n EffortList,\n FN122DetailView,\n FN122ListView,\n FN121GpsTrackList,\n)\n\nurlpatterns = [\n path(\"fn122/\", EffortList.as_view(), name=\"effort_list\"),\n path(\"/\", FN122ListView.as_view(), name=\"FN122_listview\"),\n path(\"fn122//\", FN122DetailView.as_view(), name=\"FN122_detailview\"),\n path(\"fn121GpsTracks/\", FN121GpsTrackList.as_view(), name=\"fn121_gpstrack_list\"),\n]\n","repo_name":"AdamCottrill/FishNetPortal","sub_path":"fn_portal/api/urls/FN122.py","file_name":"FN122.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40014545082","text":"# Problem Set 4B\r\n# Name: Hoang Nguyen\r\n# Time Spent: 1:30\r\n\r\nimport string\r\n\r\n### HELPER CODE ###\r\ndef load_words(file_name):\r\n '''\r\n file_name (string): the name of the file containing \r\n the list of words to load \r\n \r\n Returns: a list of valid words. 
Words are strings of lowercase letters.\r\n \r\n Depending on the size of the word list, this function may\r\n take a while to finish.\r\n '''\r\n # print(\"Loading word list from file...\")\r\n # inFile: file\r\n inFile = open(file_name, 'r')\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.extend([word.lower() for word in line.split(' ')])\r\n # print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist\r\n\r\ndef is_word(word_list, word):\r\n '''\r\n Determines if word is a valid word, ignoring\r\n capitalization and punctuation\r\n\r\n word_list (list): list of words in the dictionary.\r\n word (string): a possible word.\r\n \r\n Returns: True if word is in word_list, False otherwise\r\n\r\n Example:\r\n >>> is_word(word_list, 'bat') returns\r\n True\r\n >>> is_word(word_list, 'asdf') returns\r\n False\r\n '''\r\n word = word.lower()\r\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\r\n return word in word_list\r\n\r\ndef get_story_string():\r\n \"\"\"\r\n Returns: a story in encrypted text.\r\n \"\"\"\r\n f = open(\"story.txt\", \"r\")\r\n story = str(f.read())\r\n f.close()\r\n return story\r\n\r\n### END HELPER CODE ###\r\n\r\nWORDLIST_FILENAME = 'words.txt'\r\n\r\nclass Message(object):\r\n # Init class, holds input text and list of valid words\r\n def __init__(self, text):\r\n self.message_text = text\r\n self.valid_words = load_words(WORDLIST_FILENAME)\r\n\r\n # Getter for message text\r\n def get_message_text(self):\r\n return self.message_text\r\n\r\n # Getter for valid words\r\n def get_valid_words(self):\r\n return list(self.valid_words)\r\n\r\n # Return a dictionary foor cipher message\r\n def build_shift_dict(self, shift):\r\n\r\n # For return\r\n shift_dict = {}\r\n\r\n # Cipher lower and upper case simultanously\r\n for i in range(len(string.ascii_lowercase)):\r\n cipher = (i + shift) % 26\r\n shift_dict[string.ascii_lowercase[i]] = string.ascii_lowercase[cipher]\r\n shift_dict[string.ascii_uppercase[i]] = string.ascii_uppercase[cipher]\r\n\r\n # Return and terminate\r\n return shift_dict\r\n\r\n # Applying the shift cipher to the current message text\r\n def apply_shift(self, shift):\r\n\r\n # Vaiables for return and enconding\r\n cipher_string = ''\r\n shift_dict = self.build_shift_dict(shift)\r\n\r\n # Start encoding\r\n for char in self.message_text:\r\n cond = shift_dict.get(char, 0)\r\n if cond:\r\n cipher_string += cond\r\n else:\r\n cipher_string += char\r\n\r\n # Return and terminate\r\n return cipher_string\r\n\r\nclass PlaintextMessage(Message):\r\n def __init__(self, text, shift):\r\n # Init class, inherit from message then create encryption\r\n # library, direction and message\r\n Message.__init__(self, text)\r\n self.shift = shift\r\n self.encryption_dict = self.build_shift_dict(shift)\r\n self.message_text_encrypted = self.apply_shift(shift)\r\n\r\n # Getter for shift value\r\n def get_shift(self):\r\n return self.shift\r\n\r\n # Getter for encryption dictionary generated from shift value\r\n def get_encryption_dict(self):\r\n return dict(self.encryption_dict)\r\n\r\n # Getter for encrypted text\r\n def get_message_text_encrypted(self):\r\n return self.message_text_encrypted\r\n\r\n # Setter to change set shift value, also rebuilt dictionary\r\n # and encrypted message\r\n def change_shift(self, shift):\r\n self.shift = shift\r\n self.encryption_dict = self.build_shift_dict(shift)\r\n self.message_text_encrypted = self.apply_shift(shift)\r\n\r\n\r\nclass CiphertextMessage(Message):\r\n # Class 
initialize, basically take everything from message\r\n def __init__(self, text):\r\n Message.__init__(self, text)\r\n\r\n # Brute force decryption\r\n def decrypt_message(self):\r\n # Variable to store the best shift value and result\r\n best_shift = 0\r\n best = 0\r\n\r\n # Basically iterate through all 26 possible shift values (0-25)\r\n for i in range(0,26):\r\n itr_count = 0\r\n shift_search = self.apply_shift(i).lower().split(\" \")\r\n\r\n # Clean up the split list first\r\n for word in shift_search:\r\n if is_word(self.valid_words, word):\r\n itr_count += 1\r\n\r\n # Storing our result here\r\n if itr_count > best:\r\n best = itr_count\r\n best_shift = i\r\n\r\n # Break out if we have matched every word\r\n if best == len(shift_search):\r\n break\r\n\r\n # Return the decrypted message and terminate\r\n return self.apply_shift(best_shift)\r\n\r\nif __name__ == '__main__':\r\n\r\n # # Test 1\r\n print('Test 1')\r\n text1 = 'Hello World!'\r\n en_text1 = PlaintextMessage(text1, 987).get_message_text_encrypted()\r\n test1 = CiphertextMessage(en_text1)\r\n print('The encrypted text is', en_text1)\r\n print('The original text is', test1.decrypt_message())\r\n print('Expected:', text1)\r\n print('Test 1 done')\r\n print()\r\n\r\n # Test 2\r\n print('Test 2')\r\n text2 = \"What's good my nigga? Yo down with the hood dog? Brothas fo life homie!\"\r\n en_text2 = PlaintextMessage(text2, -987686).get_message_text_encrypted()\r\n test2 = CiphertextMessage(en_text2)\r\n print('The encrypted text is', en_text2)\r\n print('The original text is', test2.decrypt_message())\r\n print('Expected:', text2)\r\n print('Test 2 done')\r\n print()\r\n\r\n # Test 3 for PlaintextMessage class\r\n print('Test 3')\r\n text3 = '“Stop!” he yelled. “You’ve got two flat tires!”'\r\n en_text3 = PlaintextMessage(text3, 15).get_message_text_encrypted()\r\n test3 = CiphertextMessage(en_text3)\r\n print('The encrypted text is', en_text3)\r\n print('The original text is', test3.decrypt_message())\r\n print('Expected:', text3)\r\n print('Test 3 done')\r\n print()\r\n\r\n # Test 4 for CiphertextMessage class\r\n print('Test 4')\r\n en_text4 = get_story_string()\r\n test4 = CiphertextMessage(en_text4)\r\n print('The encrypted text is')\r\n print(en_text4)\r\n print('The original text is')\r\n print(test4.decrypt_message())\r\n print('Test 4 done')\r\n \r\n","repo_name":"nphoang1102/6.001","sub_path":"project4/ps4b.py","file_name":"ps4b.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"70384306437","text":"import csv\r\n\r\ndef totalPoints(dictionaryPTS):\r\n total = 0\r\n for key in dictionaryPTS:\r\n total += (dictionaryPTS[key] * 100)\r\n return total\r\n\r\ndef calculateGrade(dictionaryPTS, average, row):\r\n average = 0\r\n for index in dictionaryPTS:\r\n average += (float(row[index])*dictionaryPTS[index])\r\n return average\r\n\r\n# ask for which csv file to open\r\nfilename = str(input(\"Enter the exact csv file name to open: \"))\r\nprint()\r\n\r\nindividualCheck = str(input(\"Do you wan to find a certain student (Yes or No): \"))\r\nif individualCheck == 'Yes':\r\n whichStudent = str(input(\"Which student are you looking for (last name, first name): \"))\r\n\r\nwith open(filename, 'r') as file:\r\n csvreader = csv.reader(file)\r\n heading = next(csvreader) # get the header of the csv file\r\n\r\n # initialize all dictionaries based on assignments/labs/readings/exams with the amount of pts associated with 
them\r\n\r\n readings = {}\r\n labs = {}\r\n exams = {}\r\n midterm = {}\r\n final = {}\r\n project = {}\r\n extraCredit = {}\r\n\r\n # initialize the number of points to -1 and the position of every item to 0\r\n pts = -1.0\r\n pos = 0\r\n\r\n # iterate through the header\r\n for item in heading:\r\n\r\n # find the number of points and only convert if points exist in the file\r\n parentheses1 = item.find(\"(\")\r\n parentheses2 = item.find(\")\")\r\n if parentheses1 > -1 or parentheses1 > -1:\r\n parentheses1 += 1\r\n pts = float(item[parentheses1: parentheses2]) / 100\r\n\r\n # find key terms in the file and associate the position and points based on the key term\r\n if \"Project\" in item:\r\n project[pos] = pts\r\n\r\n elif \"Additional\" in item:\r\n extraCredit[pos] = pts\r\n\r\n elif \"Lab\" in item:\r\n labs[pos] = pts\r\n\r\n elif \"Exam\" in item:\r\n exams[pos] = pts\r\n\r\n elif \"Midterm\" in item:\r\n midterm[pos] = pts\r\n\r\n elif \"Topic\" in item:\r\n readings[pos] = pts\r\n\r\n pos += 1\r\n \r\n # find the total number of points for each section\r\n totalLabs = totalPoints(labs)\r\n totalReading = totalPoints(readings)\r\n totalExam = totalPoints(exams)\r\n\r\n # find the avg of all grades\r\n # iterate through the rest of the file\r\n count = 0\r\n for row in csvreader:\r\n count += 1\r\n # calculate reading average\r\n avgReads = 0\r\n avgReads = calculateGrade(readings, avgReads, row)\r\n \r\n # calculate lab average\r\n avgLabs = 0\r\n avgLabs = calculateGrade(labs, avgLabs, row) \r\n avgLabs += calculateGrade(extraCredit, avgLabs, row) \r\n \r\n # calculate project 1 grade\r\n for index in project:\r\n project1 = (float(row[index])/33.33)*100\r\n\r\n # calculate exam average \r\n avgExams = {}\r\n num = 1\r\n for index in exams:\r\n if row[index] != 'N/A':\r\n avgExams[num] = (float(row[index])*exams[index])\r\n num += 1\r\n\r\n #calculate midterm grade\r\n midtermGrade = 0\r\n midtermGrade = calculateGrade(midterm, midtermGrade, row)\r\n\r\n if avgLabs > 2400:\r\n avgLabs = 2400\r\n\r\n if individualCheck == 'No':\r\n print(f\"{row[0]}, {row[1]}, {row[4]} - Readings: {avgReads:.2f}, Labs: {avgLabs:.2f}, Project 1: {project1:.2f}\")\r\n examGrade = 0\r\n for key in avgExams:\r\n print(f\"Exam {key}: {avgExams[key]:.2f}\")\r\n examGrade += avgExams[key]\r\n \r\n #calculate final grade letter\r\n finalGrade = (avgLabs/totalLabs)*15 + ((midtermGrade+examGrade+400)/totalExam)*40 + (avgReads/totalReading)*10 + (project1/200)*20\r\n finalGrade += ((\"\"\"Manually input quiz :(\"\"\") / 6) * 15\r\n print(f\"Final grade: {finalGrade}%\")\r\n print(input(\"Press Enter to continue.\"))\r\n\r\n else:\r\n if whichStudent == f\"{row[0]}, {row[1]}\":\r\n print(f\"{row[0]}, {row[1]}, {row[4]} - Readings: {avgReads:.2f}, Labs: {avgLabs:.2f}, Project 1: {project1:.2f}\")\r\n examGrade = 0\r\n for key in avgExams:\r\n print(f\"Exam {key}: {avgExams[key]:.2f}\")\r\n examGrade += avgExams[key]\r\n\r\n #calculate final grade letter\r\n finalGrade = (avgLabs/totalLabs)*15 + ((midtermGrade+examGrade+400)/totalExam)*40 + (avgReads/totalReading)*10 + (project1/200)*20\r\n finalGrade += ((\"\"\"Manually input quiz :(\"\"\") / 6) * 15\r\n print(f\"Final grade: {finalGrade}%\")\r\n\r\n print(input(\"Press Enter to continue.\"))\r\n\r\n","repo_name":"enleeyee/COSC1437GradeSystem","sub_path":"COSC1347GradeSystem.py","file_name":"COSC1347GradeSystem.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} 
+{"seq_id":"28434904780","text":"import tensorflow as tf\nimport numpy as np\n\ndef scale_bbox(bbox_true, scale):\n x1, y1, x2, y2 = tf.split(bbox_true, 4, axis = -1)\n w, h = (x2 - x1) / 2, (y2 - y1) / 2\n cx, cy = x1 + w, y1 + h\n w *= scale\n h *= scale\n bbox_true = tf.concat([cx - w, cy - h, cx + w, cy + h], axis = -1)\n return bbox_true\n\ndef isin(bbox_true, bbox_pred, extra_length = None, mode = \"rect\"):\n \"\"\"\n Calculates center_pred in bbox_true\n \n extra_length = compare by center ± extra_length\n mode = ('rect', 'circle')\n \"\"\"\n if mode not in (\"rect\", \"circle\"):\n raise ValueError(\"unknown mode '{0}'\".format(mode))\n \n true_count = tf.shape(bbox_true)[0]\n pred_count = tf.shape(bbox_pred)[0]\n \n bbox_true = tf.reshape(tf.tile(tf.expand_dims(bbox_true, 0), [1, 1, pred_count]), [-1, 4])\n bbox_pred = tf.tile(bbox_pred, [true_count, 1])\n \n tx1, ty1, tx2, ty2 = tf.split(bbox_true, 4, axis = -1)\n px1, py1, px2, py2 = tf.split(bbox_pred, 4, axis = -1)\n \n tcx, tcy = (tx1 + tx2) / 2, (ty1 + ty2) / 2\n pcx, pcy = (px1 + px2) / 2, (py1 + py2) / 2\n if extra_length is not None:\n tx1 = tcx - extra_length\n ty1 = tcy - extra_length\n tx2 = tcx + extra_length\n ty2 = tcy + extra_length\n th, tw = (ty2 - ty1) / 2, (tx2 - tx1) / 2\n \n if mode == \"rect\":\n #flag = 0 < (tf.maximum(tf.minimum(tx2, px2) - tf.maximum(tx1, px1), 0) * tf.maximum(tf.minimum(ty2, py2) - tf.maximum(ty1, py1), 0))\n flag = tf.logical_and(tf.logical_and(tx1 < pcx, pcx < tx2), tf.logical_and(ty1 < pcy, pcy < ty2))\n else: #elif mode == \"circle\":\n flag = tf.logical_and(tf.abs(tcx - pcx) < tw, tf.abs(tcy - pcy) < th)\n return tf.reshape(flag, [true_count, pred_count])\n\ndef random_bbox(alpha = 1, image_shape = None, scale = None, clip = False, clip_object = True):\n h, w = image_shape[:2] if image_shape is not None else [1, 1]\n scale_h, scale_w = [scale, scale] if np.ndim(scale) == 0 else scale\n if scale is not None and np.any(np.greater_equal(scale, 2)):\n if image_shape is None:\n h, w = [scale_h, scale_w]\n scale_h, scale_w = [scale_h / h, scale_w / w]\n elif scale is None:\n scale_h, scale_w = [np.random.beta(alpha, alpha), np.random.beta(alpha, alpha)]\n center_x, center_y = np.random.random(), np.random.random()\n if clip_object:\n center_x = center_x * (1 - scale_w) + (scale_w / 2)\n center_y = center_y * (1 - scale_h) + (scale_h / 2)\n bbox = [center_x - (scale_w / 2), center_y - (scale_h / 2), center_x + (scale_w / 2), center_y + (scale_h / 2)]\n if clip:\n bbox = np.clip(bbox, 0, 1)\n if image_shape is not None:\n bbox = np.round(np.multiply(bbox, [w, h, w, h])).astype(np.int32)\n return bbox","repo_name":"vlarmet/flair_ign_2nd_place","sub_path":"vincent/utils/TFDetection-main/tfdet/core/bbox/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35220287295","text":"import random\nimport pygame as pg\nfrom sys import exit\n\npg.init()\n\nbg_music = pg.mixer.music.load(\"assets/audio/bgcat.mp3\")\npg.mixer.music.play(-1)\npg.mixer.music.set_volume(0.1)\ncollision_sound = pg.mixer.Sound(\"assets/audio/smw_coin.wav\")\n\nscreen = [640, 480]\n\nscreen_set = pg.display.set_mode((screen[0], screen[1]))\npg.display.set_caption(\"First Game Ever\")\nclock = pg.time.Clock()\nfont = pg.font.SysFont(\"Arial\", 30, True)\n\nsnake_size = [20, 20]\nsnake_pos = [int(screen[0] / 2 - snake_size[0]), int(screen[1] / 2 - snake_size[1])]\nvelocity = 20\ndirection = [1, 
0]\nscore = 0\ntrail = []\n\napple_pos = [random.randint(200, 200), random.randint(200, 200)]\napple_size = [20, 20]\n\n\ndef drawSnake(trail):\n for i in range(len(trail)):\n pg.draw.rect(\n screen_set,\n (0, 255, 0),\n (trail[i][0], trail[i][1], snake_size[0], snake_size[1]),\n )\n for i in range(len(trail)):\n if len(trail) > score + 1:\n trail.pop(0)\n\n return trail\n\n\ndef changeDirection(key):\n return (\n [0, -1] * (key == pg.K_UP and direction != [0, 1])\n + [0, 1] * (key == pg.K_DOWN and direction != [0, -1])\n + [-1, 0] * (key == pg.K_LEFT and direction != [1, 0])\n + [1, 0] * (key == pg.K_RIGHT and direction != [-1, 0])\n + direction\n * (\n key not in (pg.K_UP, pg.K_DOWN, pg.K_LEFT, pg.K_RIGHT)\n or key == pg.K_UP\n and direction == [0, 1]\n or key == pg.K_DOWN\n and direction == [0, -1]\n or key == pg.K_LEFT\n and direction == [1, 0]\n or key == pg.K_RIGHT\n and direction == [-1, 0]\n )\n )\n\n\ndef restartGame():\n global snake_pos, snake_size, velocity, direction, score, trail, apple_pos, apple_size\n snake_pos = [int(screen[0] / 2 - snake_size[0]), int(screen[1] / 2 - snake_size[1])]\n velocity = 20\n direction = [1, 0]\n score = 0\n trail = []\n apple_pos = [random.randint(200, 200), random.randint(200, 200)]\n apple_size = [20, 20]\n\n\nwhile True:\n clock.tick(20)\n text = \"score: \" + str(score)\n rendered_text = font.render(text, True, (0, 0, 0))\n screen_set.fill((255, 255, 255))\n event_count = 0\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n exit()\n if event.type == pg.KEYDOWN and not event_count:\n direction = changeDirection(event.key)\n event_count += 1\n\n snake_pos = [\n snake_pos[0] + direction[0] * velocity,\n snake_pos[1] + direction[1] * velocity,\n ]\n if snake_pos[0] < 0 or snake_pos[0] >= screen[0]:\n snake_pos[0] = (screen[0]) * (snake_pos[0] < 1)\n if snake_pos[1] < 0 or snake_pos[1] >= screen[1]:\n snake_pos[1] = (screen[1]) * (snake_pos[1] < 1)\n\n trail.append(snake_pos)\n drawSnake(trail)\n\n snake = pg.draw.rect(\n screen_set,\n (0, 200, 0),\n (snake_pos[0], snake_pos[1], snake_size[0], snake_size[1]),\n )\n apple = pg.draw.rect(\n screen_set,\n (255, 0, 0),\n (apple_pos[0], apple_pos[1], apple_size[0], apple_size[1]),\n )\n\n if snake.colliderect(apple):\n apple_pos = [\n random.randint(0, 640 - apple_size[0]),\n random.randrange(0, 480 - apple_size[1]),\n ]\n score += 1\n collision_sound.play()\n if snake_pos in trail[:-1]:\n while True:\n reseted = 0\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n exit()\n if event.type == pg.KEYDOWN and event.key == pg.K_r:\n restartGame()\n reseted = 1\n if reseted:\n break\n\n screen_set.blit(rendered_text, (10, 10))\n\n pg.display.update()\n","repo_name":"PaulimRiss/Snake-Game","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72025974276","text":"from django.conf import settings\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils import translation\nfrom surveys.models import Respondent\nfrom django_countries.widgets import CountrySelectWidget\nfrom django_countries.fields import CountryField\nfrom surveys.models import SurveyInstanceItem, RatioSurveyInstanceItem\n\n\nclass AddRespondentForm(forms.Form):\n email = forms.EmailField(max_length = 150, label=_(\"Email address\"), 
widget=forms.TextInput(attrs={'placeholder': _('Required')}))\n first_name = forms.CharField(max_length = 255, label=_(\"First name\"), required=False, widget=forms.TextInput(attrs={'placeholder': _('Optional')}))\n last_name = forms.CharField(max_length = 255, label=_(\"Last name\"), required=False, widget=forms.TextInput(attrs={'placeholder': _('Optional')}))\n #receives_surveys = forms.BooleanField(label=\"Send surveys to this employee\", required=False, widget=forms.CheckboxInput(attrs={'default': 'true'}))\n\n def clean_email(self):\n if Respondent.objects.filter(email=self.cleaned_data['email']).exists():\n raise forms.ValidationError(\n _(\"An employee with that email already exists in our database (%(taken_email)s).\"),\n code='invalid',\n params={'taken_email': self.cleaned_data['email']}\n )\n return self.cleaned_data['email']\n\n\nclass EditRespondentForm(AddRespondentForm):\n def __init__(self, *args, **kwargs):\n self.respondent_id = kwargs.pop('respondent_id', None)\n super(EditRespondentForm, self).__init__(*args, **kwargs)\n\n def clean_email(self):\n if Respondent.objects.filter(email=self.cleaned_data['email']).exists():\n existing_respondent = Respondent.objects.get(email=self.cleaned_data['email'])\n if existing_respondent.id != self.respondent_id:\n raise forms.ValidationError(\n _(\"An employee with that email already exists in our database (%(taken_email)s).\"),\n code='invalid',\n params={'taken_email': self.cleaned_data['email']}\n )\n return self.cleaned_data['email']\n\nclass EditSurveySettingsForm(forms.Form):\n\n is_active = forms.BooleanField(\n label=_(\"Activate tracking?\"),\n required=False,\n widget=forms.CheckboxInput(attrs={'default': 'true'})\n )\n survey_interval = forms.ChoiceField(\n label=_(\"How often should employees in your organization be surveyed?\"),\n required=True,\n choices=(\n (90, _('Every 3 months (reccomended)')),\n (180, _('Every 6 months')),\n (365, _('Every year')),\n )\n )\n surveys_remain_open_days = forms.ChoiceField(\n label=_(\"How much time should employees be given to answer a survey?\"),\n required=True,\n choices=(\n (7, _('One week (recommended)')),\n (14, _('Two weeks')),\n (21, _('Three weeks')),\n (30, _('One month')),\n )\n )\n survey_language_preference = forms.ChoiceField(\n label=_(\"Set default survey language (this affects all instruments):\"),\n required = True,\n choices=settings.LANGUAGES,\n initial=translation.get_language()\n \n )\n\nclass ConsentToAnswerForm(forms.Form):\n consent_to_answer = forms.BooleanField(\n label=_(\"I consent to the collection of information in this survey\"),\n required=True,\n widget=forms.CheckboxInput(attrs={'default': 'false'})\n )\n def clean_consent_to_answer(self):\n if self.cleaned_data['consent_to_answer'] != True:\n raise forms.ValidationError(\n _(\"Please indicate that you consent to answer this survey to continue.\"),\n code='invalid',\n params={'consent_to_answer': self.cleaned_data['consent_to_answer']}\n )\n return self.cleaned_data['consent_to_answer']\n\nclass CustomChoiceField(forms.ChoiceField):\n def __init__(self, *args, **kwargs):\n self.min_value_description = kwargs.pop('min_value_description', _('Disagree'))\n self.max_value_description = kwargs.pop('max_value_description', _('Agree'))\n self.opt_out = kwargs.pop('opt_out', False)\n super(CustomChoiceField, self).__init__(*args, **kwargs)\n\nclass AnswerSurveyForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n #self.user = kwargs.pop('user',None)\n self.items = kwargs.pop('items', None)\n\n assert 
self.items is not None, \\\n \"tried to instantiate AnswerSurveyForm without providing 'items'\"\n assert isinstance(self.items, list), \\\n \"the 'items' variable provdided to AnswerSurveyForm must be a list but was %s.\"%(type(self.items))\n\n for item in self.items:\n assert isinstance(item, SurveyInstanceItem), \\\n \"survey_instance_items in 'items' provdided to AnswerSurveyForm must be of the type SurveyInstanceItem but at least one was %s:\\n --- \\\"%s\\\".\"\\\n %(type(item), item)\n\n super(AnswerSurveyForm, self).__init__(*args, **kwargs)\n\n for item in self.items:\n field_name = 'item_%s'%(item.pk)\n if isinstance(item, RatioSurveyInstanceItem):\n CHOICES = [(number, str(number)) for number in range (item.survey_item.item_dimension.scale.min_value, item.survey_item.item_dimension.scale.max_value+1)]\n if item.survey_item.item_dimension.scale.opt_out == True:\n CHOICES.append((\"chose_to_not_answer\", _(\"I don't know, or I don't want to answer\")))\n \n self.fields[field_name] = CustomChoiceField(\n min_value_description = item.survey_item.item_dimension.scale.min_value_description,\n max_value_description = item.survey_item.item_dimension.scale.max_value_description,\n opt_out = item.survey_item.item_dimension.scale.opt_out,\n label=item.survey_item.item_formulation,\n choices=CHOICES,\n help_text=item.survey_item.item_dimension.scale.instruction,\n label_suffix='',\n widget=forms.RadioSelect(attrs={\n 'class': 'form-check-input'\n })\n )\n\n #elif other types of scales\n else:\n logger.warning(\n \"%s %s: %s: tried to make a form field for the supplied item, but its subclass (%s) was not recognized:\\n---\\\"%s\\\"\"\\\n %(datetime.datetime.now().strftime('[%d/%m/%Y %H:%M:%S]'), 'WARNING: ', __name__, type(item), item)\n )\n","repo_name":"vetleen/meandmyteam","sub_path":"surveys/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31324941249","text":"\"\"\"\n第一部分:导入包\n从sklearn.cluster机器学习聚类包中导入KMeans聚类\n\"\"\"\n# coding=utf-8 \nfrom sklearn.cluster import Birch\nfrom sklearn.cluster import KMeans\n \n\"\"\"\n第二部分:数据集\nX表示二维矩阵数据,篮球运动员比赛数据\n总共20行,每行两列数据\n第一列表示球员每分钟助攻数:assists_per_minute\n第二列表示球员每分钟得分数:points_per_minute\n\"\"\"\n \nX = [[0.0888, 0.5885],\n [0.1399, 0.8291],\n [0.0747, 0.4974],\n [0.0983, 0.5772],\n [0.1276, 0.5703],\n [0.1671, 0.5835],\n [0.1906, 0.5276],\n [0.1061, 0.5523],\n [0.2446, 0.4007],\n [0.1670, 0.4770],\n [0.2485, 0.4313],\n [0.1227, 0.4909],\n [0.1240, 0.5668],\n [0.1461, 0.5113],\n [0.2315, 0.3788],\n [0.0494, 0.5590],\n [0.1107, 0.4799],\n [0.2521, 0.5735],\n [0.1007, 0.6318],\n [0.1067, 0.4326],\n [0.1956, 0.4280] \n ]\n \n#输出数据集\nprint(X)\n \n \n\"\"\"\n第三部分:KMeans聚类\nclf = KMeans(n_clusters=3) 表示类簇数为3,聚成3类数据,clf即赋值为KMeans\ny_pred = clf.fit_predict(X) 载入数据集X,并且将聚类的结果赋值给y_pred\n\"\"\"\n \nclf = KMeans(n_clusters=3)\ny_pred = clf.fit_predict(X)\n \n#输出完整Kmeans函数,包括很多省略参数\nprint(clf)\n#输出聚类预测结果,20行数据,每个y_pred对应X一行或一个球员,聚成3类,类标为0、1、2\nprint(y_pred)\n \n \n\"\"\"\n第四部分:可视化绘图\nPython导入Matplotlib包,专门用于绘图\nimport matplotlib.pyplot as plt 此处as相当于重命名,plt用于显示图像\n\"\"\"\n \nimport numpy as np\nimport matplotlib.pyplot as plt\n \n#获取第一列和第二列数据 使用for循环获取 n[0]表示X第一列\nx = [n[0] for n in X]\nprint(x)\ny = [n[1] for n in X]\nprint(y)\n \n#绘制散点图 参数:x横轴 y纵轴 c=y_pred聚类预测结果 marker类型 o表示圆点 *表示星型 x表示点\nplt.scatter(x, y, c=y_pred, marker='x')\n \n#绘制标题\nplt.title(\"Kmeans-Basketball Data\")\n 
\n#绘制x轴和y轴坐标\nplt.xlabel(\"assists_per_minute\")\nplt.ylabel(\"points_per_minute\")\n \n#设置右上角图例\nplt.legend([\"A\",\"B\",\"C\"])\n \n#显示图形\nplt.show()\n","repo_name":"eastmountyxz/Python-for-Data-Mining","sub_path":"blog02-Kmeans/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"zh","doc_type":"code","stars":120,"dataset":"github-code","pt":"62"} +{"seq_id":"9839838708","text":"import pandas as pd\nimport os\nimport numpy as np\nimport pickle\n\n# Define input and output data\nseq_len = 3\ndata_x = []\ndata_y = []\n\nfor flight in os.listdir('../allFlightsData'):\n try:\n print(flight)\n data = pd.read_csv(os.path.join('../allFlightsData', flight))\n\n # Scaling features\n data['altitude'] = data['altitude'] / 44000\n data['ground_speed'] = data['ground_speed'] / 750\n\n X = data[['latitude', 'longitude', 'altitude', 'ground_speed', 'heading_angle_sine', 'heading_angle_cosine']]\n y = data.transit_time.values\n y = y.reshape(-1, 1)\n data = np.concatenate((X, y), axis=1)\n\n for j in range(len(data) - seq_len):\n seq_in = data[j:j + seq_len, 0:-1]\n seq_out = data[j + seq_len - 1, -1]\n\n data_x.append(seq_in)\n data_y.append(seq_out)\n\n except (Exception,):\n continue\n\nwith open(\"../sequentialData/data_x_new.pkl\", \"wb\") as f:\n pickle.dump(np.array(data_x), f)\n\nwith open(\"../sequentialData/data_y_new.pkl\", \"wb\") as f:\n pickle.dump(np.array(data_y), f)\n","repo_name":"longtrd112/TanSonNhat_Thesis","sub_path":"lstm_keras/utils/generate_sequential_data.py","file_name":"generate_sequential_data.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8729110718","text":"from django.shortcuts import render\nfrom . import models\nfrom service.models import Service, SpecialService\nfrom ketong.navs import dynamic_navs\n\n\ndef show_company(request, key):\n\n if key == 'profile':\n profile = models.CompanyInfo.objects.last()\n context = {\n 'profile': profile,\n }\n context.update(dynamic_navs())\n return render(request, 'about_us/company.html', context)\n\n if key == 'culture':\n culture = models.CompanyCulture.objects.last()\n context = {\n 'culture': culture,\n }\n context.update(dynamic_navs())\n return render(request, 'about_us/company.html', context)\n\n if key == 'show':\n show = models.CompanyShow.objects.last()\n context = {\n 'show': show,\n }\n context.update(dynamic_navs())\n return render(request, 'about_us/company.html', context)\n\n if key == 'honor':\n honor = models.Honor.objects.last()\n context = {\n 'honor': honor,\n }\n context.update(dynamic_navs())\n return render(request, 'about_us/company.html', context)\n\n if key == 'video':\n videos = models.Video.objects.all()\n context = {\n 'videos': videos,\n }\n context.update(dynamic_navs())\n return render(request, 'about_us/company.html', context)","repo_name":"mooremok/ketongweb","sub_path":"apps/about_us/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39941487538","text":"def decode_input(text_in):\n \"\"\" Decodes `text_in`\n If text_in is is a string, \n then decode it as utf-8 string.\n If text_in is is a list of strings,\n then decode each string of it, \n then combine them into one outpust string. 
\n \"\"\"\n \n if type(text_in) == list:\n text_out = u' '.join([t.decode('utf-8') for t in text_in])\n else:\n text_out = text_in.decode('utf-8')\n return text_out","repo_name":"gr33ndata/dysl","sub_path":"dysl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"62"} +{"seq_id":"73762303236","text":"import xlwings as xw\r\nimport pandas as pd\r\nimport os\r\n\r\n# 全路径\r\nfull_path = os.getcwd() + '/files'\r\n# 启动Excel程序\r\napp = xw.App(visible=False, add_book=False)\r\n# 打开指定工作薄\r\nworkbook = app.books.open(os.path.join(full_path, '商品分类.xlsx'))\r\n# 异常捕获\r\ntry:\r\n # 取得工作薄中所有工作表\r\n sheet_list = workbook.sheets\r\n # 创建一个空的DataFrame\r\n empty_table = pd.DataFrame()\r\n # 遍历工作表\r\n for i, j in enumerate(sheet_list):\r\n # 当前当前工作表的数据\r\n t_values = j.range('A1').options(pd.DataFrame, header=1, index=False,\r\n expand='table').value\r\n # 调整列的顺序\r\n c_data = t_values.reindex(columns=['商品产地', '序号', '商品sku', '商品名称',\r\n '销售单价', '商品编号', '生产日期', '库存量'])\r\n # 将调整列顺序后的数据合并的创建的DataFrame对象中\r\n empty_table = empty_table.append(c_data, ignore_index=True)\r\n # 根据指定列筛选数据\r\n empty_table = empty_table.groupby('商品产地')\r\n # 创建一个新的工作薄\r\n new_workbook = xw.books.add()\r\n # 异常捕获\r\n try:\r\n # 遍历筛选的数据,idx对应商品产地,group对应物品所有明细数据\r\n for idx, group in empty_table:\r\n # 在工作薄中新增工作表,以商品产地命名工作表\r\n new_worksheet = new_workbook.sheets.add(idx)\r\n # 在新工作表中写入数据\r\n new_worksheet['A1'].options(index=False).value = group\r\n # 获取当前工作表数据区域右下角的单元格\r\n last_cell = new_worksheet['A1'].expand('table').last_cell\r\n # 获取数据区域最后一行的行号\r\n last_row = last_cell.row\r\n # 获取数据区域最后一列的列号\r\n last_column = last_cell.column\r\n # 将数据区域最后一列的列号(数字)转换为该列的列标(字母)\r\n last_column_letter = chr(64 + last_column)\r\n # 获取数据区域右下角单元格下方的单元格的位置\r\n sum_cell_name = f'{last_column_letter}{last_row + 1}'\r\n # 获取数据区域右下角单元格的位置\r\n sum_last_row_name = f'{last_column_letter}{last_row}'\r\n # 根据单元格位置构造Excel公式,对库存量进行求和\r\n formula = f'SUM({last_column_letter}2:{sum_last_row_name})'\r\n # 将求和公式写入数据区域右下角单元格下方的单元格中\r\n new_worksheet[sum_cell_name].formula = formula\r\n # 自动调整工作表的行高和列宽\r\n new_worksheet.autofit()\r\n # 保存新工作薄\r\n new_workbook.save(os.path.join(full_path, '商品产地.xlsx'))\r\n # 不管前面代码执行是否发生异常,都执行该语句块的语句\r\n finally:\r\n # 关闭新工作薄\r\n new_workbook.close()\r\n# 不管前面代码执行是否发生异常,都执行该语句块的语句\r\nfinally:\r\n # 关闭工作薄\r\n workbook.close()\r\n# 退出Excel程序\r\napp.quit()\r\n","repo_name":"liuyuzhou/pythonoperexcel","sub_path":"chapter8/books_data_select.py","file_name":"books_data_select.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"30532607219","text":"#This example demonstrates the LEDBarDisplay, LEDDisplay, and SymbolDisplay\n#classes.\n\nimport tkinter as tk\nimport sys \nsys.path.append ('../widgets') \nimport LEDDisplay as ld\nimport SymbolDisplay as sd\nimport LEDBarDisplay as lbd\n\nclass LEDBarDemo(tk.Tk):\n def __init__(self):\n super().__init__()\n self.lbd1=lbd.LEDBarDisplay(self)\n self.button1=tk.Button(self, text=\"Change Color\", command=self.color_me)\n self.button2=tk.Button(self, text=\"Change Orientation\", \\\n command=self.rotate_me)\n self.button_quit=tk.Button(self, text=\"Quit\", command=self.destroy)\n\n self.lbd1.pack()\n self.button1.pack()\n self.button2.pack()\n self.button_quit.pack()\n\n self.color1=\"blue\"\n self.orient1=\"vertical\"\n self.lbd1.set_all_color(self.color1)\n\n tk.mainloop()\n\n def 
color_me(self):\n if (self.color1==\"blue\"):\n self.color1=\"purple\"\n else:\n self.color1=\"blue\"\n self.lbd1.set_all_color(self.color1)\n\n def rotate_me(self):\n if (self.orient1==\"vertical\"):\n self.orient1=\"horizontal\"\n else:\n self.orient1=\"vertical\"\n self.lbd1.set_orientation(self.orient1)\n \n\nif __name__==\"__main__\":\n mygui=LEDBarDemo()\n","repo_name":"amitofsk/dandy","sub_path":"src/examples/LEDBarDemo.py","file_name":"LEDBarDemo.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"20057096149","text":"# A pole is attached by an un-actuated joint to a cart,\n# which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart.\n# The pendulum starts upright, and the goal is to prevent it from falling over.\n# A reward of +1 is provided for every timestep that the pole remains upright.\n# The episode ends when the pole is more than 15 degrees from vertical,\n# or the cart moves more than 2.4 units from the center.\n\n\nimport gym\nimport random\nfrom keras import Sequential\nfrom collections import deque\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nenv = gym.make('CartPole-v0')\nenv.seed(0)\nnp.random.seed(0)\n\n\nclass DQN:\n\n \"\"\" Implementation of deep q learning algorithm \"\"\"\n\n def __init__(self, action_space, state_space):\n\n self.action_space = action_space\n self.state_space = state_space\n self.epsilon = 1\n self.gamma = .95\n self.batch_size = 64\n self.epsilon_min = .01\n self.epsilon_decay = .995\n self.learning_rate = 0.001\n self.memory = deque(maxlen=10000)\n self.model = self.build_model()\n\n def build_model(self):\n\n model = Sequential()\n model.add(Dense(24, input_shape=(self.state_space,), activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(self.action_space, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n return model\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state):\n\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_space)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0])\n\n def replay(self):\n\n if len(self.memory) < self.batch_size:\n return\n\n minibatch = random.sample(self.memory, self.batch_size)\n states = np.array([i[0] for i in minibatch])\n actions = np.array([i[1] for i in minibatch])\n rewards = np.array([i[2] for i in minibatch])\n next_states = np.array([i[3] for i in minibatch])\n dones = np.array([i[4] for i in minibatch])\n\n states = np.squeeze(states)\n next_states = np.squeeze(next_states)\n\n targets = rewards + self.gamma*(np.amax(self.model.predict_on_batch(next_states), axis=1))*(1-dones)\n targets_full = self.model.predict_on_batch(states)\n\n ind = np.array([i for i in range(self.batch_size)])\n targets_full[[ind], [actions]] = targets\n\n self.model.fit(states, targets_full, epochs=1, verbose=0)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n\ndef train_dqn(episode):\n\n loss = []\n agent = DQN(env.action_space.n, env.observation_space.shape[0])\n for e in range(episode):\n state = env.reset()\n state = np.reshape(state, (1, 4))\n score = 0\n max_steps = 1000\n for i in range(max_steps):\n env.render()\n action = agent.act(state)\n 
next_state, reward, done, _ = env.step(action)\n            score += reward\n            next_state = np.reshape(next_state, (1, 4))\n            agent.remember(state, action, reward, next_state, done)\n            state = next_state\n            agent.replay()\n            if done:\n                print(\"episode: {}/{}, score: {}\".format(e, episode, score))\n                break\n        loss.append(score)\n    return loss\n\n\ndef random_policy(episode, step):\n\n    for i_episode in range(episode):\n        env.reset()\n        for t in range(step):\n            env.render()\n            action = env.action_space.sample()\n            state, reward, done, info = env.step(action)\n            if done:\n                print(\"Episode finished after {} timesteps\".format(t+1))\n                break\n        print(\"Starting next episode\")\n\n\nif __name__ == '__main__':\n\n    ep = 100\n    loss = train_dqn(ep)\n    plt.plot([i+1 for i in range(0, ep, 2)], loss[::2])\n    plt.show()\n","repo_name":"shivaverma/OpenAIGym","sub_path":"cart-pole/CartPole-v0.py","file_name":"CartPole-v0.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"62"}
{"seq_id":"8609814684","text":"\"\"\"\n\n@author: Bingwei Chen\n\n@time: 8/4/17\n\n@desc: report_table route for e-bike repair reports\n\nwith filtering\n\n\"\"\"\n\nfrom flask import Blueprint\nfrom flask import jsonify\nfrom flask import request\n\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\n# from playhouse.shortcuts import model_to_dict\nfrom server.utility.json_utility import models_to_json\nfrom server.service import report_table_service\n\nPREFIX = '/report_table'\n\nreport_table_app = Blueprint(\"report_table_app\", __name__, url_prefix=PREFIX)\n\n\n# Report an e-bike for repair\n@report_table_app.route('', methods=['PUT'])\n@jwt_required\ndef add():\n    \"\"\"\n    eg = {\n        # \"username\": \"bingwei\",\n        \"appointment\": 17,\n        \"address\": \"\",\n        \"comment\": \"\",\n        \"phone\": 111111,\n    }\n\n    :return:\n    :rtype:\n    \"\"\"\n    username = get_jwt_identity()\n    data = request.get_json()\n    report_table = report_table_service.add(\n        appointment=data.pop(\"appointment\"),\n        user=username,\n        address=data.pop(\"address\"),\n        comment=data.pop(\"comment\"),\n        phone=data.pop(\"phone\"),\n    )\n    # report_table = model_to_dict(report_table)\n    # Filter the output fields\n    report_table = {\n        \"username\": report_table.user.username,\n        \"appointment\": report_table.appointment.id,\n        \"address\": report_table.address,\n        \"comment\": report_table.comment,\n        \"phone\": report_table.phone,\n        \"date\": report_table.date\n    }\n    return jsonify({'response': report_table}), 200\n\n\n# Get repair report records\n@report_table_app.route('/all', methods=['GET'])\n@jwt_required\ndef get_all():\n    username = get_jwt_identity()\n    # username = request.args.get(\"username\")\n    report_tables = report_table_service.get_all(\n        user=username\n    )\n    report_tables = models_to_json(report_tables, recurse=False)\n    # for i in range(len(report_tables)):\n    #     report_tables[i][\"user\"] = report_tables[i][\"user\"][\"username\"]\n    #     report_tables[i][\"appointment\"].pop(\"user\")\n    return jsonify({'response': report_tables}), 200\n","repo_name":"bingweichen/GOKU","sub_path":"backend/server/route/report_table_route.py","file_name":"report_table_route.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"}
{"seq_id":"44165839334","text":"import json\nimport uuid\nfrom datetime import datetime\nfrom typing import List, Optional\n\nimport validators\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom pydantic import BaseModel, Field\nfrom sqlalchemy.orm import Session\n\nfrom 
app.core.dependencies import depends_db\nfrom app.dynamic.config.models import Api, EndpointConfig\nfrom app.dynamic.converter import Converter\nfrom app.dynamic.endpoints.endpoint import Endpoint, EndpointResolver\nfrom app.dynamic.event_dispatcher import EventDispatcher\nfrom app.dynamic.models_resolver import ModelsResolver\nfrom app.dynamic.utils.response import ResponseOK\nfrom app.extensions.change_logger.db.tables import ChangeLogTable\nfrom app.extensions.users.db.tables import IS_ACTIVE, UsersTable\nfrom app.extensions.users.dependencies import (\n depends_current_active_user_with_permission_curried,\n depends_user_repository,\n)\nfrom app.extensions.users.permissions import UserManagementPermissions\nfrom app.extensions.users.repository.user_repository import UserRepository\n\n\nclass EditUser(BaseModel):\n Gebruikersnaam: Optional[str] = Field(None, nullable=True)\n Email: Optional[str] = Field(None, nullable=True)\n Rol: Optional[str] = Field(None, nullable=True)\n IsActive: Optional[bool] = Field(None, nullable=True)\n\n\nclass EditUserEndpointHandler:\n def __init__(\n self,\n db: Session,\n repository: UserRepository,\n logged_in_user: UsersTable,\n user_uuid: uuid.UUID,\n allowed_roles: List[str],\n object_in: EditUser,\n ):\n self._db: Session = db\n self._repository: UserRepository = repository\n self._logged_in_user: UsersTable = logged_in_user\n self._user_uuid: uuid.UUID = user_uuid\n self._allowed_roles: List[str] = allowed_roles\n self._object_in: EditUser = object_in\n self._timepoint: datetime = datetime.utcnow()\n\n def handle(self):\n changes: dict = self._object_in.dict(exclude_unset=True)\n if not changes:\n raise HTTPException(400, \"Nothing to update\")\n\n user: Optional[UsersTable] = self._repository.get_by_uuid(self._user_uuid)\n if not user:\n raise ValueError(f\"User does not exist\")\n\n if self._object_in.Email:\n same_email_user: Optional[UsersTable] = self._repository.get_by_email(self._object_in.Email)\n if same_email_user and same_email_user.UUID != user.UUID:\n raise ValueError(f\"Email already in use\")\n\n user_before_dict: dict = user.to_dict_safe()\n log_before: str = json.dumps(user_before_dict)\n\n # We handle IsActive separately as that is not really a column\n handle_is_active: Optional[bool] = changes.pop(\"IsActive\", None)\n if handle_is_active == True:\n user.Status = IS_ACTIVE\n elif handle_is_active == False:\n user.Status = \"\"\n\n for key, value in changes.items():\n setattr(user, key, value)\n\n if not validators.email(user.Email):\n raise ValueError(\"Invalid email\")\n if user.Rol not in self._allowed_roles:\n raise ValueError(\"Invalid Rol\")\n\n user_after_dict: dict = user.to_dict_safe()\n\n change_log: ChangeLogTable = ChangeLogTable(\n Created_Date=self._timepoint,\n Created_By_UUID=self._logged_in_user.UUID,\n Action_Type=\"edit_user\",\n Action_Data=self._object_in.json(),\n Before=log_before,\n After=json.dumps(user_after_dict),\n )\n\n self._db.add(change_log)\n self._db.add(user)\n self._db.flush()\n self._db.commit()\n\n return ResponseOK(message=\"OK\")\n\n\nclass EditUserEndpoint(Endpoint):\n def __init__(self, path: str, allowed_roles: List[str]):\n self._path: str = path\n self._allowed_roles: List[str] = allowed_roles\n\n def register(self, router: APIRouter) -> APIRouter:\n def fastapi_handler(\n user_uuid: uuid.UUID,\n object_in: EditUser,\n logged_in_user: UsersTable = Depends(\n depends_current_active_user_with_permission_curried(UserManagementPermissions.can_edit_user),\n ),\n db: Session = 
Depends(depends_db),\n            repository: UserRepository = Depends(depends_user_repository),\n        ) -> ResponseOK:\n            handler: EditUserEndpointHandler = EditUserEndpointHandler(\n                db,\n                repository,\n                logged_in_user,\n                user_uuid,\n                self._allowed_roles,\n                object_in,\n            )\n            return handler.handle()\n\n        router.add_api_route(\n            self._path,\n            fastapi_handler,\n            methods=[\"POST\"],\n            response_model=ResponseOK,\n            summary=\"Edit user\",\n            description=None,\n            tags=[\"User\"],\n        )\n\n        return router\n\n\nclass EditUserEndpointResolver(EndpointResolver):\n    def get_id(self) -> str:\n        return \"edit_user\"\n\n    def generate_endpoint(\n        self,\n        event_dispatcher: EventDispatcher,\n        converter: Converter,\n        models_resolver: ModelsResolver,\n        endpoint_config: EndpointConfig,\n        api: Api,\n    ) -> Endpoint:\n        resolver_config: dict = endpoint_config.resolver_data\n\n        path: str = endpoint_config.prefix + resolver_config.get(\"path\", \"\")\n        if \"{user_uuid}\" not in path:\n            raise RuntimeError(\"Missing {user_uuid} argument in path\")\n        allowed_roles: List[str] = resolver_config.get(\"allowed_roles\")\n\n        return EditUserEndpoint(path, allowed_roles)\n","repo_name":"Provincie-Zuid-Holland/Omgevingsbeleid-API","sub_path":"app/extensions/users/endpoints/edit_user.py","file_name":"edit_user.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"}
{"seq_id":"7951329585","text":"import re\nimport validators\n\ndef actionable_id(identifier_string, return_resolver=True):\n    if validators.url(identifier_string) and \"/staff-profiles/\" in identifier_string.lower():\n        return {\n            \"url\": identifier_string,\n            \"profile\": identifier_string.split(\"?\")[0]\n        }\n\n    if validators.email(identifier_string):\n        return {\n            \"email\": identifier_string\n        }\n\n    identifiers = {\n        \"doi\": {\n            \"pattern\": r\"10.\\d{4,9}\\/[\\S]+$\",\n            \"resolver\": \"https://doi.org/\"\n        },\n        \"orcid\": {\n            \"pattern\": r\"\\d{4}-\\d{4}-\\d{4}-\\w{4}\",\n            \"resolver\": \"https://orcid.org/\"\n        }\n    }\n    for k,v in identifiers.items():\n        search = re.search(v[\"pattern\"], identifier_string)\n        if search:\n            d_identifier = {\n                k: search.group()\n            }\n            if return_resolver and v[\"resolver\"] is not None:\n                d_identifier[\"url\"] = f\"{v['resolver']}{search.group().upper()}\"\n\n            return d_identifier\n\n    return None\n\ndef chunks(dict_list, chunk_size=1000):\n    for i in range(0, len(dict_list), chunk_size):\n        yield dict_list[i:i+chunk_size]\n\ndef doi_from_string(str_value):\n    checker = re.findall(r'(10[.][0-9]{4,}[^\\s\"/<>]*/[^\\s\"<>]+)', str_value)\n    link_part_pointers = [\n        \"/abstract\",\n        \"/full\",\n        \"/summary\"\n    ]\n    \n    # Assign the cleaned values back and collect them; str.replace returns a\n    # new string rather than mutating in place, so the original loop had no effect.\n    cleaned = []\n    for doi_string in checker:\n        for part in link_part_pointers:\n            if part in doi_string:\n                doi_string = doi_string.replace(part, '')\n        if doi_string[-1] == \".\":\n            doi_string = doi_string[0:-1]\n        cleaned.append(doi_string)\n    \n    return cleaned\n","repo_name":"skybristol/pylinkedcmd","sub_path":"pylinkedcmd/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"}
{"seq_id":"4271032897","text":"import frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import cint\nfrom pypika.functions import Count\nfrom pypika.queries import Query\n\n\nclass HDArticleCategory(Document):\n\t@staticmethod\n\tdef get_list_select(query: Query):\n\t\tQBCategory = frappe.qb.DocType(\"HD Article Category\")\n\t\tQBArticle = frappe.qb.DocType(\"HD Article\")\n\t\tcount_article = 
(\n\t\t\tfrappe.qb.from_(QBArticle)\n\t\t\t.select(Count(\"*\"))\n\t\t\t.as_(\"count_article\")\n\t\t\t.where(QBArticle.category == QBCategory.name)\n\t\t)\n\t\tquery = query.select(QBCategory.star).select(count_article)\n\t\treturn query\n\n\tdef before_save(self):\n\t\tif self.idx == -1 and self.status == \"Published\":\n\t\t\t# index is only set if its not set already, this allows defining\n\t\t\t# index at the time of creation itself if not set the index is set\n\t\t\t# to the last index + 1, i.e. the category is added at the end\n\t\t\tself.idx = cint(\n\t\t\t\tfrappe.db.count(\n\t\t\t\t\t\"HD Article Category\", {\"parent_category\": self.parent_category}\n\t\t\t\t)\n\t\t\t)\n\n\tdef archive(self):\n\t\tself.idx = -1\n\t\tself.status = \"Archived\"\n\t\tself.save()\n\n\tdef unarchive(self):\n\t\tself.status = \"Published\"\n\t\tself.save()\n\n\tdef get_breadcrumbs(self):\n\t\tbreadcrumbs = [{\"name\": self.name, \"label\": self.category_name}]\n\t\tcurrent_category = self\n\t\twhile current_category.parent_category:\n\t\t\tcurrent_category = frappe.get_doc(\n\t\t\t\t\"HD Article Category\", current_category.parent_category\n\t\t\t)\n\t\t\tbreadcrumbs.append(\n\t\t\t\t{\"name\": current_category.name, \"label\": current_category.category_name}\n\t\t\t)\n\t\treturn breadcrumbs[::-1]\n","repo_name":"frappe/helpdesk","sub_path":"helpdesk/helpdesk/doctype/hd_article_category/hd_article_category.py","file_name":"hd_article_category.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"62"} +{"seq_id":"33151532113","text":"import pygame\nimport pygame_gui\nfrom Cell import Cells\nfrom random import randrange\nfrom Smiley import Smiley\nfrom StatisticsInMinesweeper import StatisticsInMinesweeper\nimport sqlite3\n\n\nclass Minesweeper:\n def __init__(self, mines_count, count_cells, complexity):\n self.CELL_SIZE = 30\n self.complexity = complexity\n self.mines_count = mines_count\n self.flag_count = mines_count\n self.count_cells = count_cells\n self.create_window()\n self.manager = pygame_gui.UIManager((self.width, self.height))\n self.smiley = Smiley(self.width)\n self.create_show_statistics_button()\n self.start()\n\n def create_window(self):\n pygame.init()\n pygame.display.set_caption('Minesweeper')\n self.time = 0\n self.first_action = True\n self.stop_game = False\n self.width = self.CELL_SIZE * self.count_cells[0]\n self.height = self.CELL_SIZE * self.count_cells[1] + 80\n self.size = self.width, self.height\n self.screen = pygame.display.set_mode(self.size)\n self.screen.fill(pygame.Color((180, 180, 180)))\n self.create_board()\n self.MYEVENTTYPE = pygame.USEREVENT + 1\n pygame.time.set_timer(self.MYEVENTTYPE, 100)\n\n def render(self):\n for i in range(self.count_cells[1]):\n y = i * self.CELL_SIZE + 40\n for j in range(self.count_cells[0]):\n x = j * self.CELL_SIZE\n pygame.draw.rect(self.screen, pygame.Color('black'),\n pygame.Rect((x, y), (self.CELL_SIZE, self.CELL_SIZE)), width=1)\n self.draw_flag_count()\n self.draw_time()\n self.draw_smiley()\n\n def draw_flag_count(self):\n width = 60\n height = 40\n pygame.draw.rect(self.screen, pygame.Color('black'),\n pygame.Rect((0, 0), (width, height)))\n font = pygame.font.SysFont('gothic', 30)\n text = font.render(str(self.flag_count).rjust(2, '0'), True, (255, 0, 0))\n text_x = width // 2 - text.get_width() // 2\n text_y = height // 2 - text.get_height() // 2\n self.screen.blit(text, (text_x, text_y))\n\n def draw_time(self):\n width = 60\n height = 
40\n x = self.screen.get_width() - width\n y = 0\n pygame.draw.rect(self.screen, pygame.Color('black'),\n pygame.Rect((x, y), (width, height)))\n font = pygame.font.SysFont('gothic', 30)\n text = font.render(str(int(self.time // 10)).rjust(3, '0'), True, (255, 0, 0))\n text_x = (width // 2 - text.get_width() // 2) + x\n text_y = abs(y // 2 - text.get_height() // 2)\n self.screen.blit(text, (text_x, text_y))\n\n def draw_smiley(self):\n self.smiley.update()\n self.smiley.draw(self.screen)\n pygame.draw.rect(self.screen, pygame.Color('black'),\n pygame.Rect((self.smiley.sprite.rect.x, self.smiley.sprite.rect.y),\n (self.smiley.sprite.rect.width, self.smiley.sprite.rect.height)), width=1)\n\n def create_board(self):\n self.board = []\n for row in range(self.count_cells[1]):\n line = Cells(self.count_cells[0], row)\n self.board.append(line)\n\n def place_mines(self, exception):\n if exception == None:\n return\n current_mines = 0\n while current_mines < self.mines_count:\n cell = self.board[randrange(0, self.count_cells[1])].sprites()[randrange(0, self.count_cells[0])]\n if cell != exception and cell.condition != -2:\n cell.condition = -2\n current_mines += 1\n self.first_action = False\n self.open_cell(exception)\n\n def open_cell(self, cell):\n if cell == None:\n return\n if cell.condition == -2:\n cell.rect.x += 1\n cell.rect.y += 1\n cell.condition = 15\n self.loss()\n elif cell.condition == -1:\n cell.condition = 1\n cell.rect.x += 1\n cell.rect.y += 1\n neighbors = self.get_neighbors(cell.row, cell.column)\n mines_around = 0\n for neighbor in neighbors:\n if neighbor.condition in [-2, 13, 14, 15]:\n mines_around += 1\n if mines_around:\n cell.mines_around = mines_around\n else:\n for neighbor in neighbors:\n if neighbor.condition != -2:\n self.open_cell(neighbor)\n elif cell.condition == 1:\n neighbors = self.get_neighbors(cell.row, cell.column)\n mines_around = 0\n flag_around = 0\n for neighbor in neighbors:\n if neighbor.condition == -2:\n mines_around += 1\n elif neighbor.condition == 12:\n flag_around += 1\n if not mines_around:\n for neighbor in neighbors:\n if neighbor.condition not in [-2, 1]:\n self.open_cell(neighbor)\n else:\n if mines_around == flag_around:\n for neighbor in neighbors:\n if neighbor.condition == -2:\n neighbor.condition = 15\n self.loss()\n\n def loss(self):\n self.open_board()\n self.stop_game = True\n self.smiley.sprite.state = 0\n\n def victory(self):\n self.stop_game = True\n self.smiley.sprite.state = 1\n self.check_statistics()\n\n def open_board(self):\n for row in self.board:\n for cell in row.sprites():\n if cell.condition == -1:\n self.open_cell(cell)\n elif cell.condition == -2:\n cell.condition = 14\n cell.rect.x += 1\n cell.rect.y += 1\n elif cell.condition == 12:\n cell.condition = 16\n\n def get_neighbors(self, row_cell, column_cell):\n neighbors = []\n for row in range(row_cell - 1, row_cell + 2):\n for column in range(column_cell - 1, column_cell + 2):\n if 0 <= row <= self.count_cells[1] - 1 and 0 <= column <= self.count_cells[0] - 1:\n if row != row_cell or column != column_cell:\n neighbors.append(self.board[row].sprites()[column])\n return neighbors\n\n def cell_search(self, position):\n for line in self.board:\n for cell in line:\n if cell.rect.collidepoint(position):\n return cell\n\n def set_flag(self, cell):\n if cell == None:\n return\n if cell.condition == -2:\n cell.condition = 13\n self.flag_count -= 1\n elif cell.condition == -1:\n cell.condition = 12\n self.flag_count -= 1\n elif cell.condition == 12:\n cell.condition = -1\n 
self.flag_count += 1\n elif cell.condition == 13:\n cell.condition = -2\n self.flag_count += 1\n\n def restart(self):\n self.create_window()\n self.smiley.sprite.state = 2\n self.flag_count = self.mines_count\n self.manager = pygame_gui.UIManager((self.width, self.height))\n self.create_show_statistics_button()\n\n def checking_remaining_cells(self):\n close_cells = []\n for row in range(self.count_cells[1]):\n for column in range(self.count_cells[0]):\n cell = self.board[row].sprites()[column]\n if cell.condition == -2:\n close_cells.append(cell)\n elif cell.condition == -1:\n return None\n return close_cells\n\n def create_show_statistics_button(self):\n self.show_statistics_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((self.width / 2 - 90, self.height - 35), (180, 30)),\n text='Посмотреть статистику',\n manager=self.manager)\n\n def show_statistics(self):\n self.statistics_table = StatisticsInMinesweeper()\n self.create_window()\n\n def output_statistics(self, best_time, new_record):\n font = pygame.font.SysFont('Gothic', 15)\n old_height = self.height\n if new_record:\n self.height += 55\n else:\n self.height += 40\n self.size = self.width, self.height\n self.screen = pygame.display.set_mode(self.size)\n self.screen.fill(pygame.Color((180, 180, 180)))\n if new_record:\n text = font.render('Новый рекорд!', True, (0, 0, 0))\n self.screen.blit(text, (0, old_height))\n pygame.draw.line(self.screen, 'black', (0, old_height), (self.width, old_height), width=1)\n pygame.draw.rect(self.screen, 'black', ((0, self.height - 40), (self.width / 2, 40)), width=1)\n pygame.draw.rect(self.screen, 'black', ((self.width / 2, self.height - 40), (self.width / 2, 40)), width=1)\n pygame.draw.line(self.screen, 'black', (0, self.height - 20), (self.width, self.height - 20), width=1)\n\n left_column_x = self.width / 4\n right_column_x = self.width / 2 + self.width / 4\n top_row_y = self.height - 30\n bottom_row_y = self.height - 10\n\n title_number_1 = font.render('Текущий результат', True, (0, 0, 0))\n title_number_1_x = left_column_x - title_number_1.get_width() // 2\n title_number_1_y = top_row_y - title_number_1.get_height() // 2\n self.screen.blit(title_number_1, (title_number_1_x, title_number_1_y))\n\n title_number_2 = font.render('Лучший результат', True, (0, 0, 0))\n title_number_2_x = right_column_x - title_number_2.get_width() // 2\n title_number_2_y = top_row_y - title_number_2.get_height() // 2\n self.screen.blit(title_number_2, (title_number_2_x, title_number_2_y))\n\n current_result = font.render(str(self.time / 10), True, (0, 0, 0))\n current_result_x = left_column_x - current_result.get_width() // 2\n current_result_y = bottom_row_y - current_result.get_height() // 2\n self.screen.blit(current_result, (current_result_x, current_result_y))\n\n best_result = font.render(str(best_time / 10), True, (0, 0, 0))\n best_result_x = right_column_x - best_result.get_width() // 2\n best_result_y = bottom_row_y - best_result.get_height() // 2\n self.screen.blit(best_result, (best_result_x, best_result_y))\n\n def check_statistics(self):\n database = sqlite3.connect('data/database.sql')\n cursor = database.cursor()\n time_in_statistics = cursor.execute('''SELECT time from statistics_in_minesweeper \n WHERE complexity=?''', (self.complexity,)).fetchone()[0]\n if self.time > int(time_in_statistics) and time_in_statistics != 0:\n self.output_statistics(time_in_statistics, False)\n else:\n self.output_statistics(self.time, True)\n cursor.execute('''UPDATE statistics_in_minesweeper\n SET 
time = ?\n WHERE complexity=?''', (self.time, self.complexity))\n database.commit()\n database.close()\n\n def hide_show_statistics_button(self):\n self.show_statistics_button.kill()\n self.width = self.CELL_SIZE * self.count_cells[0]\n self.height = self.CELL_SIZE * self.count_cells[1] + 40\n self.size = self.width, self.height\n self.screen = pygame.display.set_mode(self.size)\n self.screen.fill(pygame.Color((180, 180, 180)))\n\n def start(self):\n fps = 60\n clock = pygame.time.Clock()\n self.running = True\n while self.running:\n time_delta = clock.tick(fps) / 1000.0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if not self.stop_game:\n close_cells = self.checking_remaining_cells()\n if close_cells != None and len(close_cells) == self.flag_count:\n for cell in close_cells:\n self.set_flag(cell)\n if self.flag_count == 0:\n win = True\n for row in range(self.count_cells[1]):\n for column in range(self.count_cells[0]):\n if self.board[row].sprites()[column].condition == -2 or \\\n self.board[row].sprites()[column].condition == -1:\n win = False\n break\n if win:\n self.victory()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 3 and not self.first_action:\n self.set_flag(self.cell_search(event.pos))\n elif event.button == 1:\n if self.first_action:\n if self.cell_search(event.pos) != None:\n self.hide_show_statistics_button()\n self.place_mines(self.cell_search(event.pos))\n else:\n self.open_cell(self.cell_search(event.pos))\n if event.type == self.MYEVENTTYPE and not self.first_action:\n self.time += 1\n else:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n if self.smiley.sprite.rect.collidepoint(event.pos):\n self.restart()\n if event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == self.show_statistics_button:\n self.show_statistics()\n clock = pygame.time.Clock()\n self.manager.process_events(event)\n clock.tick(fps)\n pygame.display.flip()\n self.render()\n self.manager.draw_ui(self.screen)\n self.manager.update(time_delta)\n for line in self.board:\n line.update()\n line.draw(self.screen)\n pygame.quit()","repo_name":"Yatsenko-Egor/MiniGames","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":14520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33013325221","text":"import subprocess\nimport os\nimport shutil\nimport glob\n\n# Concatenate MDs\nprint(\"Concating MDs\")\n\nwith open(\"preout.md\", \"wb\") as preout:\n for file in os.listdir(\"src\"):\n with open(f\"src/{file}\", \"rb\") as temp:\n print(f\"--{file}\")\n shutil.copyfileobj(temp, preout)\n\n\nprint(\"Generating Tex File\")\nsubprocess.run([\"pandoc\", \n \"--lua-filter\", \"lib/div2latexenv.lua\",\n\t\t \"--lua-filter\", \"lib/minted.lua\",\n\t\t \"--template\", \"lib/main.tex\",\n\t\t \"preout.md\", \"-o\", \"out.tex\"])\n\n\nprint(\"Generating PDF\")\nsubprocess.run([\"xelatex\", \"-shell-escape\", \"out.tex\"])\n#Run two time to get references right\nsubprocess.run([\"xelatex\", \"-shell-escape\", \"out.tex\"])\n\n\nprint(\"Cleanup\")\nos.replace(\"out.pdf\", \"out/handout.pdf\")\n\ncleanup_files = glob.glob(\"out.*\")\nfor cf in cleanup_files:\n 
os.remove(cf)\n\nos.remove(\"preout.md\")\n\n\n\n\n","repo_name":"emil-freme/milbook","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"12237255387","text":"from socket import socket\nimport utilities\n\nclass Server:\n    def __init__(self, addr, cachipunAddr):\n        # Set the address we listen on as the Cachipún Server's fixed address.\n        self.cachipunAddr = self.cachipunHearingAddr = cachipunAddr\n        # Create the socket used to communicate with the Cachipún Server.\n        self.cachipunS = socket(type = utilities.UDP)\n        # Create the socket the client will connect through.\n        self.s = socket(type = utilities.TCP)\n        self.s.bind(addr)\n        self.s.listen(1)\n        print(f'Servidor TCP establecido en el puerto: {addr[1]}')\n        # Accept and pin down the client connection.\n        self.clientS, clientAddr = self.s.accept()\n        print(f'Dirección del cliente: {clientAddr}\\nEscuchando al Servidor Cachipún en la dirección: {cachipunAddr}')\n    \n    def __enter__(self):\n        return self\n    \n    def __exit__(self, *args):\n        self.s.close()\n        self.cachipunS.close()\n        self.clientS.close()\n    \n    def hearClient(self, buffer: int = 2048) -> bytes:\n        return self.clientS.recv(buffer)\n    \n    def hearCachipun(self, msg: bytes, buffer: int = 2048) -> bytes:\n        # Send something to the Cachipún Server and see what it replies.\n        self.cachipunS.sendto(msg, self.cachipunHearingAddr)\n        return self.cachipunS.recv(buffer)\n\n    def requestGame(self, msg: bytes) -> bytes:\n        response, port = self.hearCachipun(msg).decode().split(',')\n        if response == 'OK':\n            print(f'Se cambió la dirección de escucha del Servidor Cachipún de {self.cachipunHearingAddr} a {(self.cachipunAddr[0], int(port))}')\n            self.cachipunHearingAddr = (self.cachipunAddr[0], int(port))\n        elif response != 'NO':\n            raise Exception()\n        return response.encode()\n    \n    def game(self):\n        clientCount = 0\n        cachipunCount = 0\n        # Run the game loop until either player wins 3 rounds.\n        while clientCount < 3 and cachipunCount < 3:\n            shapes = (self.hearClient().decode(), self.hearCachipun('GETSHAPE'.encode()).decode())\n            if utilities.beats[shapes] == utilities.WIN:\n                result = 'WIN'\n                clientCount += 1\n            elif (utilities.beats[shapes] == utilities.LOSE):\n                result = 'LOSE'\n                cachipunCount += 1\n            elif (utilities.beats[shapes] != utilities.DRAW):\n                raise Exception('Se recibió un valor inesperado para el resultado de la partida.')\n            else:\n                result = 'DRAW'\n\n            # If the client wins, send it the 'WIN' message and the data.\n            if clientCount == 3:\n                self.clientS.send(f'{result},{clientCount},{cachipunCount},{shapes[1]},WIN'.encode())\n                break\n            # If the Cachipún Server wins, send the client the 'LOSE' message and the data.\n            elif cachipunCount == 3:\n                self.clientS.send(f'{result},{clientCount},{cachipunCount},{shapes[1]},LOSE'.encode())\n                break\n            elif clientCount > 3 or cachipunCount > 3:\n                raise Exception('El contador del Cliente o del Servidor Cachipún ha superado las 3 victorias por juego.')\n            self.clientS.send(f'{result},{clientCount},{cachipunCount},{shapes[1]},CONTINUE'.encode())\n        # Once the match is over, request that the port opened to play it be closed\n        if self.hearCachipun('CLOSE'.encode()).decode() != 'OK':\n            raise Exception('Se ha recibido una respuesta del Servidor Cachipún distinta de \"OK\".')\n        print(f'Se cambió la dirección de escucha del Servidor Cachipún de {self.cachipunHearingAddr} a 
{self.cachipunAddr}')\n        self.cachipunHearingAddr = self.cachipunAddr\n\ndef main():\n    with Server(('localhost', 49152), ('localhost', 49153)) as server:\n        response = server.hearClient()\n        print(f'Solicitud {response.decode()} recibida.')\n        if response.decode() != 'REQUESTGAME':\n            raise Exception('Se recibió una respuesta del Cliente distinta de \"REQUESTGAME\".')\n        # Request a match.\n        response = server.requestGame(response)\n\n        while True:\n            server.clientS.send(response)\n            # If the match was accepted, proceed to play it.\n            if response.decode() == 'OK':\n                server.game()\n            response = server.hearClient()\n            print(f'Solicitud {response.decode()} recibida.')\n            if response.decode() == 'REQUESTGAME':\n                response = server.requestGame(response)\n            elif response.decode() == 'STOP':\n                # Request that the program be terminated.\n                response = server.hearCachipun(response)\n                if response.decode() != 'OK':\n                    raise Exception('El Servidor Cachipún ha respondido con un mensaje distinto de \"OK\"')\n                server.clientS.send(response)\n                print('Finalizando ejecución...')\n                break\n            else:\n                raise Exception('No se ha podido enviar el mensaje al Servidor Cachipún.')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lilkimo/INF256-Tarea-1","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"72621333317","text":"import cv2\n\n\ndef resize_frame(frame, view_width, view_height):\n    height, width, _ = frame.shape\n    reduction_factor = (float(view_height)) / height * 100\n    reduced_width = int(width * reduction_factor / 100)\n    reduced_height = int(height * reduction_factor / 100)\n    dim = (reduced_width, reduced_height)\n    resized_frame = cv2.resize(frame, dim, interpolation=cv2.INTER_LINEAR)\n    return resized_frame\n\n\ndef pixel_to_rgb(pixel):\n    bgr = tuple(float(x) for x in pixel[:3])\n    return tuple(reversed(bgr))\n\n\ndef get_video(filename):\n    return cv2.VideoCapture(filename)\n\n\ndef get_video_props(cap):\n    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n    length = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n    fps = cap.get(cv2.CAP_PROP_FPS)\n    return width, height, length, fps\n\n\ndef get_frame(cap, view_width=None, view_height=None):\n    while cap.isOpened():\n        _ret, frame = cap.read()\n        if frame is None:\n            break\n        else:\n            if view_height is None or view_width is None:\n                yield frame\n            else:\n                yield resize_frame(frame, view_width, view_height)\n","repo_name":"fraigo/python-video-to-canvas","sub_path":"video_utils.py","file_name":"video_utils.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"}
{"seq_id":"13431098911","text":"import nc_tools\nimport matplotlib.pyplot as plt\nimport numpy as np\nfile_Nov4 = ('/archive/Junyi.Chai/QG_exp/Nov4_drag5e-1', 'Nov4_drag5e-1_spectra_seg11.nc')\nfile_Nov5 = ('/archive/Junyi.Chai/QG_exp/Nov5_Sc1.6_drag5e-1', 'Nov5_Sc1.6_drag5e-1_spectra_seg11.nc')\nfile_Nov4Reso2x = ('/archive/Junyi.Chai/QG_exp/Nov4_Reso2x_Sc1.6_drag5e-1', 'Nov4_Reso2x_Sc1.6_drag5e-1_spectra_seg6.nc')\n\nvar_name = 'xferms'\nmode = 3\n\nvar_Nov4 = np.mean(nc_tools.ncread(file_Nov4[0], file_Nov4[1], var_name), 0)\nvar_Nov5 = np.mean(nc_tools.ncread(file_Nov5[0], file_Nov5[1], var_name), 0)\nvar_Nov4Reso2x = np.mean(nc_tools.ncread(file_Nov4Reso2x[0], file_Nov4Reso2x[1], var_name), 0)\n\nk = np.arange(1, 512)\nk2x = np.arange(1, 1024)\nplt.semilogx(k, 
k*var_Nov4[mode], label='kd=250')\nplt.semilogx(k/2, k/2*var_Nov5[mode], label='kd=500, k/2')\nplt.semilogx(k2x, k2x*var_Nov4Reso2x[mode], label='kd=250, reso x2')\nplt.legend(loc='best')\nplt.xlabel('Wavenumber')\n#plt.ylabel(r'$U\\mathrm{Re}[\\psi_{k}^{*}(\\partial\\nabla^{2}\\tau/\\partial x)_{k}]$')\nplt.ylabel(r'$\\mathrm{Re}[\\psi_{k}^{*}J_{k}(\\tau,\\nabla^{2}\\tau)]$')\nplt.xlim([0, 1023])\nplt.show()","repo_name":"mathsam/fluid_dynamics_analysis","sub_path":"plot/compare_gens.py","file_name":"compare_gens.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"13546564469","text":"def zad19(tab):\r\n n = len(tab)\r\n maks_dlugosc = 0\r\n for i in range(n): #start\r\n curr_dlugosc = 1\r\n sum_element = tab[i]\r\n sum_indeks = i\r\n for j in range(i, n-1): #dlugosc\r\n if tab[j] >= tab[j+1]:\r\n break\r\n else:\r\n curr_dlugosc += 1\r\n sum_element += tab[j+1]\r\n sum_indeks += j+1\r\n if sum_indeks == sum_element:\r\n maks_dlugosc = max(maks_dlugosc, curr_dlugosc)\r\n return maks_dlugosc\r\n\r\n\r\nx = [0,1,2,7,3,4,8,6]\r\nprint(zad19(x))\r\n","repo_name":"radoslawrolka/Introduction_to_Computer_Science_Course","sub_path":"Zestaw_3/z19_d.py","file_name":"z19_d.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26708656548","text":"\"\"\"\nThis module is responsible for writing data from the multimeter to a CSV file,\nit's a very simplistic wrapper around the `csv.writer` object - and performs\nsome basic transformation logic.\n\"\"\"\nimport logging\n\nfrom collections import namedtuple\nfrom csv import writer\nfrom datetime import datetime\nfrom typing import Optional, Union\n\nfrom fs9721_utils.reading import NonNumericReadingError, Reading, readable_unit\n\n\n_LOGGER = logging.getLogger(\"fs9721\")\n\n_CSV_COLUMNS = [\"time\", \"value\", \"unit\"]\n\nCSVRow = namedtuple(\"CSVRow\", _CSV_COLUMNS)\n\nLoggable = Union[CSVRow, Reading]\n\ndef _parse_reading(reading: Reading) -> CSVRow:\n read_at = datetime.now()\n\n try:\n value = reading.value()\n except NonNumericReadingError:\n value = \"L\"\n\n return CSVRow(\n time=read_at, value=value,\n unit=readable_unit(reading.units())\n )\n\n\nclass CSVWriterNotReadyError(Exception):\n \"\"\"\n CSVWriterNotReady is thrown when there's an attempt to write a row to the\n CSV file but the CSV file has already been closed. 
CSVWriter will not\n reopen the file by default.\n \"\"\"\n\n\nclass CSVWriter:\n \"\"\"\n Logger provides a simple wrapper for logging values to a CSV file; handling\n the transformation of values and the management of underlying files.\n\n When `filename` is specified it sets the name of the destination CSV file.\n When `auto_reopen` is specified then the CSV file will automatically be opened\n in the event that there's an attempted write and the file has been closed.\n \"\"\"\n def __init__(self, filename: Optional[str] = None, auto_reopen: bool = False):\n self.auto_reopen = auto_reopen\n self.csv = None\n self.writing = False\n\n self.filename = filename if filename else f\"{datetime.now().isoformat()}-dmm-log.csv\"\n self._open()\n\n def _open(self):\n _LOGGER.info(\"opening CSV file for writing\",extra={\"filename\": self.filename})\n self.output = open(self.filename, \"a\") # pylint: disable=consider-using-with,unspecified-encoding\n self.csv = writer(self.output)\n self.csv.writerow(_CSV_COLUMNS)\n self.writing = True\n\n def log_value(self, value, unit):\n \"\"\"logs out a key/value pairing of value and unit.\"\"\"\n self.log(CSVRow(time=datetime.now(), value=value, unit=unit))\n\n def log(self, entry: Loggable):\n \"\"\"logs out a `Loggable` type - either a `CSVRow` or a `Reading`.\"\"\"\n if not self.writing:\n _LOGGER.warning(\"attempt to write CSV data when file is unavailable\")\n if self.auto_reopen:\n self._open()\n else:\n raise CSVWriterNotReadyError\n\n if isinstance(entry, Reading):\n entry = _parse_reading(entry)\n\n row = [entry.time.isoformat(), entry.value, entry.unit]\n _LOGGER.debug(\"writing row to CSV file\", extra={\"row\": row})\n self.csv.writerow(row)\n\n def stop(self):\n \"\"\"closes the file for writing and marks itself as finished.\"\"\"\n _LOGGER.debug(\"csv logging stopping: closing file\")\n if self.writing:\n self.output.close()\n self.writing = False\n\n @property\n def is_logging(self) -> bool:\n \"\"\"returns whether or not the CSV logger has the file ready for writing\"\"\"\n return self.writing\n","repo_name":"FergusInLondon/fs9721-utils","sub_path":"fs9721_utils/csv_logger.py","file_name":"csv_logger.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"23674557200","text":"from stack.stack import Stack\n\n\ndef is_match(p1, p2):\n if p1 == \"(\" and p2 == \")\":\n return True\n elif p1 == \"{\" and p2 == \"}\":\n return True\n elif p1 == \"[\" and p2 == \"]\":\n return True\n else:\n return False\n pass\n\n\ndef is_balanced(parameter):\n\n is_balanced = True\n index = 0\n s=Stack()\n\n while index < len(parameter) and is_balanced:\n paren = parameter[index]\n if paren in \"({[\":\n s.push(paren)\n else:\n if s.is_empty():\n is_balanced=False\n break\n else:\n top = s.pop()\n if not is_match(top,paren):\n is_balanced = False\n break\n index = index + 1\n\n if s.is_empty() and is_balanced:\n return True\n else:\n return False\n\n\n\n\n","repo_name":"danonymous856/Jett_Interview","sub_path":"stack/determine_if_brackets_are_balanced.py","file_name":"determine_if_brackets_are_balanced.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19509059754","text":"import pandas as pd\nfrom pytest_dbt.pytest_dbt import run_model\nfrom pandas.testing import assert_frame_equal\n\nstg_customers = [\n {\"customer_id\": 1, \"first_name\": \"Alice\", 
\"last_name\": \"Henderson\"},\n {\"customer_id\": 2, \"first_name\": \"Bob\", \"last_name\": \"Doe\"},\n {\"customer_id\": 3, \"first_name\": \"Charlie\", \"last_name\": \"Krick\"},\n {\"customer_id\": 4, \"first_name\": \"Dennis\", \"last_name\": \"Smith\"},\n]\nstg_orders = [\n {\"order_id\": 1, \"customer_id\": 1, \"order_date\": \"2020-01-01\", \"status\": \"returned\"},\n {\n \"order_id\": 2,\n \"customer_id\": 1,\n \"order_date\": \"2020-01-02\",\n \"status\": \"completed\",\n },\n {\n \"order_id\": 3,\n \"customer_id\": 2,\n \"order_date\": \"2020-01-01\",\n \"status\": \"completed\",\n },\n {\n \"order_id\": 4,\n \"customer_id\": 2,\n \"order_date\": \"2020-01-04\",\n \"status\": \"completed\",\n },\n {\n \"order_id\": 5,\n \"customer_id\": 3,\n \"order_date\": \"2020-01-05\",\n \"status\": \"completed\",\n },\n]\n\nstg_payments = [\n {\"payment_id\": 1, \"order_id\": 1, \"payment_method\": \"card\", \"amount\": 10},\n {\"payment_id\": 2, \"order_id\": 2, \"payment_method\": \"card\", \"amount\": 190},\n {\"payment_id\": 3, \"order_id\": 3, \"payment_method\": \"card\", \"amount\": 50},\n {\"payment_id\": 4, \"order_id\": 4, \"payment_method\": \"card\", \"amount\": 250},\n {\"payment_id\": 5, \"order_id\": 5, \"payment_method\": \"card\", \"amount\": 100},\n]\n\n# expected output\ncustomers = [\n {\n \"customer_id\": 1,\n \"first_name\": \"Alice\",\n \"last_name\": \"Henderson\",\n \"first_order\": \"2020-01-01\",\n \"most_recent_order\": \"2020-01-02\",\n \"number_of_orders\": 2,\n \"customer_lifetime_value\": 200,\n },\n {\n \"customer_id\": 2,\n \"first_name\": \"Bob\",\n \"last_name\": \"Doe\",\n \"first_order\": \"2020-01-01\",\n \"most_recent_order\": \"2020-01-04\",\n \"number_of_orders\": 2,\n \"customer_lifetime_value\": 300,\n },\n {\n \"customer_id\": 3,\n \"first_name\": \"Charlie\",\n \"last_name\": \"Krick\",\n \"first_order\": \"2020-01-05\",\n \"most_recent_order\": \"2020-01-05\",\n \"number_of_orders\": 1,\n \"customer_lifetime_value\": 100,\n },\n {\n \"customer_id\": 4,\n \"first_name\": \"Dennis\",\n \"last_name\": \"Smith\",\n \"first_order\": None,\n \"most_recent_order\": None,\n \"number_of_orders\": None,\n \"customer_lifetime_value\": None,\n },\n]\n\ntables = {\n \"stg_customers\": stg_customers,\n \"stg_orders\": stg_orders,\n \"stg_payments\": stg_payments,\n}\n\n\ndef test_customers():\n result = run_model(\"customers\", tables)\n assert assert_frame_equal(result, pd.DataFrame(customers)) == None\n","repo_name":"anupkalburgi/pytest_dbt","sub_path":"jaffle_shop/tests/test_customers.py","file_name":"test_customers.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"2561618599","text":"import matplotlib.pyplot as plt \r\nimport os\r\nimport numpy as np\r\nimport matplotlib.image as mpimg\r\nimport json\r\nimport scipy.io as scio\r\nfrom matplotlib import gridspec\r\nimport pandas as pd\r\nimport argparse\r\n\r\ndef enlarge(x):\r\n\tfor i in range(len(x)):\r\n\t\tx[i]=x[i]*2-1\r\n\tfor i in range(len(x)):\r\n\t\tif x[i]>0:\r\n\t\t\tx[i]=x[i]\r\n\t\tif x[i]<=0:\r\n\t\t\tx[i]=x[i]*0.5\r\n\treturn x\r\n\r\ndef distance(x1,x2,y1,y2,z1,z2):\r\n\r\n\td1 = np.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)\r\n\treturn d1\r\ndef normalize(x):\r\n\tfor i in range(len(x)):\r\n\t\tx[i]= ((x[i]-18)/(45-18))\r\n\treturn(x)\r\n\r\ndef smooth(x,window_size):\r\n\ty =x.copy()\r\n\tfor i in range(len(x)):\r\n\t\tadd = 0\r\n\t\tfor j in 
range(int(i-(window_size-1)/2),int(i+(window_size-1)/2+1)):\r\n\r\n\t\t\tif j<0:\r\n\t\t\t\tj=0\r\n\t\t\telif j>len(x)-1:\r\n\t\t\t\tj=len(x)-1\r\n\t\t\tadd+=y[j]\r\n\t\ty[i]=add/window_size\r\n\treturn y\r\n\r\ndef extract_label(x):\r\n\tpass\r\n\r\ndef get_others(file,l,img_file):\r\n\teyebrow = []\r\n\tdf = pd.read_csv('openface/'+file+'.csv')\r\n\t\r\n\tfor i in range(l):\r\n\t\tidx = eval(img_file[i].split('frame')[-1].split('.jpg')[0])\r\n\t\t\r\n\r\n\t\t\r\n\t\tx1 = float(df[' X_41'][idx])\r\n\t\tx2 = float(df[' X_40'][idx])\r\n\t\tx3 = float(df[' X_39'][idx])\r\n\t\tx4 = float(df[' X_42'][idx])\r\n\t\tx5 = float(df[' X_47'][idx])\r\n\t\tx6 = float(df[' X_46'][idx])\r\n\r\n\t\tX1 = float(df[' X_19'][idx])\r\n\t\tX2 = float(df[' X_20'][idx])\r\n\t\tX3 = float(df[' X_21'][idx])\r\n\t\tX4 = float(df[' X_22'][idx])\r\n\t\tX5 = float(df[' X_23'][idx])\r\n\t\tX6 = float(df[' X_24'][idx])\r\n\t\r\n\t\ty1 = float(df[' Y_41'][idx])\r\n\t\ty2 = float(df[' Y_40'][idx])\r\n\t\ty3 = float(df[' Y_39'][idx])\r\n\t\ty4 = float(df[' Y_42'][idx])\r\n\t\ty5 = float(df[' Y_47'][idx])\r\n\t\ty6 = float(df[' Y_46'][idx])\r\n\r\n\t\tY1 = float(df[' Y_19'][idx])\r\n\t\tY2 = float(df[' Y_20'][idx])\r\n\t\tY3 = float(df[' Y_21'][idx])\r\n\t\tY4 = float(df[' Y_22'][idx])\r\n\t\tY5 = float(df[' Y_23'][idx])\r\n\t\tY6 = float(df[' Y_24'][idx])\r\n\t\r\n\t\tz1 = float(df[' Z_41'][idx])\r\n\t\tz2 = float(df[' Z_40'][idx])\r\n\t\tz3 = float(df[' Z_39'][idx])\r\n\t\tz4 = float(df[' Z_42'][idx])\r\n\t\tz5 = float(df[' Z_47'][idx])\r\n\t\tz6 = float(df[' Z_46'][idx])\r\n\t\tZ1 = float(df[' Z_19'][idx])\r\n\t\tZ2 = float(df[' Z_20'][idx])\r\n\t\tZ3 = float(df[' Z_21'][idx])\r\n\t\tZ4 = float(df[' Z_22'][idx])\r\n\t\tZ5 = float(df[' Z_23'][idx])\r\n\t\tZ6 = float(df[' Z_24'][idx])\r\n\t\td1 = distance(x1,X1,y1,Y1,z1,Z1)\r\n\t\td2 =distance(x2,X2,y2,Y2,z2,Z2)\r\n\t\td3 =distance(x3,X3,y3,Y3,z3,Z3)\r\n\t\td4 = distance(x4,X4,y4,Y4,z4,Z4)\r\n\t\td5 = distance(x5,X5,y5,Y5,z5,Z5)\r\n\t\td6 = distance(x6,X6,y6,Y6,z6,Z6)\r\n\r\n\t\teyebrow.append((d1+d2+d3+d4+d5+d6)/6)\r\n\t\r\n\t\t\r\n\treturn eyebrow\r\n\r\n\r\n\r\ndef main(f):\r\n\t# print(f)\r\n\tfile = f\r\n\tpart1 = file.split('cam2')[0]\r\n\tpart2 = file.split('cam2')[1]\r\n\tnew_name = part1+'cam1'+part2.split('_')[0]\r\n\tnew_name_2 = file.split('ss3')[0]+'ss3'\r\n\tstart_frame = eval(file.split('_')[-1].split('to')[0])\r\n\tend_frame = eval(file.split('_')[-1].split('to')[1][:-4])\r\n\tduration = end_frame-start_frame\r\n\r\n\ttry:\r\n\t\tsign = scio.loadmat('sign/'+'_'.join(file.split('_')[:2])+'.mp4/'+str(start_frame)+'to'+str(end_frame))\r\n\texcept:\r\n\t\tsign = scio.loadmat('sign/'+new_name+'.mp4/'+str(start_frame)+'to'+str(end_frame))\r\n\tmanual= sign['sign']\r\n\r\n\tspecial = sign['special']\r\n\tspecial = sorted(special, key=lambda x: x[1])\r\n\r\n\r\n\tif not os.path.exists('draw/carol/'+file[:-4]):\r\n\t\tos.mkdir('draw/carol/'+file[:-4])\r\n\r\n\r\n\tf1 = open('gt_onoffset/'+''.join(f.split('cam2-for-ss3'))[:-4]+'.txt')\r\n\tlabel = json.load(f1)\r\n\tf1.close()\r\n\r\n\tf2 = open('pred_onoffset/'+''.join(f.split('cam2-for-ss3'))[:-4]+'.txt')\r\n\tlabel2 = json.load(f2)\r\n\tf2.close()\r\n\r\n\r\n\tdata = scio.loadmat('dataset/'+file)\r\n\tgt = data['gt'][0]\r\n\timg_file = list(data['path'])\r\n\t\r\n\tfor i in range(len(img_file)):\r\n\t\timg_file[i] = img_file[i].split()[0]\r\n\r\n\tresult = data['pred'][0]\r\n\r\n\timg_pre ='frame'+img_file[0].split('frame')[1]+'frame'\r\n\t\r\n\t\r\n\tlm =enlarge(normalize(get_others(new_name_2,len(gt),img_file))) 
#lm is eye_brow_height calculated by landmark\r\n\r\n\tprint(max(lm))\r\n\tprint(min(lm))\r\n\t\t\r\n\tfig = plt.figure(1) \r\n\t\r\n\tgs = gridspec.GridSpec(2,3,height_ratios=[1.4,2.7]) # 3 pics on upper subgraph, 1\r\n\r\n\r\n\t# ax 1 to 3 are pics\r\n\tgs.update(wspace=0.0,hspace = 0)\r\n\tax1 = plt.subplot(gs[0,0])\r\n\r\n\t\r\n\timage = mpimg.imread(img_file[0])\r\n\tax1.imshow(image)\r\n\tax1.axis('off')\r\n\tax1.spines['top'].set_visible(False)\r\n\tax1.spines['right'].set_visible(False)\r\n\tax1.spines['bottom'].set_visible(False)\r\n\tax1.spines['left'].set_visible(False)\r\n\t\r\n\tax2 = plt.subplot(gs[0,1])\r\n\timage = mpimg.imread(img_file[10])\r\n\tax2.imshow(image)\r\n\tax2.axis('off')\r\n\tax2.spines['top'].set_visible(False)\r\n\tax2.spines['right'].set_visible(False)\r\n\tax2.spines['bottom'].set_visible(False)\r\n\tax2.spines['left'].set_visible(False)\r\n\t\r\n\tax3 = plt.subplot(gs[0,2])\t\r\n\timage = mpimg.imread(img_file[-5])\r\n\tax3.imshow(image)\r\n\tax3.axis('off')\r\n\tax3.spines['top'].set_visible(False)\r\n\tax3.spines['right'].set_visible(False)\r\n\tax3.spines['bottom'].set_visible(False)\r\n\tax3.spines['left'].set_visible(False)\r\n\t\r\n\tx = np.arange(len(gt))\r\n\ty1 = smooth(result,5) # eye_brow prediction\r\n\ty2 = smooth(lm,5)\t# eye_brow from landmark\r\n\t\r\n\tax4 = plt.subplot(gs[1,:])\r\n\tplt.xlim((0, len(x)))\r\n\tplt.ylim((-3.5,1.2))\r\n\r\n\r\n\tax4.plot(x[3:-3],y1[3:-3],color = 'red',label='deep learning',lw=1.5)\r\n\tax4.plot(x[3:-3],y2[3:-3],color= 'lime',label='landmark',lw=1.5)\r\n\tplt.xlabel('frame',fontsize=10)\r\n\tplt.ylabel('Eyebrow Movement Intensity',fontsize=10)\r\n\tax4.tick_params(axis='both',labelsize=100)\r\n\tplt.setp(ax4.get_xticklabels(), visible=False)\r\n\tplt.setp(ax4.get_yticklabels(), visible=False)\r\n\t\r\n\r\n\r\n\tplt.tick_params(labelsize=4)\r\n\r\n\te=0\r\n\t# Draw manual marker\r\n\tfor l in range(len(manual)):\r\n\t\ts=int(manual[l][1])-start_frame\r\n\t\tif slegal rate or by default rate applied by the European Central Bank to its most recent refinancing operation plus 10 points') % {'french_rate': 'http://www.minefe.gouv.fr/directions_services/dgtpe/taux/taux_legal.php',\n 'ecb_rate': 'http://fr.global-rates.com/taux-de-interets/banques-centrales/banque-centrale-europeenne/taux-de-bce.aspx'})\n\n class Meta:\n model = Invoice\n exclude = ['owner', 'uuid', 'proposal', 'amount']\n\n def __init__(self, *args, **kwargs):\n super(InvoiceForm, self).__init__(*args, **kwargs)\n self.fields['edition_date'].widget.attrs['class'] = 'date'\n self.fields['payment_date'].widget.attrs['class'] = 'date'\n self.fields['paid_date'].widget.attrs['class'] = 'date'\n self.fields['execution_begin_date'].widget.attrs['class'] = 'date'\n self.fields['execution_end_date'].widget.attrs['class'] = 'date'\n self.fields['penalty_date'].widget.attrs['class'] = 'date'\n self.fields['footer_note'].widget.attrs['size'] = '90'\n\n def clean(self):\n super(InvoiceForm, self).clean()\n cleaned_data = self.cleaned_data\n state = cleaned_data.get(\"state\")\n paid_date = cleaned_data.get(\"paid_date\")\n\n if state == INVOICE_STATE_PAID and not paid_date:\n msg = _('This field is required since invoice state is set to \"paid\".')\n self._errors[\"paid_date\"] = self.error_class([msg])\n\n del cleaned_data[\"paid_date\"]\n\n payment_type = cleaned_data.get('payment_type')\n if state == INVOICE_STATE_PAID and not payment_type:\n msg = _('This field is required since invoice state is set to \"paid\".')\n self._errors[\"payment_type\"] = 
self.error_class([msg])\n\n del cleaned_data[\"payment_type\"]\n\n return cleaned_data\n\nclass InvoiceRowForm(ModelForm):\n quantity = forms.DecimalField(max_digits=6, decimal_places=2, label=_('Quantity'), localize=True)\n unit_price = forms.DecimalField(max_digits=12, decimal_places=2, label=_('Unit price'), localize=True)\n\n class Meta:\n model = InvoiceRow\n exclude = ['owner', 'uuid']\n\n def __init__(self, *args, **kwargs):\n super(InvoiceRowForm, self).__init__(*args, **kwargs)\n self.fields['label'].widget.attrs['class'] = 'label-field'\n self.fields['proposal'].widget.attrs['class'] = 'proposal-field'\n self.fields['balance_payments'].widget.attrs['class'] = 'balance-payments-field'\n self.fields['category'].widget.attrs['class'] = 'category-field'\n self.fields['quantity'].widget.attrs['class'] = 'quantity-field'\n self.fields['unit_price'].widget.attrs['class'] = 'unit-price-field'\n self.fields['vat_rate'].widget.attrs['class'] = 'vat-rate-field'\n self.fields['detail'].widget.attrs['class'] = 'row-detail'\n","repo_name":"fgaudin/aemanager","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"62"} +{"seq_id":"27994296225","text":"# original simulation written by https://github.com/tangbj and refactored by https://github.com/AnthonyChuah\n# the multiple-OT and highest-hp target selection logic\n\n# hateful strikes deal between 22-29k damage every 1.2s\n# this simulation will only consider how many times a healing team of 3 will let an offtank die\n# patchwerk will enrage during the last 5%, but we ignore that since tanks will save shield wall\n# does not take into account batching\n\nOFFTANK_MAX_HEALTH = 10000\nFIGHT_LENGTH = 60 * 4\nAVERAGE_MITIGATION = 0.7\nPATCHWERK_MISS_CHANCE = 0.3\nAVERAGE_PLUS_HEAL = 1000\nAMPLIFY_MAGIC = True\nMAGIC_ATTUNEMENT = True\n\n# HOLY TALENTS\nPOINTS_IN_IMPROVED_HEALING = 3\nPOINTS_IN_SPIRITUAL_HEALING = 5\nPOINTS_IN_SPIRITUAL_GUIDANCE = 5\n\n# assume there is some sort of variance between casts\nREACTION_TIME = 0.2\nHEALER_CRIT_CHANCE = 0.2\n\n# assume rough spirit score of 350\nTOTAL_PLUS_HEAL = AVERAGE_PLUS_HEAL + (150 if AMPLIFY_MAGIC else 0) + (75 if MAGIC_ATTUNEMENT else 0) + \\\n (350 * 0.25 * POINTS_IN_SPIRITUAL_GUIDANCE / 5)\n\nimport argparse\nimport heapq\nimport random\n\n\ndef get_hateful_strike_damage():\n damage = random.random() * (29000 - 22000) + 22000\n damage *= (1 - AVERAGE_MITIGATION)\n return round(damage)\n\n# tuple is average base healing and unmodified healing cost and cast time\nhealing_spell_data = {\n 'h4': (779.5, 305, 2.5),\n}\n\n# pass in name and rank of spell (e.g. 
h3, gh1)\ndef get_heal(spell):\n base_healing, mana_cost, cast_time = healing_spell_data.get(spell)\n mana_cost *= (1 - 0.05 * POINTS_IN_IMPROVED_HEALING)\n # spirutal healing adds max of 10% to base heal\n base_healing *= (1 + POINTS_IN_SPIRITUAL_HEALING / 5 * 0.1)\n total_healing = base_healing + 3 / 3.5 * TOTAL_PLUS_HEAL\n if random.random() <= HEALER_CRIT_CHANCE:\n total_healing *= 1.5\n return total_healing, mana_cost, cast_time\n\nclass Event:\n def is_hateful(self):\n return self._entity == 0\n def __init__(self, entity, time):\n self._entity = entity # 0 for Patchwerk, 1 for first healer, 2 for second, 3 for third, etc.\n self._time = time # time in seconds from start of fight\n def __lt__(self, other):\n return self._time < other._time\n def __gt__(self, other):\n return other < self\n def __str__(self):\n time = \"{0: >5}\".format(str(self._time))\n name = \"\"\n if self._entity == 0:\n name = \"Patchwerk Hateful\"\n else:\n name = \"Healer #{} Heal\".format(self._entity)\n return \"[Time {}] {}\".format(time, name)\n\n# updated hateful strike to hit every 1.2s instead of random number from 1.2 to 2s\ndef get_timetonext_hateful():\n return 1.2\n # seconds_later = (random.random() * (0.8499) + 1.2)\n# return round(seconds_later, 1)\n\ndef get_hateful_target(tanks_health):\n return tanks_health.index(max(tanks_health))\n\n# tank 0 is healed by healers [1, 2, 3], tank 1 by healers [4, 5, 6], tank 2 by healers [7, 8, 9]\ndef get_heal_target(healer_idx):\n return (healer_idx + 2) // 3 - 1\n\ndef heal_tank(tanks_health, tank_idx, heal_qty):\n # print(\"Tank #{} ({} hp) is healed for {}\".format(tank_idx, tanks_health[tank_idx], heal_qty))\n tanks_health[tank_idx] += heal_qty\n if tanks_health[tank_idx] > OFFTANK_MAX_HEALTH:\n # print(\"Overhealed {}\".format(tanks_health[tank_idx] - OFFTANK_MAX_HEALTH))\n tanks_health[tank_idx] = OFFTANK_MAX_HEALTH\n\ndef smash_tank(tanks_health, tank_idx, dmg):\n if random.random() < PATCHWERK_MISS_CHANCE:\n # print(\"Hateful Strike MISSES tank #{}\".format(tank_idx))\n return False\n # print(\"Hateful Strike hits tank #{} ({} hp) for {} dmg\".format(tank_idx, tanks_health[tank_idx], dmg))\n tanks_health[tank_idx] -= dmg\n if tanks_health[tank_idx] <= 0:\n # print(\"Tank #{} has DIED! 
({} Overkill)\".format(tank_idx, -tanks_health[tank_idx]))\n return True\n return False\n\ndef run_simulation():\n PATCHWERK = 0\n event_heap = []\n heapq.heappush(event_heap, Event(PATCHWERK, 0))\n # print(\"Patchwerk first Hateful Strike scheduled to land at 0 seconds\")\n for ii in range(1, 10):\n _, _, cast_time = get_heal('h4')\n start = round(random.random() * cast_time, 1)\n # print(\"Healer #{} randomly scheduled to land first heal at {} seconds\".format(ii, start))\n heapq.heappush(event_heap, Event(ii, start))\n tanks_health = [OFFTANK_MAX_HEALTH, OFFTANK_MAX_HEALTH, OFFTANK_MAX_HEALTH]\n elapsed = 0\n heapq.heapify(event_heap)\n while elapsed < FIGHT_LENGTH:\n next_event = heapq.heappop(event_heap)\n # print(\"{} {}\".format(tanks_health, next_event))\n if next_event.is_hateful():\n target_idx = get_hateful_target(tanks_health)\n death = smash_tank(tanks_health, target_idx, get_hateful_strike_damage())\n if death:\n break\n delay = get_timetonext_hateful()\n heapq.heappush(event_heap, Event(PATCHWERK, round(elapsed + delay, 1)))\n else:\n healer_idx = next_event._entity\n target_idx = get_heal_target(healer_idx)\n heal_amount, _, cast_time = get_heal('h4')\n heal_tank(tanks_health, target_idx, heal_amount)\n human_delay = round(REACTION_TIME * random.random(), 1)\n heapq.heappush(event_heap, Event(healer_idx, round(elapsed + cast_time + human_delay, 1)))\n elapsed = next_event._time # increment timer\n if elapsed >= FIGHT_LENGTH:\n # print(\"Congrats! Patchwerk is dead\")\n return True\n else:\n # print(\"TANK DIES; WHY NO HEALS NOOBS\")\n return False\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sims\", required=True)\n args = parser.parse_args()\n number_simulations = int(args.sims)\n number_survived = 0\n for _ in range(number_simulations):\n if run_simulation():\n number_survived += 1\n\n print('Number of times tank survived: {} ({}%)'.format(number_survived, number_survived / number_simulations * 100))","repo_name":"aitutor22/patchwerk_sim","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8424027219","text":"import logging\nfrom datetime import datetime\n\nfrom notedown_api.models import NoteModel\nfrom notedown_api.extensions import db\nfrom notedown_api.namespaces.auth.auth_controller import AuthController\nfrom notedown_api.utils import decode_token\n\nlogger = logging.getLogger()\n\n\nclass NotesController(AuthController):\n\n @staticmethod\n def get_email_from_token(token: str) -> str:\n \"\"\"Decode token information and extract the user's email.\n\n Args:\n token: Token to get info from.\n\n Returns:\n User's email address on the token.\n \"\"\"\n payload = decode_token(token)\n user_email = payload.get('user')\n return user_email\n\n def add_user_note(self, email: str, note_text: str) -> NoteModel:\n \"\"\"Add a note to the database linked to the given user.\n\n Args:\n email: User's email.\n note_text: Text of the note.\n\n Returns:\n NoteModel added.\n \"\"\"\n user = self.get_user(email)\n note = NoteModel(\n text=note_text,\n date_created=datetime.utcnow(),\n date_edited=datetime.utcnow(),\n user=[user]\n )\n db.session.add(note)\n db.session.commit()\n return note\n\n def get_user_note(self, email: str, note_id: str) -> NoteModel:\n \"\"\"Retrieve a note by a given id.\n\n Args:\n email: User's email.\n note_id: Note's id.\n\n Returns:\n NoteModel retrieved from the 
database.\n \"\"\"\n user = self.get_user(email)\n note = user.notes.filter_by(id=note_id).first()\n return note\n\n def get_user_notes(self, email: str) -> list:\n \"\"\"Retrieve all user's notes from the database.\n\n Args:\n email: User's email.\n\n Returns:\n List of NoteModel entities.\n \"\"\"\n user = self.get_user(email)\n notes = user.notes.all()\n return notes\n\n def delete_user_note(self, email: str, note_id: str) -> NoteModel:\n \"\"\"Delete a note on the database.\n\n Args:\n email: User's email.\n note_id: Note's id.\n\n Returns:\n Deleted NoteModel\n \"\"\"\n user = self.get_user(email)\n note = user.notes.filter_by(id=note_id).first()\n user.notes.remove(note)\n db.session.commit()\n return note\n\n def edit_user_note(self, email: str, note_id: str,\n note_text: str) -> NoteModel:\n \"\"\"Edit some attributes to a given note.\n\n Args:\n email: User's email.\n note_id: Note's id.\n note_text: Text of the note.\n\n Returns:\n Edited NoteModel.\n \"\"\"\n note = self.get_user_note(email=email, note_id=note_id)\n note.text = note_text\n note.date_edited = datetime.utcnow()\n db.session.add(note)\n db.session.commit()\n return note\n","repo_name":"yeyeto2788/NoteDown","sub_path":"python_api/notedown_api/namespaces/notes/notes_controller.py","file_name":"notes_controller.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"638927649","text":"# Author: Xu Ye \n\n'''ExperimentRunner for running experiments and writing output data to files.'''\n\nimport os\n\nimport pandas as pd\nimport vrp.third_party.cvrplib as cvrplib\n\nfrom vrp.decomp.decomposition import DecompositionRunner\nimport vrp.decomp.helpers as helpers\nfrom vrp.decomp.logger import logger\nfrom vrp.decomp.constants import *\n\n\nlogger = logger.getChild(__name__)\n\n\nclass ExperimentRunner:\n def __init__(self, solver, benchmarks, num_clusters_range, repeat_n_times, output_file_name, sleep_time=10) -> None:\n self.benchmarks = benchmarks\n self.num_clusters_range = num_clusters_range\n self.repeat_n_times = repeat_n_times\n self.output_file_name = output_file_name\n self.experiments = []\n self.solver = solver\n self.decomp_runner = None\n self.sleep_time = sleep_time\n\n\n def add_experiement(self, experiment):\n self.experiments.append(experiment)\n\n\n def add_experiements(self, experiments):\n self.experiments.extend(experiments)\n\n\n def get_all_decomp(self, instance_name, experiment_name):\n '''write/log all results, not just best found'''\n\n logger.info('')\n exp_header = f'Running experiment: {experiment_name} on instance {instance_name}'\n logger.info(f\"--------------- {exp_header} ---------------\")\n\n # try clustering with diff number of clusters\n min_clusters, max_clusters = self.num_clusters_range\n for num_clusters in range(min_clusters, max_clusters + 1):\n # repeat n times\n for i in range(1, self.repeat_n_times + 1):\n self.decomp_runner.decomposer.num_clusters = num_clusters\n solution = self.decomp_runner.run(in_parallel=True, num_workers=num_clusters)\n cost = solution.metrics[METRIC_COST]\n routes = solution.routes\n\n sol_header = f'Solution for experiment: {experiment_name} on instance {instance_name}'\n logger.info(f\"------ {sol_header} ------\")\n logger.info(f\"Decomp cost: {cost} with \"\n f\"{num_clusters} clusters and {len(routes)} routes; iteration {i}\")\n logger.info('')\n\n # write KPIs to excel\n excel_data = {\n KEY_INSTANCE_NAME: [instance_name],\n KEY_ITERATION: [i],\n 
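                    # (added note: each Excel KPI row below is keyed by instance name, iteration, and cluster count)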
KEY_NUM_SUBPROBS: [num_clusters],\n KEY_NUM_ROUTES: [len(routes)],\n KEY_COST: [cost],\n }\n df = pd.DataFrame(excel_data)\n helpers.write_to_excel(df, self.output_file_name, sheet_name=experiment_name)\n\n # write detailed routes to json\n ## routes_key = f'{instance_name}_{experiment_name}_{num_clusters}_{i}'\n\n # add `KEY_COST: cost,` to json data - if this value is\n # inf, then it means no feasible solution found.\n # when called on decomposed instances, if one single\n # instance finds no feasible solution, aggregated cost will\n # be inf, indicating no feasible solution found for the original\n # instance, regardless of how many routes from other instances\n # may have been collected.\n json_data = {\n KEY_INSTANCE_NAME: instance_name,\n KEY_EXPERIMENT_NAME: experiment_name,\n KEY_NUM_SUBPROBS: num_clusters,\n KEY_ITERATION: i,\n KEY_ROUTES: routes,\n KEY_COST: cost,\n }\n helpers.write_to_json(json_data, self.output_file_name)\n\n if self.repeat_n_times > 1 or (max_clusters - min_clusters) > 0:\n # let the CPU take a break after each iteration (per repetition per cluster)\n helpers.sleep(self.sleep_time, __name__)\n\n # break after each experiment\n helpers.sleep(self.sleep_time, __name__)\n\n\n def run_experiments(self, inst):\n for decomposer in self.experiments:\n self.decomp_runner = DecompositionRunner(inst, decomposer, self.solver)\n self.get_all_decomp(inst.extra['name'], decomposer.name)\n\n\n def read_instance(self, dir_name, instance_name):\n logger.info('')\n logger.info(f'Benchmark instance name: {instance_name}')\n\n file_name = os.path.join(CVRPLIB, dir_name, instance_name)\n inst, bk_sol = cvrplib.read(\n instance_path=f'{file_name}.txt',\n solution_path=f'{file_name}.sol'\n )\n min_tours = helpers.get_min_tours(inst.demands, inst.capacity)\n\n logger.info(f'Min num tours: {min_tours}')\n logger.info(f'Best known cost: {bk_sol.cost} with {len(bk_sol.routes)} routes')\n\n return inst, bk_sol\n\n\n def get_no_decomp_solution(self, inst):\n # call solver directly without decomposition\n logger.info('')\n solution = self.solver.solve(inst)\n cost = solution.metrics[METRIC_COST]\n logger.info(f'No decomp cost: {cost} with {len(solution.routes)} routes')\n json_data = {\n KEY_INSTANCE_NAME: inst.extra['name'],\n KEY_EXPERIMENT_NAME: 'No decomp',\n KEY_ROUTES: solution.routes,\n KEY_COST: cost,\n }\n helpers.write_to_json(json_data, self.output_file_name)\n return cost, solution.routes\n\n\n def run(self, experiments_only=False):\n for benchmark, benchmark_dir_name in self.benchmarks:\n for instance_name in benchmark:\n inst, bk_sol = self.read_instance(benchmark_dir_name, instance_name)\n converted_inst = helpers.convert_cvrplib_to_vrp_instance(inst)\n\n if not experiments_only:\n no_decomp_cost, no_decomp_routes = self.get_no_decomp_solution(converted_inst)\n\n # prepare data to be written to excel\n excel_data = {\n KEY_INSTANCE_NAME: [instance_name],\n f'{KEY_NUM_ROUTES}_BK': [len(bk_sol.routes)],\n f'{KEY_NUM_ROUTES}_NO_decomp': [len(no_decomp_routes)],\n f'{KEY_COST}_BK': [bk_sol.cost],\n f'{KEY_COST}_NO_decomp': [no_decomp_cost],\n }\n\n # write base reference data to excel in its own tab\n # subsequently each experiment will also write its output\n # in its own tab - one tab per experiment, one row per instance\n df = pd.DataFrame(excel_data)\n ## df = df.reindex(sorted(df.columns), axis=1)\n helpers.write_to_excel(df, self.output_file_name, sheet_name='Basis')\n\n # run all the decomposition experiments on current VRP instance\n 
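                # (added note: run_experiments() builds one DecompositionRunner per
                # configured decomposer, so every experiment is solved against the
                # same converted VRP instance and its sheet in the Excel output
                # stays comparable row-for-row)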
self.run_experiments(converted_inst)\n\n","repo_name":"lifelikeleaf/vrp","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25797523866","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n#for video capture through the camera, it has to go through the gStreamer by invoking the correct command\n#below is the command for the econ systems camera See3CAM_CU30\ncap=cv2.VideoCapture(\"v4l2src device=/dev/video1 ! video/x-raw, width=(int)1280, height=(int)720, format=UYVY ! videoconvert ! video/x-raw, format=RGB ! videoconvert !appsink\", cv2.CAP_GSTREAMER)\n\nwhile(True):\n ret, frame=cap.read()\n #gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \n cv2.imshow(\"Web cam\",frame)\n cv2.imshow(\"grayscale video\",frame)\n # When we press Q on our keyboard we will exit a video\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# Release the video capture object\ncap.release()\n\n# Closes all the frames\ncv2.destroyAllWindows()\n\n","repo_name":"thariqkhalid/JetsonExperiments","sub_path":"camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5522582465","text":"from enum import Enum\nfrom TransfersList import TransferList\nfrom StudioEnums import *\nimport platform\n\n\nclass Item:\n\n desired_stock_rishpon = 0\n desired_stock_tachana = 0\n\n def __init__(self, code, description, color):\n self.code = code\n self.description = description\n self.color = color\n self.age = \"\"\n self.stock = []\n self.desired_stock = []\n self.initialStock = []\n self.isFewSizes = False\n return\n\n def __hash__(self):\n return hash(self.code)\n\n def __eq__(self, other):\n return self.code == other.code\n\n def __ne__(self, other):\n return not(self == other)\n\n \"\"\"\n Prints the stock of the item in a friendly representation.\n \"\"\"\n def printStock(self):\n raise NotImplementedError(\"Please Implement this method\")\n\n \"\"\"\n Writes the stock to HTML file - Each row is written into a
tag.\n \"\"\"\n def writeStockToFile(self, fileToWrite):\n raise NotImplementedError(\"Please Implement this method\")\n\n \"\"\"\n Update the actual stock of a given size for this item.\n \"\"\"\n def update_stock(self, store, size, amount):\n self.stock[store.value][size] = amount\n return\n\n \"\"\"\n Update the desired stock of a given size for this item.\n \"\"\"\n def update_desired_stock(self, store, size, amount):\n self.desired_stock[store.value][size] = amount\n\n \"\"\"\n Updates all of the stock within a given store by the entered amounts.\n \"\"\"\n def updateStockByStore(self, store, xs, s, m, l, xl, w):\n raise NotImplementedError(\"Please Implement this method\")\n\n \"\"\"\n Auto fills the desired stock of the item with the current desired stock value.\n \"\"\"\n def auto_update_desired_stock(self, numOfSizes):\n for size in range(numOfSizes):\n self.update_desired_stock(Stores.RISHPON, size, Item.desired_stock_rishpon)\n for size in range(numOfSizes):\n self.update_desired_stock(Stores.TACHANA, size, Item.desired_stock_tachana)\n return\n\n \"\"\"\n Updates the desired stock value for Rishpon and Tachana.\n \"\"\"\n def update_desired_values(rishpon, tachana):\n Item.desired_stock_rishpon = rishpon\n Item.desired_stock_tachana = tachana\n return\n\n \"\"\"\n Updates the desired stock of the item according to the current stock in stores.\n This may help when the given desired stock is lower than the correct desired amount\n so the program can automaticlly learn the amount that should be held in each store.\n \"\"\"\n def update_desired_stock_from_current_stock(self, store, numOfSizes):\n num_of_sizes_higher_than_desired = 0\n num_of_non_zero_sizes = 0\n desired_stock = desired_rishpon if store == Stores.RISHPON else desired_tachana\n for size in range(numOfSizes):\n if self.stock[store.value][size] > 0:\n num_of_non_zero_sizes += 1\n if self.stock[store.value][size] > desired_stock:\n num_of_sizes_higher_than_desired += 1\n if num_of_sizes_higher_than_desired / num_of_non_zero_sizes >= 0.5:\n self.desired_stock = [x + 1 for x in self.desired_stock]\n return\n\n \"\"\"\n Makes the transfers of the current item between the stores.\n \"\"\"\n def transfer(self, numOfSizes, warnings_file):\n # First transfer from Warehouse to stores and check for negative stock.\n for size in range(numOfSizes):\n self.checkStockValidity(size, warnings_file) # Checks for negative stock\n rishpon_dist, tachana_dist = self.getDistances(size)\n if rishpon_dist > 0: # TRANSFER FROM WAREHOUSE TO RISHPON\n while (rishpon_dist > 0 and self.stock[Stores.WAREHOUSE.value][size] > 0):\n self.transferFromTo(TransferFromTo.WAREHOUSE_TO_RISHPON, size, 1)\n rishpon_dist -= 1\n\n if tachana_dist > 0: # TRANSFER FROM WAREHOUSE TO TACHANA\n while (tachana_dist > 0 and self.stock[Stores.WAREHOUSE.value][size] > 0):\n self.transferFromTo(TransferFromTo.WAREHOUSE_TO_TACHANA, size, 1)\n tachana_dist -= 1\n\n # Empty warehouse if only few pieces left there.\n # self.transferLastPiecesFromWarehouse()\n\n # Then transfer between stores.\n for size in range(numOfSizes):\n rishpon_dist, tachana_dist = self.getDistances(size)\n # TRANSFER FROM RISHPON TO TACHANA\n while tachana_dist > 0 and rishpon_dist < tachana_dist:\n rishpon_dist, tachana_dist = self.getDistances(size)\n self.transferFromTo(TransferFromTo.RISHPON_TO_TACHANA, size, 1)\n tachana_dist -= 1\n rishpon_dist += 1\n\n # Transfer from Tachana to Rishpon if the dist' of Rishpon is higher.\n while rishpon_dist > 0 and (tachana_dist <= 0 or (rishpon_dist - 
tachana_dist >= 2)):\n rishpon_dist, tachana_dist = self.getDistances(size)\n if rishpon_dist == 1 and tachana_dist == 0: # Keep it as is\n break\n else:\n self.transferFromTo(TransferFromTo.TACHANA_TO_RISHPON, size, 1)\n tachana_dist += 1\n rishpon_dist -= 1\n\n # If Tachana has more stock than Rishpon - Transfer it to Rishpon\n extraAmount = self.stock[Stores.TACHANA.value][size] - self.stock[Stores.RISHPON.value][size]\n if extraAmount > 0:\n self.transferFromTo(TransferFromTo.TACHANA_TO_RISHPON, size, extraAmount)\n\n if not self.isFewSizes: # Transfer between stores if one store has only few pieces left.\n self.transferLastPiecesFromStores(warnings_file)\n return\n\n \"\"\"\n Checks if the stock of a given size is negative in one of the stores.\n \"\"\"\n def checkStockValidity(self, size, warnings_file):\n negativeStockWarning = \"
מלאי שלילי עבור פריט \" + self.description + \" בצבע \" + self.color + \" במחסן!\" + \"
\"\n if self.stock[Stores.WAREHOUSE.value][size] < 0:\n self.stock[Stores.WAREHOUSE.value][size] = 0\n warnings_file.write(negativeStockWarning.encode(\"utf8\"))\n if self.stock[Stores.RISHPON.value][size] < 0:\n self.stock[Stores.RISHPON.value][size] = 0\n warnings_file.write(negativeStockWarning.encode(\"utf8\"))\n if self.stock[Stores.TACHANA.value][size] < 0:\n self.stock[Stores.TACHANA.value][size] = 0\n warnings_file.write(negativeStockWarning.encode(\"utf8\"))\n return\n\n \"\"\"\n Tansfers last pieces between the stores if the stock is not full enough.\n \"\"\"\n def transferLastPiecesFromStores(self, warnings_file, numOfSizes, sizesDict):\n num_of_sizes_rishpon = 0\n num_of_items_rishpon = 0\n num_of_sizes_tachana = 0\n num_of_items_tachana = 0\n sizes_rishpon_for_warning = \"\"\n sizes_tachana_for_warning = \"\"\n\n # Calculates the num of pieces & num of different sizes in each store.\n for size in range(numOfSizes):\n if self.stock[Stores.RISHPON.value][size] > 0:\n sizes_rishpon_for_warning += str(sizesDict[size]) + \"_&_\"\n num_of_sizes_rishpon += 1\n num_of_items_rishpon += self.stock[Stores.RISHPON.value][size]\n if self.stock[Stores.TACHANA.value][size] > 0:\n sizes_tachana_for_warning += str(sizesDict[size]) + \"_&_\"\n num_of_sizes_tachana += 1\n num_of_items_tachana += self.stock[Stores.TACHANA.value][size]\n\n lastPiecesWarning = \"
נשארו המידות \" + sizes_tachana_for_warning[:-3] + \" מדגם \" + self.description + \" בצבע \" + self.color + \" בסה״כ \" + str(num_of_items_tachana) + \" פריטים אחרונים בתחנה\"+ \"
\"\n if num_of_items_tachana == 0: # If Tachana's stock is empty.\n return\n if num_of_sizes_tachana == 1: # Transfers stock if only one size remain and the size is different than rishpon\n if num_of_items_tachana == 1:\n self.transferAllStockOfStore(TransferFromTo.TACHANA_TO_RISHPON)\n else:\n warnings_file.write(lastPiecesWarning.encode(\"utf8\"))\n if num_of_items_tachana == 2 and num_of_sizes_tachana == 2: # Transfer items to Rishpon\n shouldTransferAll = self.checkForLastSizePair()\n if shouldTransferAll:\n self.transferAllStockOfStore(TransferFromTo.TACHANA_TO_RISHPON)\n else:\n warnings_file.write(lastPiecesWarning.encode(\"utf8\"))\n elif num_of_items_tachana > 2 and num_of_sizes_tachana == 2:\n warnings_file.write(lastPiecesWarning.encode(\"utf8\"))\n return\n\n \"\"\"\n Checks whether or not the whole stock of the warehouse should be transferred\n to the stores. If it should, decide to which store according to the distances.\n As the distance higher, it means that the stock in that store is lower.\n The function returns the new distances of the stores (in a tuple).\n \"\"\"\n def transferLastPiecesFromWarehouse(self,numOfSizes):\n num_of_sizes_warehouse = 0\n num_of_items_warehouse = 0\n\n for size in range(numOfSizes):\n if self.stock[Stores.WAREHOUSE.value][size] > 0:\n num_of_sizes_warehouse += 1\n num_of_items_warehouse += self.stock[Stores.WAREHOUSE.value][size]\n\n if num_of_items_warehouse == 0:\n return\n\n if num_of_sizes_warehouse == 1 and num_of_items_warehouse == 1:\n self.transferAllStockOfWarehouse()\n if num_of_sizes_warehouse == 2 and num_of_items_warehouse == 2:\n self.transferAllStockOfWarehouse()\n return\n\n \"\"\"\n Transfers the last pieces that remain in the Warehouse.\n \"\"\"\n def transferAllStockOfWarehouse(self, numOfSizes):\n for size in range(numOfSizes):\n rishpon_dist, tachana_dist = self.getDistances(size)\n if self.stock[Stores.WAREHOUSE.value][size] > 0:\n if rishpon_dist >= tachana_dist and rishpon_dist >= 0:\n self.transferFromTo(TransferFromTo.WAREHOUSE_TO_RISHPON, size, 1)\n if tachana_dist > rishpon_dist and tachana_dist >= 0:\n self.transferFromTo(TransferFromTo.WAREHOUSE_TO_TACHANA, size, 1)\n return\n\n \"\"\"\n Transfers the last pieces that remain in 'fromStore' to 'toStore'.\n params: fromStore and toStore are of type Stores enum.\n \"\"\"\n def transferAllStockOfStore(self, fromToStore, numOfSizes):\n fromStore = fromToStore.fromStore\n for size in range(numOfSizes):\n amount = self.stock[fromStore.value][size]\n if amount > 0:\n self.transferFromTo(fromToStore, size, amount)\n return\n\n \"\"\"\n Checks if the sizes remain in the Tachana Store are too different from one another.\n If they are close return False, else True.\n \"\"\"\n def checkForLastSizePair(self):\n raise NotImplementedError(\"Please Implement this method in subClasses\")\n\n \"\"\"\n Transfers the item in the given size from 'fromStore' to 'toStore' (kept inside\n 'fromToStore') in the given amount.\n params: 'fromToStore' is from type TransferFromTo enum.\n \"\"\"\n def transferFromTo(self, fromToStore, size, amount):\n self.stock[fromToStore.fromStore.value][size] -= amount\n self.stock[fromToStore.toStore.value][size] += amount\n TransferList.add_transfer(self, fromToStore, size, amount)\n return\n\n \"\"\"\n Returns the distance between the desired stock of the given size in rishpon and tachana.\n \"\"\"\n def getDistances(self, size):\n rishpon_dist = self.desired_stock[Stores.RISHPON.value][size] - self.stock[Stores.RISHPON.value][size]\n tachana_dist = 
self.desired_stock[Stores.TACHANA.value][size] - self.stock[Stores.TACHANA.value][size]\n return rishpon_dist, tachana_dist\n\n \"\"\"\n Sets the item to be an item with small range of sizes that shouldn't be\n transfered if few pieces left in store.\n \"\"\"\n def setItemAsFewSizes(self):\n self.isFewSizes = True\n return\n\n \"\"\"\n Gets the number of sizes that this item may have.\n \"\"\"\n def getNumOfSizes(self):\n raise NotImplementedError(\"Please Implement this method\")\n\n \"\"\"\n Saves the current 'stock' field in the 'initialStock' field.\n \"\"\"\n def saveInitialStock(self, numOfSizes):\n self.initialStock = [[self.stock[y][x] for x in range(numOfSizes) ] for y in range(NUM_OF_STORES)]\n\n \"\"\"\n Returns True if this item in size 'size' doesn't exist in the given 'store'.\n \"\"\"\n def isEmpty(self, store, size):\n return self.initialStock[store.value][size] == 0\n","repo_name":"alonyaar/StockTransfers","sub_path":"Item.py","file_name":"Item.py","file_ext":"py","file_size_in_byte":12882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"72555231876","text":"\"\"\"empty message\n\nRevision ID: 75604619e197\nRevises: 5b763af0a4c7\nCreate Date: 2021-08-25 20:03:32.017118\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '75604619e197'\ndown_revision = '5b763af0a4c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('todolists', 'completed')\n op.add_column('todos', sa.Column('complete', sa.Boolean(), nullable=False))\n op.drop_column('todos', 'completed')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('todos', sa.Column('completed', sa.BOOLEAN(), autoincrement=False, nullable=False))\n op.drop_column('todos', 'complete')\n op.add_column('todolists', sa.Column('completed', sa.BOOLEAN(), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n","repo_name":"70Ophiuchi/TodoList","sub_path":"migrations/versions/75604619e197_.py","file_name":"75604619e197_.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39122471239","text":"print (1 < 1)\nprint (1 <= 1)\nprint (1 > 1)\nprint (1 >= 1)\nprint (1 == 1)\nprint (1 != 1)\n\nname = input(\"What's your name?\")\nif name == \"Jessica\":\n print(\"Hello, nice to see you {}\".format(name))\nelif name == \"Danielle\":\n print(\"Hello, you are a great person!\")\nelif name == \"Kingston\": \n print(\"Hi, {}, let's have lunch soon!\".format(name))\nelse:\n print(\"Have a nice day!\")\n\n# Conditionals in calculator\ndef add():\n a = float(input(\"Enter a number.\"))\n b = float(input(\"Enter another number.\"))\n print(a + b)\n\ndef substraction():\n a = float(input(\"Enter a number.\"))\n b = float(input(\"Enter another number.\"))\n print(a - b)\n\ndef multiply():\n a = float(input(\"Enter a number.\"))\n b = float(input(\"Enter another number.\"))\n print(a * b)\n\ndef divide():\n a = float(input(\"Enter a number.\"))\n b = float(input(\"Enter another number.\"))\n print(a / b)\n\non = True \nwhile on:\n operation = input(\"What do you want calculate? 
type +, -, *, /, or quit:\")\n    if operation == '+':\n        add()\n    elif operation == '-':\n        substraction()\n    elif operation == '*':\n        multiply()\n    elif operation == '/':\n        divide()\n    elif operation == 'quit':\n        on = False\n    else:\n        print(\"That operation is not available.\")","repo_name":"gagaspanduw/learn-python","sub_path":"chapter4-conditionals.py","file_name":"chapter4-conditionals.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"12179678794","text":"#-*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\nimport bs4\nimport lxml\nimport html5lib\n# sourceCode = 'http://media.daum.net/cp/310'\n# codeTxt=sourceCode.text\n# codTxt_bs = BeautifulSoup(codeTxt, 'lxml')\n#\n# for title in codTxt_bs.select('title'):\n#     print(title.txt)\n\n# Daum JTBC news scraping example\n#http://media.daum.net/cp/310\npress = [310] # news outlet id\ndate = [20180205] # date (YYYYMMDD)\npage = [1,2,3] # page numbers\n\nnew_title = [] # news titles\nnew_desc = [] # short news descriptions\n\n# build the URL to scrape\n#http://media.daum.net/cp/310?page=2&regDate=20180205\nURL = 'http://media.daum.net/cp/' + str(press[0]) + '?page=' + str(page[0]) + '&regDate=' + str(date[0])\n\n# fetch the page and keep the response in source_code\nsource_code = requests.get(URL)\n\n# print the intermediate result\n#print(source_code.text)\n\n# parse the tags with lxml so the text can be extracted (loaded in memory)\nplain_text = source_code.text\nsoup = BeautifulSoup(plain_text, 'lxml')\n\n# extract the article titles\ncnt = 1\nfor title in soup.select(\"a['class=link_txt']\"):\n    if(cnt > 15): break\n    # print(title.text.strip())\n    new_title.append(title.text.strip())\n    cnt += 1\n\n# extract the short article descriptions. span > link_txt\nprint(''' \n==== extracted article descriptions ==== \n''')\n\ncnt = 1\nfor title in soup.select(\"span['class=link_txt']\"):\n    if(cnt > 15): break\n    new_desc.append(title.text.strip())\n    cnt += 1\n\nfor i in range(0, 15):\n    print(new_title[i])\n    print(\"%s\\n\" % (new_desc[i]))\n\n","repo_name":"DreamingDataScientist/Learnning_R_Python","sub_path":"Python/웹분석/daum_jtbc.py","file_name":"daum_jtbc.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74366402116","text":"# -*- coding: utf-8 -*-\nfrom base_linklist import AbstractList,LNode\n\n\nclass LList(AbstractList):\n    '''Singly linked list\n    '''\n    def __init__(self):\n        self._head = None\n\n    def is_empty(self):\n        return self._head is None\n\n    def get_length(self):\n        p = self._head\n        length = 0\n        while p is not None:\n            length += 1\n            p = p.next\n        return length\n\n    def prepend(self, elem):\n        self._head = LNode(elem, self._head)\n\n    def pop(self):\n        if self._head is None:\n            raise Exception('Underflow in pop')\n        e = self._head.elem\n        self._head = self._head.next\n        return e\n\n    def append(self, elem):\n        if self._head is None:\n            self._head = LNode(elem)\n            return\n        p = self._head\n        while p.next is not None:\n            p = p.next\n        p.next = LNode(elem)\n\n    def pop_last(self):\n        if self._head is None:\n            raise Exception('Underflow in pop')\n        p = self._head\n        if p.next is None:\n            e = p.elem\n            self._head = None\n            return e\n        while p.next.next is not None:\n            p = p.next\n        e = p.next.elem\n        p.next = None\n        return e\n\n    def find(self, pred):\n        p = self._head\n        while p is not None:\n            if pred(p.elem):\n                return p.elem\n            p = p.next\n\n    def printall(self):\n        p = self._head\n        while p is not None:\n            print(p.elem, end='')\n            if p.next is not None:\n                print(',', end='')\n            p = p.next\n        print('')\n\n    def foreach(self, proc):\n        p = self._head\n        while p is not None:\n            proc(p.elem)\n            p = p.next\n\n    def 
elements(self):\n p = self._head\n while p is not None:\n yield p.elem\n p = p.next\n\n def nodes(self):\n p = self._head\n while p is not None:\n yield p\n p = p.next\n\n def filter(self, pred):\n p = self._head\n while p is not None:\n if pred(p.elem):\n yield p.elem\n p = p.next\n\n def index(self, elem):pass#search\n def insert(self, elem, i):pass\n def get_item(self):pass\n def remove(self, i):pass#del\n \n\nif __name__ == '__main__':\n mlist1 = LList()\n for i in range(10):\n mlist1.prepend(i)\n for i in range(11,20):\n mlist1.append(i)\n mlist1.printall()\n print('-------')\n mlist1.foreach(print)\n\n ","repo_name":"yabaoya/algorithm","sub_path":"linklist/llist.py","file_name":"llist.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5449995428","text":"## {{{ http://code.activestate.com/recipes/576832/ (r2)\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch, mm\n\nclass NumberedCanvas(canvas.Canvas):\n def __init__(self, *args, **kwargs):\n canvas.Canvas.__init__(self, *args, **kwargs)\n self._saved_page_states = []\n\n def showPage(self):\n self._saved_page_states.append(dict(self.__dict__))\n self._startPage()\n\n def save(self):\n \"\"\"add page info to each page (page x of y)\"\"\"\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n canvas.Canvas.showPage(self)\n canvas.Canvas.save(self)\n\n def draw_page_number(self, page_count):\n self.setFont('Times-Roman', 10)\n self.drawRightString(200 * mm, 0.25 * inch,\n \"Page %d/%d\" % (self._pageNumber, page_count))\n","repo_name":"fgaudin/aemanager","sub_path":"custom_canvas.py","file_name":"custom_canvas.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"62"} +{"seq_id":"74070353478","text":"from PIL import Image\n\ndef complete(s):\n if len(s) > 1:\n return s\n return \"0\" + s\n\ndef toString(number):\n r = complete(hex(number[0]).replace(\"0x\", \"\"))\n g = complete(hex(number[1]).replace(\"0x\", \"\"))\n b = complete(hex(number[2]).replace(\"0x\", \"\"))\n \n return r + g + b + \" \"\n\nfileIn = input(\"Arquivo de entrada: \")\nfileOut = input(\"Arquivo de saida: \")\nimageWidth = int(input(\"Largura da imagem: \"))\nimageHeight = int(input(\"Altura da imagem: \"))\n\nimage = Image.open(fileIn)\npixels = image.load()\n\nout_file = open(fileOut, \"w\")\n\nfor y in range(imageHeight):\n for x in range(imageWidth):\n out_file.write(toString(pixels[x,y]))\n","repo_name":"geniltonbarbosadasilva/projeto_graphics_card","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"4842582575","text":"import telebot\nfrom telebot import types\nimport json\nfrom multiprocessing import Process, Queue\nimport random\nimport flask\nimport os\nimport _thread\nimport emoji\nfrom sender import start_sender\nfrom functions import *\n\ntimes = [\"6:00\", \"9:00\", \"12:00\", \"15:00\", \"18:00\", \"21:00\", \"0:00\", \"Other\"]\n\ncredentials = open_json(\"safety/credentials.json\")\nTOKEN = credentials[\"main\"][\"token\"]\nbot = telebot.TeleBot(TOKEN)\n\nqueue = Queue(maxsize=100)\nmax_questions = 100\ncache = {}\nstates = {\n \"words\": \"asking words\",\n \"everyday\": \"asking time\"\n}\n\nwords 
= open_json(\"data/words.json\")\n\nmeanings = list(map(lambda x: x[\"meaning\"], words))\nwords_list = list(map(lambda x: x[\"word\"], words))\n\nposts_list = open_json(\"data/posts.json\")\npoststextlist = {v: k for k, v in enumerate(posts_list)}\n\n\nbookslist = list(open_json(\"data/books.json\"))\n\nbooktextlist = {}\nfor i, book in enumerate(bookslist):\n booktextlist[book[\"text\"]] = i\n\nofficialslist = open_json(\"data/officals.json\")\nofficialstextlist = {}\n\nfor i, official in enumerate(officialslist):\n officialstextlist[official[\"text\"]] = i\n\nbuttons = open_json(\"data/buttons.json\")\n\nwith open(\"data/sat.txt\", \"r\", encoding=\"utf-8\") as f:\n sattext = f.read()\n\nwith open(\"data/about.txt\", \"r\", encoding=\"utf-8\") as k:\n abouttext = k.read()\n\n\ndef checking_id(chat_id):\n if chat_id not in cache:\n cache[chat_id] = {\n \"dictionary\": []\n }\n\n\ndef user_in_cache(message):\n return message.json[\"chat\"][\"id\"] in cache.keys()\n\n\ndef cached_state(message, state):\n if user_in_cache(message):\n try:\n return cache[message.json[\"chat\"][\"id\"]][\"state\"] == state\n except KeyError:\n pass\n return False\n\n\ndef standard_number_questions():\n return createKeyboardWithMenu(4, [\"10\", \"30\", \"50\", \"70\"])\n\n\n@bot.message_handler(commands=[\"menu\"])\ndef menu(chat_id):\n markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\n markup.add(\n *list(\n map(\n lambda x: types.KeyboardButton(\n text=emoji.emojize(x),),\n buttons.keys()\n )\n )\n )\n bot.send_message(chat_id, \"Menu\", reply_markup=markup)\n\n\ndef what(message):\n chat_id = message.json[\"chat\"][\"id\"]\n bot.send_message(chat_id, text=sattext)\n\n\ndef about(message):\n chat_id = message.json[\"chat\"][\"id\"]\n bot.send_message(chat_id, text=abouttext)\n\n\n@bot.message_handler(func=lambda message: message.text == \"Back to menu\")\ndef back_to_menu(message):\n chat_id = message.json[\"chat\"][\"id\"]\n if user_in_cache(message):\n cache[chat_id] = pop_keys_from_dict(cache[chat_id], list(cache[chat_id].keys()))\n menu(chat_id)\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n chat_id = message.json[\"chat\"][\"id\"]\n if user_in_cache(message):\n cache.pop(chat_id)\n cache[chat_id] = {\n \"dictionary\": []\n }\n bot.send_message(chat_id, \"Hi! 
This is a bot that helps you in preparation for the SAT\")\n menu(chat_id)\n\n\n@bot.message_handler(content_types=['document'])\ndef add_book(message):\n file_id = message.document.file_id\n title = message.document.file_name\n bookslist.append({\"text\": title, \"id\": file_id})\n print(bookslist[-1])\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"menu\")\ndef menu_calling(call):\n chat_id = call.message.json[\"chat\"][\"id\"]\n menu(chat_id)\n\n\n@bot.message_handler(func=lambda message: emoji.demojize(message.text, use_aliases=True) in buttons.keys())\ndef calling(message):\n checking_id(message.json[\"chat\"][\"id\"])\n print(message.text)\n message_type = buttons[emoji.demojize(message.text, use_aliases=True)]\n if message_type == \"books\":\n books(message)\n elif message_type == \"officials\":\n officials(message)\n elif message_type == \"tests\":\n print(\"tests\")\n elif message_type == \"words\":\n words_(message)\n elif message_type == \"new_words\":\n new_word(message)\n elif message_type == \"what\":\n what(message)\n elif message_type == \"about\":\n about(message)\n elif message_type == \"everyday\":\n new_word(message)\n elif message_type == \"posts\":\n posts(message)\n elif message_type == \"dictionary\":\n show_dictionary(message)\n\n\ndef get_answer_keyboard(question, n=4, width=2):\n answers = []\n right_answer = words[words_list.index(question)][\"meaning\"]\n for i in range(n - 1):\n new_a = {\"word\": \"\"}\n while new_a[\"word\"] in [\"\", question]:\n new_a = random.choice(words)\n answers.append(new_a[\"meaning\"])\n answers.append(right_answer)\n random_answers = []\n for i in range(len(answers)):\n random_answers.append(answers.pop(random.randint(0, len(answers) - 1)))\n return createKeyboardWithMenu(width, random_answers)\n\n\ndef get_questions(n):\n questions = []\n for i in range(n):\n new_q = {\"word\": \"\"}\n while new_q[\"word\"] in [\"\"] + questions:\n new_q = random.choice(words)\n questions.append(new_q[\"word\"])\n return questions\n\n\ndef send_question(chat_id):\n current_question = cache[chat_id]['current_question']\n bot.send_message(\n chat_id,\n f\"Word #{ current_question + 1 }: {cache[chat_id]['questions'][current_question]}\",\n reply_markup=get_answer_keyboard(cache[chat_id][\"questions\"][current_question])\n )\n\n\n@bot.message_handler(func=lambda message: message.text == \"words\")\ndef words_(message):\n chat_id = message.json[\"chat\"][\"id\"]\n if \"questions\" in cache[chat_id]:\n send_question(chat_id)\n return\n cache[chat_id].update({\n \"state\": states[\"words\"],\n \"current_question\": -1,\n \"total_question\": 0,\n \"right_answers\": 0,\n \"questions\": []\n })\n bot.send_message(\n chat_id, \n \"Choose amount of questions\",\n reply_markup=standard_number_questions()\n )\n\n\n@bot.message_handler(func=lambda message: cached_state(message, states[\"words\"]))\ndef next_word(message):\n chat_id = message.json[\"chat\"][\"id\"]\n if len(cache[chat_id][\"questions\"]) == 0:\n if not is_int(message.text):\n bot.send_message(\n chat_id, \n \"Choose amount of questions\",\n reply_markup=standard_number_questions()\n )\n return\n number_questions = int(message.text)\n if number_questions > max_questions:\n bot.send_message(\n chat_id, \n \"Choose amount of questions\" + f\" not bigger than{ max_questions }\",\n reply_markup=standard_number_questions()\n )\n return\n cache[chat_id].update({\n \"state\": states[\"words\"],\n \"current_question\": 0,\n \"total_question\": number_questions,\n \"right_answers\": 0,\n 
\"questions\": get_questions(number_questions)\n })\n send_question(chat_id)\n return\n \n answer = message.text\n\n right_answer = words[words_list.index(cache[chat_id][\"questions\"][cache[chat_id][\"current_question\"]])][\"meaning\"]\n if answer == right_answer:\n bot.send_message(chat_id, emoji.emojize(\"Correct :white_check_mark:\", use_aliases=True))\n cache[chat_id][\"right_answers\"] += 1\n else:\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(\"Add to the dictionary\", callback_data=\"dictionary_append: \" +\n cache[chat_id][\"questions\"][cache[chat_id][\"current_question\"]]))\n bot.send_message(chat_id, emoji.emojize(\"Incorrect :x: \\n\", use_aliases=True) +\n \"Right answer is \" + right_answer, reply_markup=markup)\n\n if cache[chat_id][\"current_question\"] == cache[chat_id][\"total_question\"] - 1:\n bot.send_message(\n chat_id,\n f\"You have finished the test! You have { cache[chat_id]['right_answers'] }\"\n f\" out of { cache[chat_id]['total_question'] } questions\",\n reply_markup=createKeyboardWithMenu(1, [])\n )\n return\n cache[chat_id][\"current_question\"] += 1\n send_question(chat_id)\n\n\n@bot.callback_query_handler(func=lambda call: \"dictionary_append:\" in call.data)\ndef dictionary_append(call):\n chat_id = call.message.json[\"chat\"][\"id\"]\n word = \" \".join(call.data.split(\" \")[1:])\n cache[chat_id][\"dictionary\"].append(word)\n bot.answer_callback_query(call.id, word + \" successfully added\", show_alert=True)\n\n\ndef books(call):\n chat_id = call.json[\"chat\"][\"id\"]\n markup = types.InlineKeyboardMarkup()\n for book in bookslist:\n btn_book = types.InlineKeyboardButton(text=book[\"text\"], callback_data=book[\"text\"])\n markup.add(btn_book)\n bot.send_message(chat_id, emoji.emojize(\"SAT Books :exclamation:\", use_aliases=True), reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data in booktextlist.keys())\ndef send_books(call):\n chat_id = call.message.json[\"chat\"][\"id\"]\n book = bookslist[booktextlist[call.data]]\n try:\n if book[\"image_id\"]:\n bot.send_photo(chat_id, book[\"image_id\"])\n except:\n pass\n bot.send_document(chat_id, book[\"id\"])\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"officials\")\ndef officials(call):\n chat_id = call.json[\"chat\"][\"id\"]\n markup = types.InlineKeyboardMarkup()\n for offical in officialslist:\n btn_book = types.InlineKeyboardButton(text=offical[\"text\"], callback_data=offical[\"text\"])\n markup.add(btn_book)\n bot.send_message(chat_id, \"Past SAT tests\", reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data in officialstextlist.keys())\ndef send_books(call):\n chat_id = call.message.json[\"chat\"][\"id\"]\n bot.send_document(chat_id, officialslist[officialstextlist[call.data]][\"id\"])\n\n\n@bot.message_handler(commands=[\"everyday\"])\ndef new_word(message):\n chat_id = message.json[\"chat\"][\"id\"]\n bot.send_message(chat_id, \"Choose Hour\", reply_markup=createKeyboardWithMenu(row_width=4,args=times, onetime=True))\n\n\n@bot.message_handler(func= lambda message: message.text in times)\ndef set_time(message):\n chat_id = message.json[\"chat\"][\"id\"]\n text = message.text\n if text == \"Other\":\n other(message)\n else:\n time = text.split(\":\")\n sec = int(time[0]) * 3600 + int(time[1]) * 60\n smth = {\n \"sender_id\": chat_id,\n \"time\": sec,\n \"hours\": text\n }\n try:\n queue.put_nowait(smth)\n except Exception as e:\n print(e)\n\n\ndef other(message):\n chat_id = 
message.json[\"chat\"][\"id\"]\n cache[chat_id].update({\n \"state\": states[\"everyday\"]\n })\n bot.send_message(chat_id, \"Enter time in 24 hour format (e.g. 13:15)\")\n\n\n@bot.message_handler(func=lambda message: cached_state(message, states[\"everyday\"]))\ndef opt_time(message):\n chat_id = message.json[\"chat\"][\"id\"]\n text = message.text\n time = text.split(\":\")\n sec = int(time[0]) * 3600 + int(time[1]) * 60\n smth = {\n \"sender_id\": chat_id,\n \"time\": sec,\n \"hours\": text\n }\n try:\n queue.put_nowait(smth)\n except Exception as e:\n print(e)\n cache.pop(chat_id)\n\n\n@bot.message_handler(func=lambda message: message.text == \"posts\")\ndef posts(message):\n chat_id = message.json[\"chat\"][\"id\"]\n markup = types.InlineKeyboardMarkup()\n for post in posts_list:\n btn_book = types.InlineKeyboardButton(text=post[\"text\"], callback_data=post[\"text\"])\n markup.add(btn_book)\n bot.send_message(chat_id, \"Posts and advices\", reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data in poststextlist.keys())\ndef send_books(call):\n chat_id = call.message.json[\"chat\"][\"id\"]\n bot.send_document(chat_id, posts_list[poststextlist[call.data]][\"text\"])\n\n\n@bot.message_handler(func=lambda message: message.text == \"dictionary\")\ndef show_dictionary(message):\n chat_id = message.json[\"chat\"][\"id\"]\n d = cache[chat_id][\"dictionary\"]\n if len(d) == 0:\n bot.send_message(chat_id, \"There is no words in your dictionary\")\n else:\n bot.send_message(chat_id, \"Dictionary:\\n \" + \"\\n \".join(d))\n\n\ndef start_server():\n app = flask.Flask(__name__)\n\n port = int(os.getenv(\"PORT\", \"\"))\n if port == \"\":\n raise ValueError\n\n @app.route(\"/\")\n def index():\n return flask.jsonify(cache)\n\n app.run(host=\"0.0.0.0\", port=port)\n\n\nif __name__ == \"__main__\":\n p = Process(target=start_sender, args=(queue,))\n p.start()\n\n _thread.start_new_thread(start_server, ())\n bot.polling()\n p.join()\n","repo_name":"menaimar/satprepbot","sub_path":"prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7317191753","text":"\"\"\"Adds the stddev of the patches to the experiment structure\"\"\"\n\nimport os\nimport numpy as np\nimport argparse\nimport occlusion_utils as ut\nimport json\nfrom PIL import Image\n\n\ndef calculate_patch_variance(image, position):\n x_start, x_end, y_start, y_end = position\n\n return np.std(image[x_start:x_end, y_start:y_end, :])\n\n\ndef process_trial(trial, occlusion_type_index):\n patch_relative_size = ut.percentage_side_length_list[occlusion_type_index]\n patch_absolute_size = ut.occlusion_sizes_list[occlusion_type_index]\n heatmap_size = ut.heatmap_sizes_list[occlusion_type_index]\n\n list_of_positions = ut.get_list_of_occlusion_positions(\n heatmap_size, patch_absolute_size\n )\n\n fn = os.path.join(\n trial[\"queries\"],\n f\"activations_for_occlusions_of_{patch_relative_size}_percent.npy\",\n )\n\n if args.path_search != \"\":\n fn = fn.replace(args.path_search, args.path_replace)\n\n activations = np.load(fn)\n\n min_activation_idx = activations[:-1].argmin()\n max_activation_idx = activations[:-1].argmax()\n\n min_position = list_of_positions[min_activation_idx]\n max_position = list_of_positions[max_activation_idx]\n\n # load image\n query_image_path = os.path.join(trial[\"queries\"], \"query_default.png\")\n image = np.array(Image.open(query_image_path)).astype(np.float) / 
255.0\n\n min_patch_std = calculate_patch_variance(image, min_position)\n max_patch_std = calculate_patch_variance(image, max_position)\n\n trial[\"min_query_patch_std\"] = \"%.12f\" % min_patch_std\n trial[\"max_query_patch_std\"] = \"%.12f\" % max_patch_std\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--input-structure\",\n required=True,\n help=\"path to the input experiment structure json\",\n)\nparser.add_argument(\n \"--output-structure\",\n required=True,\n help=\"where to save the final experiment structure \" \"with the appended info\",\n)\nparser.add_argument(\n \"--occlusion-size\",\n required=True,\n type=int,\n choices=ut.percentage_side_length_list,\n help=\"occlusion size used for the stimuli\",\n)\nparser.add_argument(\n \"--path-search\",\n required=False,\n default=\"\",\n help=\"in case the path to the raw image data has changed, this argument can be used to \"\n \"replace parts of the path with something else.\",\n)\nparser.add_argument(\n \"--path-replace\",\n required=False,\n default=\"\",\n help=\"value to replace parts of the image paths with\",\n)\n\nargs = parser.parse_args()\n\nassert (args.path_search == \"\" and args.path_replace == \"\") or (\n args.path_search != \"\" and args.path_replace != \"\"\n)\n\nocclusion_size_index = ut.percentage_side_length_list.index(args.occlusion_size)\n\nwith open(args.input_structure, \"r\") as f:\n structure = json.load(f)\n\nfor task in structure[\"tasks\"]:\n for trial in task[\"raw_trials\"]:\n process_trial(trial, occlusion_size_index)\n for trial in task[\"raw_catch_trials\"]:\n process_trial(trial, occlusion_size_index)\n for trial in task[\"trials\"]:\n process_trial(trial, occlusion_size_index)\n\nwith open(args.output_structure, \"w\") as f:\n json.dump(structure, f)\n","repo_name":"brendel-group/causal-understanding-via-visualizations","sub_path":"tools/data-generation/causal-occlusion/add_variance_baseline_values_to_structure.py","file_name":"add_variance_baseline_values_to_structure.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"62"} +{"seq_id":"35578202468","text":"from datetime import datetime\nfrom typing import Optional, Dict, Any, Union\n\nimport pandas as pd\n\nfrom feathub.feature_tables.feature_table import FeatureTable\nfrom feathub.table.table_descriptor import TableDescriptor\n\n\nclass FeathubJobDescriptor:\n \"\"\"Descriptor of a FeatHub job to run in a remote Flink cluster.\"\"\"\n\n def __init__(\n self,\n features: TableDescriptor,\n keys: Union[pd.DataFrame, TableDescriptor, None],\n start_datetime: Optional[datetime],\n end_datetime: Optional[datetime],\n sink: FeatureTable,\n local_registry_tables: Dict[str, TableDescriptor],\n allow_overwrite: bool,\n props: Dict,\n ):\n \"\"\"\n Instantiate a FeathubJobDescriptor.\n\n :param features: The table descriptor that contains the features to compute.\n :param keys: Optional. If it is TableDescriptor or DataFrame, it should be\n transformed into a table of keys. If it is not None, the\n table only include rows whose key fields match at least one\n row of the keys.\n :param start_datetime: Optional. If it is not None, the `features` table should\n have a timestamp field. And the table will only\n include features whose\n timestamp >= start_datetime. If any field (e.g. minute)\n is not specified in the start_datetime, we assume this\n field has the minimum possible value.\n :param end_datetime: Optional. 
If it is not None, the `features` table should\n have a timestamp field. And the table will only\n include features whose timestamp < end_datetime. If any\n field (e.g. minute) is not specified in the end_datetime,\n we assume this field has the maximum possible value.\n :param sink: Where the features write to.\n :param local_registry_tables: All the table descriptors registered in the local\n registry that are required to compute the given\n table.\n :param allow_overwrite: If it is false, throw error if the features collide with\n existing data in the given sink.\n :param props: All properties of FeatHub.\n \"\"\"\n self.features = features\n self.keys = keys\n self.start_datetime = start_datetime\n self.end_datetime = end_datetime\n self.sink = sink\n self.local_registry_tables = local_registry_tables\n self.allow_overwrite = allow_overwrite\n self.props = props\n\n def __eq__(self, other: Any) -> bool:\n return (\n isinstance(other, FeathubJobDescriptor)\n and self.features == other.features\n and self.keys == other.keys\n and self.start_datetime == other.start_datetime\n and self.end_datetime == other.end_datetime\n and self.sink == other.sink\n and self.local_registry_tables == other.local_registry_tables\n and self.allow_overwrite == other.allow_overwrite\n and self.props == other.props\n )\n","repo_name":"shxinding/feathub","sub_path":"python/feathub/processors/flink/job_submitter/feathub_job_descriptor.py","file_name":"feathub_job_descriptor.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"22408374125","text":"from probables import BloomFilter\n\nsheep_seen_bloom = BloomFilter(\n est_elements=200000, false_positive_rate=0.01\n)\n\nfor m in range(0, 100000):\n sheep_id = str(m)\n sheep_seen_bloom.add(sheep_id)\n\ndef have_i_seen(sheep_id):\n if sheep_seen_bloom.check(sheep_id):\n print(f\"I might have seen sheep {sheep_id}.\")\n else:\n print(f\"I have not seen sheep {sheep_id}.\")\n\nhave_i_seen(\"9018\")\nhave_i_seen(\"454991\")","repo_name":"simonprickett/python-probabilistic-data-structures","sub_path":"approximating_sheep_python/have_i_see_this_one.py","file_name":"have_i_see_this_one.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"62"} +{"seq_id":"34194563307","text":"class Solution:\n def findMinArrowShots(self, points: List[List[int]]) -> int:\n sorted_points = sorted(points, key = lambda x: (x[1], x[1] - x[0]))\n shot_pointer = -float('inf')\n arrow = 0\n\n for baloon in sorted_points:\n if baloon[0] > shot_pointer:\n shot_pointer = baloon[1]\n arrow += 1\n \n return arrow\n \n","repo_name":"kenenisa/CompetitiveProgramming","sub_path":"Day26/MinimumNumberofArrowstoBurstBalloons.py","file_name":"MinimumNumberofArrowstoBurstBalloons.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"861379980","text":"#################\n## hw5 #\n## r06921048 #\n## Yo-Chi Lee #\n#################\n\n\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\nimport timeit\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef printf(format, *args):\n sys.stdout.write(format % args)\n\ndef Parse(trainCSV,testCSV,hwNumber) : \n data_train=np.genfromtxt(trainCSV,delimiter=',',dtype=str)\n 
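    # (added note: both CSVs are loaded as raw strings here; the hwNumber branches
    # below convert columns to floats, or to ord() character codes for the
    # categorical fields, e.g. ord('A') == 65)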
data_test=np.genfromtxt(testCSV,delimiter=',',dtype=str) \n\n \n x_train=[]\n y_train=[]\n x_test=[]\n y_test=[]\n \n if hwNumber=='3':\n for i in range(len(data_train)):\n data_list=list(data_train[i])\n target=data_list.pop()\n y_train.append(float(target))\n for j in range(len(data_list)):\n if j==0:\n data_list[j]=ord(data_list[j])\n else:\n data_list[j]=float(data_list[j])\n x_train.append(data_list)\n\n for i in range(len(data_test)):\n data_list=list(data_test[i])\n target=data_list.pop()\n y_test.append(float(target))\n for j in range(len(data_list)):\n if j==0:\n data_list[j]=ord(data_list[j])\n else:\n data_list[j]=float(data_list[j])\n\n x_test.append(data_list)\n \n elif hwNumber=='4':\n for i in range(len(data_train)):\n data_list=list(data_train[i])\n target=data_list.pop()\n y_train.append(float(target))\n for j in range(len(data_list)):\n number=0\n for k in data_list[j]:\n number=number+ord(k)\n \n data_list[j]=number\n x_train.append(data_list)\n boundary=len(x_train)\n for i in range(len(data_test)):\n data_list=list(data_test[i])\n #target=data_list.pop()\n y_test.append(float(0))\n for j in range(len(data_list)):\n number=0\n for k in data_list[j]:\n number=number+ord(k)\n\n data_list[j]=number\n x_test.append(data_list)\n x_train.append(data_list)\n \n imp = SimpleImputer(missing_values=95, strategy='mean')\n imp = imp.fit(x_train)\n x_train = imp.transform(x_train)\n\n imp = SimpleImputer(missing_values=95, strategy='mean')\n imp = imp.fit(x_test)\n x_test = imp.transform(x_test)\n df = pd.DataFrame(np.array(x_train))\n for i in range(0,len(x_train[0])):\n\n one_hot=pd.get_dummies(df[i] , prefix='c'+str(i) )\n df=df.join(one_hot)\n for i in range (0,len(x_train[0])):\n df=df.drop(df.columns[0],axis=1)\n\n x_total=np.array(df)\n x_train=[]\n x_test=[]\n for i in range (0,len(x_total)):\n if i 1:\n lt,gt = heap.pop(),heap.pop()\n ins = lt.insert(gt)\n heap.insert(ins)\n return heap.pop()\n\n def build_maps(self,tree):\n ab = {}\n def dfs(curr,sta):\n if curr.left:\n dfs(curr.left,sta+'0') #add 0\n if curr.right:\n dfs(curr.right,sta+'1') #add 1\n if curr.isleaf():\n ab[curr.char] = sta\n dfs(tree,'')\n #debug(f'key 0::> {ab[0]}')\n return ab\n\n\n","repo_name":"AhmedAmrMohamed/data-compressor","sub_path":"src/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"255145350","text":"#adds sqlite3\r\nimport sqlite3\r\n\r\n#creates database\r\nconnection = sqlite3.connect('database.db')\r\n\r\n#opens schema file\r\nwith open('schema.sql') as f:\r\n connection.executescript(f.read())\r\n\r\n#updates and closes schema file\r\nconnection.commit()\r\nconnection.close()","repo_name":"AnaySharma01/GUI_Python_Project","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72743415237","text":"class Solution:\n def gridCheck(self, grid: List[List[int]]) -> int:\n current=[]\n row_sums=[]\n for row in grid:\n row_sums.append(sum(row))\n current.extend(row)\n current.sort()\n diag_sum1=grid[0][0]+grid[2][2]+grid[1][1]\n diag_sum2=grid[2][0]+grid[0][2]+grid[1][1]\n col_sums = [sum([row[i] for row in grid]) for i in range(3)]\n row_sums.extend(col_sums)\n row_sums.extend([diag_sum1,diag_sum2])\n result=len(set(row_sums))\n if result == 1 and current== list(range(1,10)):\n return True\n else:\n return False\n 
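    # (added sketch, not part of the original snippet: the Lo Shu magic square is a
    # handy sanity check for gridCheck(), e.g.
    #   Solution().gridCheck([[4, 9, 2], [3, 5, 7], [8, 1, 6]])   # -> True
    #   Solution().gridCheck([[1, 2, 3], [4, 5, 6], [7, 8, 9]])   # -> False, sums differ)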
\n def numMagicSquaresInside(self, grid: List[List[int]]) -> int:\n result = 0\n for row in range(len(grid)-2):\n for col in range(len(grid)-2):\n square = [grid[row+index][col:col+3] for index in range(3)]\n if self.gridCheck(square):\n result += 1\n \n return result\n\n \n \n \n \n ","repo_name":"Yabsera-Haile/A2SV","sub_path":"0840-magic-squares-in-grid/0840-magic-squares-in-grid.py","file_name":"0840-magic-squares-in-grid.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"32075884337","text":"print(\"Welcome to the rollercoaster!\")\nheight = int(input(\"What is your height in cm? \"))\nif height>= 120:\n print(\"You can ride the rollercoaster\")\nelse:\n print(\"You can not ride the rollercoaster\")\n\n\n\nprint(\"Welcome to the rollercoaster!\")\nheight = int(input(\"What is your height in cm? \"))\nif height>= 120:\n print(\"You can ride the rollercoaster\")\n age=int(input(\"What is your age in years? \"))\n if age<12:\n print(\"You have to pay $5\")\n elif age<=18:\n print(\"You have to pay $7\")\n else:\n print(\"Pay $12\")\nelse:\n print(\"Sorry your height is short\")\n\n\n\n\n\n\n\n\n# 🚨 Don't change the code below 👇\nnumber = int(input(\"Which number do you want to check? \"))\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nif number%2==0:\n print(\"This is an even number\")\nelse:\n print(\"This is an odd number.\")\n\n\n\n\n\n# 🚨 Don't change the code below 👇\nheight = float(input(\"enter your height in m: \"))\nweight = float(input(\"enter your weight in kg: \"))\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nbmi=weight/(height**2)\nnbmi=round(bmi)\nif bmi<18.5:\n print(f\"Your BMI is {nbmi}, you are underweight.\")\nelif bmi<25:\n print(f\"Your BMI is {nbmi}, you have a normal weight.\")\nelif bmi<30:\n print(f\"Your BMI is {nbmi}, you are slightly overweight.\")\nelif bmi<35:\n print(f\"Your BMI is {nbmi}, you are obese.\")\nelif bmi>35:\n print(f\"Your BMI is {nbmi}, you are clinically obese.\")\n\n\n\n\n\n# 🚨 Don't change the code below 👇\nyear = int(input(\"Which year do you want to check? \"))\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nif(year%4==0&(year%100==0|year%400==0)):\n print(\"Leap year.\")\nelse:\n print(\"Not leap year.\")\n\n\n\n\nprint(\"Welcome to the rollercoaster!\")\nheight = int(input(\"What is your height in cm? \"))\nbill = 0\n\nif height >= 120:\n print(\"You can ride the rollercoaster!\")\n age = int(input(\"What is your age? \"))\n if age < 12:\n bill = 5\n print(\"Child tickets are $5.\")\n elif age <= 18:\n bill = 7\n print(\"Youth tickets are $7.\")\n else:\n bill = 12\n print(\"Adult tickets are $12.\")\n \n wants_photo = input(\"Do you want a photo taken? Y or N. \")\n if wants_photo == \"Y\":\n bill += 3\n \n print(f\"Your final bill is ${bill}\")\n\nelse:\n print(\"Sorry, you have to grow taller before you can ride.\")\n\n\n\n\n# 🚨 Don't change the code below 👇\nprint(\"Welcome to Python Pizza Deliveries!\")\nsize = input(\"What size pizza do you want? S, M, or L \")\nadd_pepperoni = input(\"Do you want pepperoni? Y or N \")\nextra_cheese = input(\"Do you want extra cheese? 
Y or N \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nif size==\"S\":\n bill=15\n if add_pepperoni==\"Y\":\n bill +=2\nelif size==\"M\":\n bill=20\n if add_pepperoni==\"Y\":\n bill +=3\nelif size==\"L\":\n bill=25\n if add_pepperoni==\"Y\":\n bill +=3\n \nif extra_cheese==\"Y\":\n bill+=1\n print(f\"Your final bill is: ${bill}\")\nelse:\n print(f\"Your final bill is: ${bill}\")\n\n \n\n\n# 🚨 Don't change the code below 👇\nprint(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? \\n\")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nn1=name1.lower()\nt1=n1.count('t')\nn2=name2.lower()\nt2=n2.count('t')\nt=t1+t2\nn1=name1.lower()\nr1=n1.count('r')\nn2=name2.lower()\nr2=n2.count('r')\nr=r1+r2\nn1=name1.lower()\nu1=n1.count('u')\nn2=name2.lower()\nu2=n2.count('u')\nu=u1+u2\nn1=name1.lower()\ne1=n1.count('e')\nn2=name2.lower()\ne2=n2.count('e')\ne=e1+e2\ntotal1=t+r+u+e\nn1=name1.lower()\nl1=n1.count('l')\nn2=name2.lower()\nl2=n2.count('l')\nl=l1+l2\nn1=name1.lower()\no1=n1.count('o')\nn2=name2.lower()\no2=n2.count('o')\no=o1+o2\nn1=name1.lower()\nv1=n1.count('v')\nn2=name2.lower()\nv2=n2.count('v')\nv=v1+v2\ntotal2=l+o+v+e\nscore=int((f\"{total1}\"+f\"{total2}\"))\nif(score<10 or score>90):\n print(f\"Your score is {score}, you go together like coke and mentos.\")\nelif(score>=40 and score<=50):\n print(f\"Your score is {score}, you are alright together.\")\nelse:\n print(f\"Your score is {score}\")\n\n\n\n\n\n","repo_name":"Shritik3/Python_100_DAYS-Code","sub_path":"DAY-3/conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19713257746","text":"\"\"\"\n@author: Arkan M. 
Gerges\n\"\"\"\nimport random\n# https://www.youtube.com/watch?v=dQK0VLahrDk&list=PLXs6ze70rLY9u0X6qz_91bCvsjq3Kqn_O&index=5\nimport threading\nfrom datetime import datetime\n\nimport src.port_adapter.AppDi as AppDi\nimport src.resource.proto._generated\nfrom src.port_adapter.api.grpc.listener.DailyCheckProcedureAppServiceListener import (\n DailyCheckProcedureAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.DailyCheckProcedureOperationAppServiceListener import (\n DailyCheckProcedureOperationAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.DailyCheckProcedureOperationParameterAppServiceListener import (\n DailyCheckProcedureOperationParameterAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.EquipmentAppServiceListener import (\n EquipmentAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.EquipmentCategoryGroupAppServiceListener import (\n EquipmentCategoryGroupAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.EquipmentInputAppServiceListener import (\n EquipmentInputAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.EquipmentModelAppServiceListener import (\n EquipmentModelAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.EquipmentProjectCategoryAppServiceListener import (\n EquipmentProjectCategoryAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.MaintenanceProcedureAppServiceListener import (\n MaintenanceProcedureAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.MaintenanceProcedureOperationAppServiceListener import (\n MaintenanceProcedureOperationAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.MaintenanceProcedureOperationParameterAppServiceListener import (\n MaintenanceProcedureOperationParameterAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.ManufacturerAppServiceListener import (\n ManufacturerAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.OrganizationAppServiceListener import (\n OrganizationAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.ProjectAppServiceListener import (\n ProjectAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.RoleAppServiceListener import (\n RoleAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardEquipmentAppServiceListener import (\n StandardEquipmentAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardEquipmentCategoryAppServiceListener import (\n StandardEquipmentCategoryAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardEquipmentCategoryGroupAppServiceListener import (\n StandardEquipmentCategoryGroupAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardEquipmentProjectCategoryAppServiceListener import (\n StandardEquipmentProjectCategoryAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardMaintenanceProcedureAppServiceListener import (\n StandardMaintenanceProcedureAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardMaintenanceProcedureOperationAppServiceListener import \\\n StandardMaintenanceProcedureOperationAppServiceListener\nfrom src.port_adapter.api.grpc.listener.StandardMaintenanceProcedureOperationParameterAppServiceListener import \\\n StandardMaintenanceProcedureOperationParameterAppServiceListener\nfrom src.port_adapter.api.grpc.listener.SubcontractorAppServiceListener import (\n SubcontractorAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.SubcontractorCategoryAppServiceListener import (\n 
SubcontractorCategoryAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.TagAppServiceListener import (\n TagAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.StandardMaintenanceProcedureOperationLabelAppServiceListener import (\n StandardMaintenanceProcedureOperationLabelAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.MaintenanceProcedureOperationLabelAppServiceListener import (\n MaintenanceProcedureOperationLabelAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.DailyCheckProcedureOperationLabelAppServiceListener import (\n DailyCheckProcedureOperationLabelAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.UnitAppServiceListener import (\n UnitAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.UserAppServiceListener import (\n UserAppServiceListener,\n)\nfrom src.port_adapter.api.grpc.listener.lookup.DailyCheckProcedureLookupAppServiceListener import \\\n DailyCheckProcedureLookupAppServiceListener\nfrom src.port_adapter.api.grpc.listener.lookup.EquipmentLookupAppServiceListener import \\\n EquipmentLookupAppServiceListener\nfrom src.port_adapter.api.grpc.listener.lookup.OrganizationLookupAppServiceListener import \\\n OrganizationLookupAppServiceListener\nfrom src.port_adapter.api.grpc.listener.lookup.ProjectLookupAppServiceListener import (\n ProjectLookupAppServiceListener, )\nfrom src.port_adapter.api.grpc.listener.lookup.SubcontractorLookupAppServiceListener import \\\n SubcontractorLookupAppServiceListener\nfrom src.port_adapter.api.grpc.listener.lookup.UserLookupAppServiceListener import (\n UserLookupAppServiceListener,\n)\nfrom src.resource.logging.LogProcessor import LogProcessor\nfrom src.resource.logging.opentelemetry.OpenTelemetry import OpenTelemetry\nfrom src.resource.proto._generated.project.daily_check_procedure_app_service_pb2_grpc import (\n add_DailyCheckProcedureAppServiceServicer_to_server,\n)\nfrom src.resource.proto._generated.project.daily_check_procedure_operation_app_service_pb2_grpc import (\n add_DailyCheckProcedureOperationAppServiceServicer_to_server,\n)\nfrom src.resource.proto._generated.project.daily_check_procedure_operation_parameter_app_service_pb2_grpc import (\n add_DailyCheckProcedureOperationParameterAppServiceServicer_to_server,\n)\nfrom src.resource.proto._generated.project.equipment_app_service_pb2_grpc import \\\n add_EquipmentAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.equipment_category_group_app_service_pb2_grpc import \\\n add_EquipmentCategoryGroupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.equipment_input_app_service_pb2_grpc import \\\n add_EquipmentInputAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.equipment_model_app_service_pb2_grpc import \\\n add_EquipmentModelAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.equipment_project_category_app_service_pb2_grpc import \\\n add_EquipmentProjectCategoryAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.lookup.daily_check_procedure.daily_check_procedure_lookup_app_service_pb2_grpc import \\\n add_DailyCheckProcedureLookupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.lookup.equipment.equipment_lookup_app_service_pb2_grpc import \\\n add_EquipmentLookupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.lookup.organization.organization_lookup_app_service_pb2_grpc import \\\n add_OrganizationLookupAppServiceServicer_to_server\nfrom 
src.resource.proto._generated.project.lookup.project.project_lookup_app_service_pb2_grpc import \\\n add_ProjectLookupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.lookup.subcontractor.subcontractor_lookup_app_service_pb2_grpc import \\\n add_SubcontractorLookupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.lookup.user.user_lookup_app_service_pb2_grpc import \\\n add_UserLookupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.maintenance_procedure_app_service_pb2_grpc import \\\n add_MaintenanceProcedureAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.maintenance_procedure_operation_app_service_pb2_grpc import \\\n add_MaintenanceProcedureOperationAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.maintenance_procedure_operation_parameter_app_service_pb2_grpc import \\\n add_MaintenanceProcedureOperationParameterAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.manufacturer_app_service_pb2_grpc import \\\n add_ManufacturerAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.organization_app_service_pb2_grpc import \\\n add_OrganizationAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.project_app_service_pb2_grpc import add_ProjectAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.role_app_service_pb2_grpc import add_RoleAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_equipment_app_service_pb2_grpc import \\\n add_StandardEquipmentAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_equipment_category_app_service_pb2_grpc import \\\n add_StandardEquipmentCategoryAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_equipment_category_group_app_service_pb2_grpc import \\\n add_StandardEquipmentCategoryGroupAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_equipment_project_category_app_service_pb2_grpc import \\\n add_StandardEquipmentProjectCategoryAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_maintenance_procedure_app_service_pb2_grpc import \\\n add_StandardMaintenanceProcedureAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_maintenance_procedure_operation_app_service_pb2_grpc import \\\n add_StandardMaintenanceProcedureOperationAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.standard_maintenance_procedure_operation_parameter_app_service_pb2_grpc import \\\n add_StandardMaintenanceProcedureOperationParameterAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.subcontractor_app_service_pb2_grpc import \\\n add_SubcontractorAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.subcontractor_category_app_service_pb2_grpc import \\\n add_SubcontractorCategoryAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.tag_app_service_pb2_grpc import add_TagAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.unit_app_service_pb2_grpc import add_UnitAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.user_app_service_pb2_grpc import add_UserAppServiceServicer_to_server\n\nfrom src.resource.proto._generated.project.standard_maintenance_procedure_operation_label_app_service_pb2_grpc import add_StandardMaintenanceProcedureOperationLabelAppServiceServicer_to_server\nfrom 
src.resource.proto._generated.project.maintenance_procedure_operation_label_app_service_pb2_grpc import add_MaintenanceProcedureOperationLabelAppServiceServicer_to_server\nfrom src.resource.proto._generated.project.daily_check_procedure_operation_label_app_service_pb2_grpc import add_DailyCheckProcedureOperationLabelAppServiceServicer_to_server\n\n\n\"\"\"The Python implementation of the GRPC Seans-gRPC server.\"\"\"\nfrom concurrent import futures\n\nimport grpc\nfrom grpc_reflection.v1alpha import reflection\n\n\nfrom src.resource.logging.logger import logger\n\n\ndef serve():\n \"\"\"The main serve function of the server.\n This opens the socket, and listens for incoming grpc conformant packets\"\"\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))\n add_ProjectAppServiceServicer_to_server(ProjectAppServiceListener(), server)\n add_UserAppServiceServicer_to_server(UserAppServiceListener(), server)\n add_OrganizationAppServiceServicer_to_server(\n OrganizationAppServiceListener(), server\n )\n add_UserLookupAppServiceServicer_to_server(UserLookupAppServiceListener(), server)\n add_ProjectLookupAppServiceServicer_to_server(\n ProjectLookupAppServiceListener(), server\n )\n add_OrganizationLookupAppServiceServicer_to_server(\n OrganizationLookupAppServiceListener(), server\n )\n add_EquipmentModelAppServiceServicer_to_server(\n EquipmentModelAppServiceListener(), server\n )\n add_ManufacturerAppServiceServicer_to_server(\n ManufacturerAppServiceListener(), server\n )\n add_EquipmentProjectCategoryAppServiceServicer_to_server(\n EquipmentProjectCategoryAppServiceListener(), server\n )\n add_EquipmentCategoryGroupAppServiceServicer_to_server(\n EquipmentCategoryGroupAppServiceListener(), server\n )\n add_EquipmentAppServiceServicer_to_server(EquipmentAppServiceListener(), server)\n add_UnitAppServiceServicer_to_server(UnitAppServiceListener(), server)\n add_EquipmentInputAppServiceServicer_to_server(\n EquipmentInputAppServiceListener(), server\n )\n add_SubcontractorAppServiceServicer_to_server(\n SubcontractorAppServiceListener(), server\n )\n add_MaintenanceProcedureAppServiceServicer_to_server(\n MaintenanceProcedureAppServiceListener(), server\n )\n add_MaintenanceProcedureOperationAppServiceServicer_to_server(\n MaintenanceProcedureOperationAppServiceListener(), server\n )\n add_MaintenanceProcedureOperationParameterAppServiceServicer_to_server(\n MaintenanceProcedureOperationParameterAppServiceListener(), server\n )\n add_DailyCheckProcedureAppServiceServicer_to_server(\n DailyCheckProcedureAppServiceListener(), server\n )\n add_DailyCheckProcedureOperationAppServiceServicer_to_server(\n DailyCheckProcedureOperationAppServiceListener(), server\n )\n add_DailyCheckProcedureOperationParameterAppServiceServicer_to_server(\n DailyCheckProcedureOperationParameterAppServiceListener(), server\n )\n add_StandardMaintenanceProcedureAppServiceServicer_to_server(\n StandardMaintenanceProcedureAppServiceListener(), server\n )\n add_StandardMaintenanceProcedureOperationAppServiceServicer_to_server(\n StandardMaintenanceProcedureOperationAppServiceListener(), server\n )\n add_StandardMaintenanceProcedureOperationParameterAppServiceServicer_to_server(\n StandardMaintenanceProcedureOperationParameterAppServiceListener(), server\n )\n add_SubcontractorCategoryAppServiceServicer_to_server(\n SubcontractorCategoryAppServiceListener(), server\n )\n add_StandardEquipmentAppServiceServicer_to_server(\n StandardEquipmentAppServiceListener(), server\n )\n 
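    # Editor's aside (a sketch, not the author's code): every registration in
    # this function follows the same add_<Name>Servicer_to_server(Listener(),
    # server) pattern, so the block could be data-driven. Assuming a
    # hypothetical SERVICERS list of (add_fn, listener_cls) pairs built from
    # the imports above:
    #   for add_fn, listener_cls in SERVICERS:
    #       add_fn(listener_cls(), server)
    # The explicit calls are kept below as the author wrote them.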
add_StandardEquipmentCategoryAppServiceServicer_to_server(\n StandardEquipmentCategoryAppServiceListener(), server\n )\n add_StandardEquipmentCategoryGroupAppServiceServicer_to_server(\n StandardEquipmentCategoryGroupAppServiceListener(), server\n )\n add_StandardEquipmentProjectCategoryAppServiceServicer_to_server(\n StandardEquipmentProjectCategoryAppServiceListener(), server\n )\n add_RoleAppServiceServicer_to_server(RoleAppServiceListener(), server)\n add_TagAppServiceServicer_to_server(TagAppServiceListener(), server)\n\n add_StandardMaintenanceProcedureOperationLabelAppServiceServicer_to_server(StandardMaintenanceProcedureOperationLabelAppServiceListener(), server)\n add_MaintenanceProcedureOperationLabelAppServiceServicer_to_server(MaintenanceProcedureOperationLabelAppServiceListener(), server)\n add_DailyCheckProcedureOperationLabelAppServiceServicer_to_server(DailyCheckProcedureOperationLabelAppServiceListener(), server)\n\n\n add_SubcontractorLookupAppServiceServicer_to_server(\n SubcontractorLookupAppServiceListener(), server\n )\n\n add_EquipmentLookupAppServiceServicer_to_server(\n EquipmentLookupAppServiceListener(), server\n )\n\n add_DailyCheckProcedureLookupAppServiceServicer_to_server(\n DailyCheckProcedureLookupAppServiceListener(), server\n )\n\n SERVICE_NAMES = (\n\n\n\n src.resource.proto._generated.project.standard_equipment_project_category_app_service_pb2.DESCRIPTOR.services_by_name[\"StandardEquipmentProjectCategoryAppService\"].full_name,\n src.resource.proto._generated.project.daily_check_procedure_operation_app_service_pb2.DESCRIPTOR.services_by_name['DailyCheckProcedureOperationAppService'].full_name,\n src.resource.proto._generated.project.daily_check_procedure_operation_parameter_app_service_pb2.DESCRIPTOR.services_by_name['DailyCheckProcedureOperationParameterAppService'].full_name,\n src.resource.proto._generated.project.equipment_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentAppService'].full_name,\n src.resource.proto._generated.project.equipment_category_group_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentCategoryGroupAppService'].full_name,\n src.resource.proto._generated.project.equipment_input_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentInputAppService'].full_name,\n src.resource.proto._generated.project.equipment_model_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentModelAppService'].full_name,\n src.resource.proto._generated.project.equipment_project_category_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentProjectCategoryAppService'].full_name,\n src.resource.proto._generated.project.maintenance_procedure_app_service_pb2.DESCRIPTOR.services_by_name['MaintenanceProcedureAppService'].full_name,\n src.resource.proto._generated.project.maintenance_procedure_operation_app_service_pb2.DESCRIPTOR.services_by_name['MaintenanceProcedureOperationAppService'].full_name,\n src.resource.proto._generated.project.maintenance_procedure_operation_parameter_app_service_pb2.DESCRIPTOR.services_by_name['MaintenanceProcedureOperationParameterAppService'].full_name,\n src.resource.proto._generated.project.manufacturer_app_service_pb2.DESCRIPTOR.services_by_name['ManufacturerAppService'].full_name,\n src.resource.proto._generated.project.organization_app_service_pb2.DESCRIPTOR.services_by_name['OrganizationAppService'].full_name,\n src.resource.proto._generated.project.project_app_service_pb2.DESCRIPTOR.services_by_name['ProjectAppService'].full_name,\n 
src.resource.proto._generated.project.subcontractor_app_service_pb2.DESCRIPTOR.services_by_name['SubcontractorAppService'].full_name,\n src.resource.proto._generated.project.unit_app_service_pb2.DESCRIPTOR.services_by_name['UnitAppService'].full_name,\n src.resource.proto._generated.project.user_app_service_pb2.DESCRIPTOR.services_by_name['UserAppService'].full_name,\n src.resource.proto._generated.project.standard_maintenance_procedure_app_service_pb2.DESCRIPTOR.services_by_name['StandardMaintenanceProcedureAppService'].full_name,\n src.resource.proto._generated.project.standard_maintenance_procedure_operation_app_service_pb2.DESCRIPTOR.services_by_name['StandardMaintenanceProcedureOperationAppService'].full_name,\n src.resource.proto._generated.project.standard_maintenance_procedure_operation_parameter_app_service_pb2.DESCRIPTOR.services_by_name['StandardMaintenanceProcedureOperationParameterAppService'].full_name,\n src.resource.proto._generated.project.subcontractor_category_app_service_pb2.DESCRIPTOR.services_by_name['SubcontractorCategoryAppService'].full_name,\n src.resource.proto._generated.project.standard_equipment_app_service_pb2.DESCRIPTOR.services_by_name['StandardEquipmentAppService'].full_name,\n src.resource.proto._generated.project.standard_equipment_category_app_service_pb2.DESCRIPTOR.services_by_name['StandardEquipmentCategoryAppService'].full_name,\n src.resource.proto._generated.project.standard_equipment_category_group_app_service_pb2.DESCRIPTOR.services_by_name['StandardEquipmentCategoryGroupAppService'].full_name,\n src.resource.proto._generated.project.role_app_service_pb2.DESCRIPTOR.services_by_name['RoleAppService'].full_name,\n src.resource.proto._generated.project.tag_app_service_pb2.DESCRIPTOR.services_by_name['TagAppService'].full_name,\n src.resource.proto._generated.project.daily_check_procedure_app_service_pb2.DESCRIPTOR.services_by_name['DailyCheckProcedureAppService'].full_name,\n\n src.resource.proto._generated.project.daily_check_procedure_operation_label_app_service_pb2.DESCRIPTOR.services_by_name['DailyCheckProcedureOperationLabelAppService'].full_name,\n src.resource.proto._generated.project.maintenance_procedure_operation_label_app_service_pb2.DESCRIPTOR.services_by_name['MaintenanceProcedureOperationLabelAppService'].full_name,\n src.resource.proto._generated.project.standard_maintenance_procedure_operation_label_app_service_pb2.DESCRIPTOR.services_by_name['StandardMaintenanceProcedureOperationLabelAppService'].full_name,\n\n\n # Lookups\n src.resource.proto._generated.project.lookup.user.user_lookup_app_service_pb2.DESCRIPTOR.services_by_name['UserLookupAppService'].full_name,\n src.resource.proto._generated.project.lookup.project.project_lookup_app_service_pb2.DESCRIPTOR.services_by_name['ProjectLookupAppService'].full_name,\n src.resource.proto._generated.project.lookup.organization.organization_lookup_app_service_pb2.DESCRIPTOR.services_by_name['OrganizationLookupAppService'].full_name,\n src.resource.proto._generated.project.lookup.equipment.equipment_lookup_app_service_pb2.DESCRIPTOR.services_by_name['EquipmentLookupAppService'].full_name,\n src.resource.proto._generated.project.lookup.subcontractor.subcontractor_lookup_app_service_pb2.DESCRIPTOR.services_by_name['SubcontractorLookupAppService'].full_name,\n src.resource.proto._generated.project.lookup.daily_check_procedure.daily_check_procedure_lookup_app_service_pb2.DESCRIPTOR.services_by_name['DailyCheckProcedureLookupAppService'].full_name,\n\n reflection.SERVICE_NAME,\n )\n 
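    # Editor's note (not original code): enable_server_reflection below
    # publishes the SERVICE_NAMES tuple through the gRPC reflection API, so
    # generic clients can discover RPCs without the compiled stubs. Assuming
    # grpcurl is installed and the server is reachable on the port bound
    # further down, a typical probe would be:
    #   grpcurl -plaintext localhost:9999 list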
reflection.enable_server_reflection(SERVICE_NAMES, server)\n\n port = \"[::]:9999\"\n server.add_insecure_port(port)\n logger.info(f\"Project microservice grpc server started/restarted on port {port}\")\n server.start()\n\n # try:\n # while True:\n # print(\"Server Running : threadcount %i\" % (threading.active_count()))\n # time.sleep(10)\n # except KeyboardInterrupt:\n # print(\"KeyboardInterrupt\")\n # server.stop(0)\n server.wait_for_termination()\n\n\nif __name__ == \"__main__\":\n random.seed(datetime.utcnow().timestamp())\n openTelemetry = AppDi.instance.get(OpenTelemetry)\n\n # region Logger\n import src.resource.Di as Di\n\n logProcessor = Di.instance.get(LogProcessor)\n thread = threading.Thread(target=logProcessor.start)\n thread.start()\n # endregion\n\n serve()\n","repo_name":"arkanmgerges/cafm.project","sub_path":"src/port_adapter/api/grpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":22499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19389696551","text":"class Quiz:\n def __init__(self):\n self.questions = []\n\n def add_question(self, question, choices, correct_choice):\n self.questions.append({\n 'question': question,\n 'choices': choices,\n 'correct_choice': correct_choice\n })\n print(\"Question added successfully!\")\n\n def take_quiz(self):\n if not self.questions:\n print(\"No questions available in the quiz.\")\n return\n\n score = 0\n total_questions = len(self.questions)\n\n print(f\"\\n--- Quiz ({total_questions} Questions) ---\")\n\n for idx, q in enumerate(self.questions, start=1):\n print(f\"\\nQuestion {idx}: {q['question']}\")\n for choice_num, choice in enumerate(q['choices'], start=1):\n print(f\"{choice_num}. {choice}\")\n\n user_choice = int(input(\"Enter your choice (1-4): \")) - 1\n\n if 0 <= user_choice < len(q['choices']):\n if user_choice == q['correct_choice']:\n print(\"Correct!\")\n score += 1\n else:\n print(f\"Wrong! The correct answer is: {q['choices'][q['correct_choice']]}\\n\")\n\n print(f\"\\n--- Quiz Results ---\")\n print(f\"Total Questions: {total_questions}\")\n print(f\"Correct Answers: {score}\")\n print(f\"Your Score: {score}/{total_questions}\")\n\ndef main():\n quiz = Quiz()\n\n while True:\n print(\"\\n--- Quiz Application Menu ---\")\n print(\"1. Add a question to the quiz\")\n print(\"2. Take the quiz\")\n print(\"3. Exit\")\n\n choice = int(input(\"Enter your choice (1-3): \"))\n\n if choice == 1:\n question = input(\"Enter the question: \")\n choices = [input(f\"Enter choice {i}: \") for i in range(1, 5)]\n correct_choice = int(input(\"Enter the correct choice (1-4): \")) - 1\n quiz.add_question(question, choices, correct_choice)\n\n elif choice == 2:\n quiz.take_quiz()\n\n elif choice == 3:\n print(\"Exiting the Quiz Application.\")\n break\n\n else:\n print(\"Invalid choice. 
Please choose again.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"arpita837/Quiz-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26975213938","text":"allowed_gammas = [1.5, 3, 6]\n\nspias_type = 'nominal_spias'\n\ndef scenario_space_update_params(model_params, control_params):\n\n model_params[spias_type] = control_params['spias']\n if model_params['real_bonds']:\n model_params['p_taxable_real_bonds_low'] = model_params['p_taxable_real_bonds_high'] = control_params['p_taxable_bonds']\n model_params['p_taxable_real_bonds_basis'] = control_params['p_taxable_bonds_basis']\n elif model_params['nominal_bonds']:\n model_params['p_taxable_nominal_bonds_low'] = model_params['p_taxable_nominal_bonds_high'] = control_params['p_taxable_bonds']\n model_params['p_taxable_nominal_bonds_basis'] = control_params['p_taxable_bonds_basis']\n elif model_params['iid_bonds']:\n model_params['p_taxable_iid_bonds_low'] = model_params['p_taxable_iid_bonds_high'] = control_params['p_taxable_bonds']\n model_params['p_taxable_iid_bonds_basis'] = control_params['p_taxable_bonds_basis']\n else:\n assert False\n\n if control_params['gammas'] is None:\n control_params['gammas'] = allowed_gammas\n\ndef force_preretirement_model(stage, spias, gamma):\n\n # Performance loss from using pre-retirement trained model in retirement is usually small, and it reduces the number of models we have to train.\n # Performance loss is larger when no spias and gamma >= 6, so use a separately trained retirement model then.\n return stage == 'retired' and (spias or gamma < 6)\n\ndef scenario_space_model_filename(model_params):\n\n stage = 'retired' if model_params['age_start'] >= max(50, model_params['age_retirement_low']) else 'preretirement'\n # Retirement model is trained starting from age 50.\n spias = model_params['real_spias'] or model_params['nominal_spias']\n spias_str = 'spias' if spias else 'no_spias'\n assert model_params['gamma_low'] == model_params['gamma_high'], \"Can't evaluate a gamma range.\"\n gamma = model_params['gamma_low']\n if int(gamma) == gamma:\n gamma = int(gamma)\n assert gamma in allowed_gammas, 'gamma values must be selected from ' + allowed_gammas\n\n if force_preretirement_model(stage, spias, gamma):\n stage = 'preretirement'\n\n return 'aiplanner-' + stage + '-' + spias_str + '-gamma' + str(gamma) + '.tf'\n\ndef enumerate_model_params_api(gammas):\n\n return [({\n 'age_start': 20 if stage == 'preretirement' else 100,\n 'age_retirement_low': 67,\n 'age_retirement_high': 67,\n 'stocks_price_low': 1,\n 'stocks_price_high': 1,\n 'stocks_sigma_level_type': 'average',\n 'real_short_rate_type': 'current',\n 'inflation_short_rate_type': 'current',\n }, [{\n 'spias': spias,\n 'rra': [gamma for gamma in gammas if not force_preretirement_model(stage, spias, gamma)],\n }]) for stage in ('preretirement', 'retired') for spias in (False, True) if not all((force_preretirement_model(stage, spias, gamma) for gamma in gammas))]\n","repo_name":"gordoni/aiplanner","sub_path":"ai/common/scenario_space.py","file_name":"scenario_space.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"62"} +{"seq_id":"23874564005","text":"# pyzChat-Client\nimport zmq\nimport printHelper\n\ndef main():\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n 
socket.connect(\"tcp://localhost:5557\")\n\n name = printHelper.printIntro() # Print intro msg & ask for username\n if name == \"quit\":\n print(\"Bye Bye~\")\n return\n\n my_info = {\"userid\" : 1234, \"username\" : name}\n printHelper.printUser(my_info)\n\n # Show chatroom right after user login.\n socket.send_string(\"#chatrooms\")\n chatrooms = socket.recv_string()\n printHelper.printRooms(chatrooms)\n\n while True:\n message = input(f\"<👽{my_info['username']}({my_info['userid']})> \")\n if message == \"#quit\":\n # erase user info?\n print(\"Exit pyzChat...\")\n break\n # HELP\n if message == \"#help\":\n printHelper.printHelp()\n # SHOW chatrooms\n elif (message[:3] == \"#re\") or (message == \"#chatrooms\"):\n socket.send_string(\"#chatrooms\")\n chatrooms = socket.recv_string()\n printHelper.printRooms(chatrooms)\n # ADD chatroom\n elif message == \"#addroom\":\n room_name = input(\"Room name(no spaces) : \")\n socket.send_string(\"#addroom \" + room_name)\n chatrooms = socket.recv_string()\n printHelper.printRooms(chatrooms)\n # ENTER chatroom\n elif message == \"#enterroom\":\n room_name = input(\"Room name : \")\n print(f\"Entering the room {room_name}...\")\n # Enter the room\n else:\n print(f\"Command {message} doesn't exists. Try #help to check every(5) commands.\")\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"ewh4l/pyzChat","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"45938062370","text":"import sys\n\ninput = sys.stdin.readline\n\n\ndef upper_bound(data, target, _l, _r):\n l = _l\n r = _r\n\n while l < r:\n mid = (l + r) // 2\n tmp = get_total(mid)\n if tmp >= target:\n l = mid + 1\n else:\n r = mid\n\n return r - 1\n\n\ndef get_total(lim):\n cnt = 0\n for i in data:\n if i > lim:\n cnt += i - lim\n\n return cnt\n\n\nn, m = map(int, input().split())\ndata = list(map(int, input().split()))\nprint(upper_bound(data, m, 0, max(data)))\n","repo_name":"ThinkingDobby/PythonProgramming","sub_path":"baekjoon/year22/mon8/2508) 나무 자르기.py","file_name":"2508) 나무 자르기.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11405020658","text":"import os\nimport io\nimport openai\nfrom google.cloud import vision\nimport re\nfrom utils import FILES,initialize, select_file\n\n\ndef chatGPTimageResult( prompt, Language = \"English\"):\n\n myTable = VisionToTable(prompt)\n # print (f\"\\n\\nMY TABLE:\\n\\n{myTable}\") #enable to see the table\n imageReqLine = [{\n \"role\": \"system\", \n \"content\": \"You are a predictor which analysis text contents about a picture and sends back the best possible results. You give prompt answers and you do not mention about the table given to you. You just type in what you understand from the input material\"\n }]\n reptr=\"\"\n if Language != \"English\":\n reptr = f\"Please provide the answer fully in {Language}\"\n \n newline = imageReqLine+[{\"role\": \"user\", \"content\": f\"Here is the probabilities about a picture as a table:\\n\\n{myTable}\\n\\nPlease type down what is in the picture considering this table. Do not mention the table or score numbers, just give an answer like an author. 
{reptr} \"}]\n # print (newline)\n\n try:\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=newline,\n max_tokens=2500,\n temperature = 0.4\n )\n ChatGPT_reply = response[\"choices\"][0][\"message\"][\"content\"]\n ln1,ln2,ln3 = response[\"usage\"][\"prompt_tokens\"],response[\"usage\"][\"completion_tokens\"],response[\"usage\"][\"total_tokens\"]\n tokeninfo = f\" - [Tokens: {ln1}-{ln2}-{ln3}]\"\n #tokeninfo = \"\"\n return (f\"{ChatGPT_reply}\")\n except Exception as e:\n print (f\"Exception:\\n{e}\\n\")\n\n return (e)\n\ndef Interrogate_Image(image_file_path):\n\n client = vision.ImageAnnotatorClient()\n\n with io.open(image_file_path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n response = client.annotate_image({\n 'image': image,\n 'features': [{'type_': vision.Feature.Type.LABEL_DETECTION}]\n })\n return (str(response))\n\ndef VisionToTable(content):\n blocks = content.split('label_annotations')\n # Parse each block and append it to a list\n data = []\n mystr=\"LABELS:description,score,topicality\\n\"\n for block in blocks:\n if block.strip() != '':\n mystr= mystr+parse_block2(block)\n # Convert the list of dictionaries into a DataFrame\n #print (f\"Deneme Str: {mystr}\")\n return(mystr)\n\ndef parse_block2(block):\n \"\"\"\n Parses a block of text and returns a dictionary containing the data.\n \"\"\"\n lines = block.split('\\n')\n\n TheStr=\"DATA:\"\n\n for line in lines:\n line = line.strip()\n match = re.match(r'description: \"(.*)\"', line)\n if match:\n TheStr = TheStr+ match.group(1) + \",\"\n else:\n match = re.match(r'score: (\\d+\\.\\d+)', line)\n if match:\n # Convert to float and format with two decimal places\n TheStr = TheStr+ format(float(match.group(1)), '.2f') + \",\"\n else:\n match = re.match(r'topicality: (\\d+\\.\\d+)', line)\n if match:\n TheStr = TheStr+ format(float(match.group(1)), '.2f') + \"\\n\"\n \n return TheStr\n\n\n# atexit.register(cleanup)\n\nif __name__ == '__main__':\n initialize()\n file = select_file()\n whatisinimage = Interrogate_Image(file)[0:2000]\n # print (whatisinimage) #enable this line to see what is going on\n imageContains= chatGPTimageResult(whatisinimage,Language=\"English\")\n print (imageContains)\n","repo_name":"alperinugur/what_is_image","sub_path":"what_is_image.py","file_name":"what_is_image.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34284339532","text":"\"\"\"A few simple tests of the public cache API.\n\nEach DB Connection has a separate PickleCache. The Cache serves two\npurposes. It acts like a memo for unpickling. 
It also keeps recent\nobjects in memory under the assumption that they may be used again.\n\"\"\"\nfrom __future__ import nested_scopes\n\nimport time\nimport types\nimport unittest\n\nimport ZODB\nimport ZODB.MappingStorage\nfrom ZODB.cPickleCache import PickleCache\nfrom ZODB.POSException import ConflictError\nfrom ZODB.PersistentMapping import PersistentMapping\nfrom ZODB.tests.MinPO import MinPO\nfrom ZODB.utils import p64\n\nfrom Persistence import Persistent\n\nclass CacheTestBase(unittest.TestCase):\n\n def setUp(self):\n store = ZODB.MappingStorage.MappingStorage()\n self.db = ZODB.DB(store,\n cache_size = self.CACHE_SIZE)\n self.conns = []\n\n def tearDown(self):\n for conn in self.conns:\n conn.close()\n self.db.close()\n\n CACHE_SIZE = 20\n\n def noodle_new_connection(self):\n \"\"\"Do some reads and writes on a new connection.\"\"\"\n\n c = self.db.open()\n self.conns.append(c)\n self.noodle_connection(c)\n\n def noodle_connection(self, c):\n r = c.root()\n\n i = len(self.conns)\n d = r.get(i)\n if d is None:\n d = r[i] = PersistentMapping()\n get_transaction().commit()\n\n for i in range(15):\n o = d.get(i)\n if o is None:\n o = d[i] = MinPO(i)\n o.value += 1\n get_transaction().commit()\n\nclass DBMethods(CacheTestBase):\n\n __super_setUp = CacheTestBase.setUp\n\n def setUp(self):\n self.__super_setUp()\n for i in range(4):\n self.noodle_new_connection()\n\n def checkCacheDetail(self):\n for name, count in self.db.cacheDetail():\n self.assert_(isinstance(name, types.StringType))\n self.assert_(isinstance(count, types.IntType))\n\n def checkCacheExtremeDetail(self):\n expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']\n for dict in self.db.cacheExtremeDetail():\n for k, v in dict.items():\n self.assert_(k in expected)\n\n # XXX not really sure how to do a black box test of the cache.\n # should the full sweep and minimize calls always remove things?\n\n # The sleep(3) call is based on the implementation of the cache.\n # It measures time in units of three seconds, so something used\n # within the last three seconds looks like something that is\n # currently being used. 
Three seconds old is the youngest\n # something can be and still be collected.\n\n def checkFullSweep(self):\n old_size = self.db.cacheSize()\n time.sleep(3)\n self.db.cacheFullSweep(0)\n new_size = self.db.cacheSize()\n self.assert_(new_size < old_size, \"%s < %s\" % (old_size, new_size))\n\n def checkMinimize(self):\n old_size = self.db.cacheSize()\n time.sleep(3)\n self.db.cacheMinimize(0)\n new_size = self.db.cacheSize()\n self.assert_(new_size < old_size, \"%s < %s\" % (old_size, new_size))\n\n # XXX don't have an explicit test for incrgc, because the\n # connection and database call it internally\n\n # XXX same for the get and invalidate methods\n\n def checkLRUitems(self):\n # get a cache\n c = self.conns[0]._cache\n c.lru_items()\n\n def checkClassItems(self):\n c = self.conns[0]._cache\n c.klass_items()\n\nclass LRUCacheTests(CacheTestBase):\n\n def checkLRU(self):\n # verify the LRU behavior of the cache\n dataset_size = 5\n CACHE_SIZE = dataset_size*2+1\n # a cache big enough to hold the objects added in two\n # transactions, plus the root object\n self.db.setCacheSize(CACHE_SIZE)\n c = self.db.open()\n r = c.root()\n l = {}\n # the root is the only thing in the cache, because all the\n # other objects are new\n self.assertEqual(len(c._cache), 1)\n # run several transactions\n for t in range(5):\n for i in range(dataset_size):\n l[(t,i)] = r[i] = MinPO(i)\n get_transaction().commit()\n # commit() will register the objects, placing them in the\n # cache. at the end of commit, the cache will be reduced\n # down to CACHE_SIZE items\n if len(l)>CACHE_SIZE:\n self.assertEqual(c._cache.ringlen(), CACHE_SIZE)\n for i in range(dataset_size):\n # Check objects added in the first two transactions.\n # They must all be ghostified.\n self.assertEqual(l[(0,i)]._p_changed, None)\n self.assertEqual(l[(1,i)]._p_changed, None)\n # Check objects added in the last two transactions.\n # They must all still exist in memory, but have\n # had their changes flushed\n self.assertEqual(l[(3,i)]._p_changed, 0)\n self.assertEqual(l[(4,i)]._p_changed, 0)\n # Of the objects added in the middle transaction, most\n # will have been ghostified. There is one cache slot\n # that may be occupied by either one of those objects or\n # the root, depending on precise order of access. We do\n # not bother to check this\n\n def checkSize(self):\n self.assertEqual(self.db.cacheSize(), 0)\n self.assertEqual(self.db.cacheDetailSize(), [])\n\n CACHE_SIZE = 10\n self.db.setCacheSize(CACHE_SIZE)\n\n CONNS = 3\n for i in range(CONNS):\n self.noodle_new_connection()\n\n self.assertEquals(self.db.cacheSize(), CACHE_SIZE * CONNS)\n details = self.db.cacheDetailSize()\n self.assertEquals(len(details), CONNS)\n for d in details:\n self.assertEquals(d['ngsize'], CACHE_SIZE)\n # the root is also in the cache as ghost, because\n # the connection holds a reference to it\n self.assertEquals(d['size'], CACHE_SIZE + 1)\n\n def checkDetail(self):\n CACHE_SIZE = 10\n self.db.setCacheSize(CACHE_SIZE)\n\n CONNS = 3\n for i in range(CONNS):\n self.noodle_new_connection()\n\n for klass, count in self.db.cacheDetail():\n if klass.endswith('MinPO'):\n self.assertEqual(count, CONNS * CACHE_SIZE)\n if klass.endswith('PersistentMapping'):\n # one root per connection\n self.assertEqual(count, CONNS)\n\n for details in self.db.cacheExtremeDetail():\n # one dict per object. 
keys:\n if details['klass'].endswith('PersistentMapping'):\n self.assertEqual(details['state'], None)\n else:\n self.assert_(details['klass'].endswith('MinPO'))\n self.assertEqual(details['state'], 0)\n\nclass StubDataManager:\n def setklassstate(self, object):\n pass\n\nclass StubObject(Persistent):\n pass\n\nclass CacheErrors(unittest.TestCase):\n\n def setUp(self):\n self.jar = StubDataManager()\n self.cache = PickleCache(self.jar)\n\n def checkGetBogusKey(self):\n self.assertRaises(KeyError, self.cache.get, p64(0))\n try:\n self.cache[12]\n except KeyError:\n pass\n else:\n self.fail(\"expected KeyError\")\n try:\n self.cache[12] = 12\n except TypeError:\n pass\n else:\n self.fail(\"expected TypeError\")\n try:\n del self.cache[12]\n except TypeError:\n pass\n else:\n self.fail(\"expected TypeError\")\n\n def checkBogusObject(self):\n def add(key, obj):\n self.cache[key] = obj\n\n key = p64(2)\n # value isn't persistent\n self.assertRaises(TypeError, add, key, 12)\n\n o = StubObject()\n # o._p_oid == None\n self.assertRaises(TypeError, add, key, o)\n\n o._p_oid = p64(3)\n self.assertRaises(ValueError, add, key, o)\n\n o._p_oid = key\n # o._p_jar == None\n self.assertRaises(Exception, add, key, o)\n\n o._p_jar = self.jar\n self.cache[key] = o\n # make sure it can be added multiple times\n self.cache[key] = o\n\n # same object, different keys\n self.assertRaises(ValueError, add, p64(0), o)\n\n def checkTwoCaches(self):\n jar2 = StubDataManager()\n cache2 = PickleCache(jar2)\n\n o = StubObject()\n key = o._p_oid = p64(1)\n o._p_jar = jar2\n\n cache2[key] = o\n\n try:\n self.cache[key] = o\n except ValueError:\n pass\n else:\n self.fail(\"expected ValueError because object already in cache\")\n\n def checkReadOnlyAttrsWhenCached(self):\n o = StubObject()\n key = o._p_oid = p64(1)\n o._p_jar = self.jar\n self.cache[key] = o\n try:\n o._p_oid = p64(2)\n except ValueError:\n pass\n else:\n self.fail(\"expect that you can't change oid of cached object\")\n try:\n del o._p_jar\n except ValueError:\n pass\n else:\n self.fail(\"expect that you can't delete jar of cached object\")\n\ndef test_suite():\n s = unittest.makeSuite(DBMethods, 'check')\n s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))\n s.addTest(unittest.makeSuite(CacheErrors, 'check'))\n return s\n","repo_name":"OS2World/APP-SERVER-Zope","sub_path":"lib/python/ZODB/tests/testCache.py","file_name":"testCache.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18448796258","text":"from .framework import (\n selenium_test,\n SharedStateSeleniumTestCase,\n)\n\n\nclass TestPublishedPagesGrid(SharedStateSeleniumTestCase):\n @selenium_test\n def test_index(self):\n self.navigate_to_published_pages()\n self.components.pages.dropdown(id=self.page_id_1).wait_for_visible()\n\n def setup_shared_state(self):\n self.user1_email = self._get_random_email(\"test1\")\n self.user2_email = self._get_random_email(\"test2\")\n self.register(self.user1_email)\n page_1 = self.new_public_page()\n self.page_id_1 = page_1[\"id\"]\n self.slug_1 = page_1[\"slug\"]\n self.logout_if_needed()\n\n self.register(self.user2_email)\n page_2 = self.new_public_page()\n self.page_id_2 = page_2[\"id\"]\n self.slug_2 = page_2[\"slug\"]\n\n def new_public_page(self):\n slug = self._get_random_name()\n response = self.dataset_populator.new_page(slug=slug)\n self.dataset_populator.make_page_public(response[\"id\"])\n return 
response\n","repo_name":"galaxyproject/galaxy","sub_path":"lib/galaxy_test/selenium/test_published_pages.py","file_name":"test_published_pages.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1194,"dataset":"github-code","pt":"62"} +{"seq_id":"18262223238","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n m = len(mat)\n n = len(mat[0])\n res = [[0] * n for _ in range(m)]\n zero_pos = []\n\n for i in range(m):\n for j in range(n):\n if mat[i][j] == 0:\n zero_pos.append((i, j))\n\n q = deque(zero_pos)\n seen = set(q)\n\n while q:\n i, j = q.popleft()\n next = [(i - 1, j), (i + 1, j), (i, j + 1), (i, j - 1)]\n for x, y in next:\n if 0 <= x < m and 0 <= y < n and (x, y) not in seen:\n res[x][y] = res[i][j] + 1\n q.append((x, y))\n seen.add((x, y))\n\n return res\n\n\nsol = Solution()\nprint(sol.updateMatrix([[0, 0, 0],\n [0, 1, 0],\n [1, 1, 1]]))","repo_name":"xxcadam/Leetcode","sub_path":"107.py","file_name":"107.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18033419995","text":"import cv2 as cv\ndisplayWidth = 320\ndisplayHeight = 240\nflipCamera = 2\n# print(cv.__version__)\n\n# camera settings\ncameraSettings ='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method=' + str(flipCamera) + ' ! video/x-raw, width=' + str(displayWidth) + ', height='+str(displayHeight)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\n#camera\ncamera = cv.VideoCapture(cameraSettings)\n# print(cv.getBuildInformation())\n\n#loops through each frame of the camera to display video\nwhile True:\n #getFrame returns a bool value 1 for successfully getting frame.\n #Frame is the actual image of the frame\n getFrame, frame = camera.read()\n frame = cv.rotate(frame, cv.ROTATE_180)\n cv.imshow('piCamera', frame)\n #set up key to exit camera\n #q will exit loop and stop showing video\n if cv.waitKey(1) == ord('q'):\n break\ncamera.release()\ncv.destroyAllWindows()\n","repo_name":"antoninonguyen/SeniorProject","sub_path":"senor project code rev 1/code/Modules/Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3894255519","text":"#!/usr/bin/env python\n\n\"\"\"\nA command line tool to run the mako templating engine\n\"\"\"\n\nimport sys\nimport os.path\nimport mako.exceptions\nimport mako.lookup\nimport mako.template\nimport config.openbook\n\nfrom scripts import attr\n\n\ndef main():\n \"\"\" main entry point \"\"\"\n if len(sys.argv) < 6:\n raise ValueError(f\"command line issue [{sys.argv}]\")\n\n input_encoding = \"utf-8\"\n output_encoding = \"utf-8\"\n p_book = bool(int(sys.argv[1]))\n p_cut = bool(int(sys.argv[2]))\n p_cutnum = int(sys.argv[3])\n p_output = sys.argv[4]\n p_input = sys.argv[5:]\n common = \"include/common.ly.mako\"\n\n # We really need the unlink, even though we have *open a file\n # for writing* later on which is supposed to truncate the file to 0\n # since we chmod the output to be unwritable which means that the\n # *open a file for writing* later would fail...\n if os.path.isfile(p_output):\n os.unlink(p_output)\n\n # if there is any error, remove the output to prevent having\n # bad output...\n try:\n mylookup = 
mako.lookup.TemplateLookup(\n directories=[\".\"],\n input_encoding=input_encoding,\n output_encoding=output_encoding,\n )\n template = mako.template.Template(\n filename=common,\n lookup=mylookup,\n input_encoding=input_encoding,\n output_encoding=output_encoding,\n )\n gattr = {}\n if p_book:\n filelist = p_input\n filelist.sort()\n gattr[\"files\"] = filelist\n gattr[\"book\"] = True\n gattr[\"toc\"] = True\n gattr[\"midi\"] = False\n # put each tune in its own book part (avoid lilypond performance problems)\n gattr[\"parts\"] = True\n else:\n gattr[\"files\"] = p_input\n gattr[\"book\"] = False\n gattr[\"toc\"] = False\n gattr[\"midi\"] = True\n gattr[\"parts\"] = False\n gattr[\"inline\"] = True\n # put some space after each tune\n gattr[\"space_after_tune\"] = False\n # put a page break after each tune\n gattr[\"break_after_tune\"] = False\n # put a page break after the toc?\n gattr[\"break_after_toc\"] = True\n gattr[\"lilypond_version\"] = config.openbook.lilypond_version\n attributes = attr.Attributes()\n if p_cut:\n template.render(attributes=attributes, gattr=gattr, scratch={})\n attributes.cut(p_cutnum, p_output)\n else:\n with open(p_output, \"wb\") as file:\n file.write(template.render(attributes=attributes, gattr=gattr, scratch={}))\n os.chmod(p_output, 0o0444)\n except Exception as exception:\n if os.path.isfile(p_output):\n os.unlink(p_output)\n print(mako.exceptions.text_error_template().render())\n raise exception\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"veltzer/openbook","sub_path":"scripts/wrapper_mako.py","file_name":"wrapper_mako.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"62"} +{"seq_id":"34569721470","text":"import requests\nfrom pprint import pprint\n\ndef get_all_bookings():\n \"\"\"\n Creates a request to fetch all bookings\n \"\"\"\n response = requests.get(\"http://localhost:8085/assignment/bookings\", json={})\n pprint(response.json())\n\nif __name__ == '__main__':\n get_all_bookings()\n\n\n","repo_name":"karanjsingh/Hall-Booking-API","sub_path":"test/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38923104648","text":"class Student:\n def __init__(self, id, name, language=\"Python\"): # define default value\n self.id = id\n self.name = name\n self.language = language\n\n def __str__(self):\n return f'[{self.id}] {self.name} studies {self.language}'\n\n\nprint(Student(1, 'Bob', \"Python\")) # should print: [1] Bob studies Python\nprint(Student(2, 'Mark', \"Java\")) # should print: [2] Mark studies Java\nprint(Student(3, 'Kate')) # should print: [3] Kate studies Python\n\n# __init__ takes id, name and language, but the last line instantiates the\n# class with only id and name. Without a default value that call would raise a\n# TypeError; because language has the default value \"Python\", the missing\n# argument simply falls back to that default.\n","repo_name":"autoplug/IBS-class","sub_path":"First Semester/week10/exam3.py","file_name":"exam3.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73222621316","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 29 09:44:11 2021\n\n@author: gokul\n\"\"\"\n\nclass Node:\n def __init__(self, val = None, 
next = None, prev = None):\n self.val = val\n self.next = next\n self.prev = prev\n \n \n \nclass doubly_linked_list:\n def __init__(self):\n self.head = None\n self.tail = None\n \n \n def print_forward(self):\n if self.head == None:\n print(\"Linked List is empty\") # show the message instead of returning it unused\n else:\n ele = self.head \n llstr = '' \n while ele:\n llstr += str(ele.val)+'-->' if ele.next else str(ele.val)\n ele = ele.next \n print(llstr)\n \n \n def print_backward(self):\n if self.tail == None:\n print(\"Linked List is empty\") # show the message instead of returning it unused\n else:\n ele = self.tail \n llstr = '' \n while ele:\n llstr += str(ele.val)+'-->' if ele.prev else str(ele.val)\n ele = ele.prev \n print(llstr)\n \n \n def __len__(self):\n count = 0\n ele = self.head\n while ele:\n count += 1\n ele = ele.next\n \n return count\n \n \n def insert_at_beginning(self, val):\n node = Node(val, self.head, None)\n self.head = node\n \n ele = self.head\n while ele.next:\n ele.next.prev = ele\n ele = ele.next\n self.tail = ele\n \n \n def insert_at_end(self, val):\n ele = self.tail\n if self.tail is None:\n node = Node(val, None, None)\n self.head = node \n self.tail = node\n else:\n ele.next = Node(val, None, ele)\n self.tail = ele.next\n \n \n def insert_at_position(self, idx, val):\n if idx < 0 or idx > self.__len__():\n raise IndexError(\"Invalid Index\")\n \n else:\n if idx == 0:\n self.insert_at_beginning(val)\n return\n elif idx == self.__len__():\n self.insert_at_end(val)\n return\n \n count = 0\n ele = self.head\n while ele:\n if count == idx - 1:\n node = Node(val, ele.next, ele)\n ele.next = node\n break\n \n ele = ele.next\n count += 1\n\n\n def insert_after_value(self, val_after, val_to_insert):\n ele = self.head\n while ele:\n if ele.val == val_after:\n node = Node(val_to_insert, ele.next, ele)\n ele.next = node\n break\n ele = ele.next\n\n\n def insert_multiple_values(self, list_of_values):\n for val in list_of_values:\n self.insert_at_end(val)\n \n \n def remove_at_position(self, idx):\n if idx < 0 or idx >= self.__len__():\n raise IndexError(\"Invalid Index\")\n else:\n if idx == 0:\n self.head = self.head.next\n if self.head: # keep prev/tail consistent after removing the head\n self.head.prev = None\n else:\n self.tail = None\n return\n \n count = 0\n ele = self.head\n while ele:\n if count == idx - 1:\n ele.next = ele.next.next\n if ele.next: # guard against removing the tail\n ele.next.prev = ele\n else:\n self.tail = ele\n break\n \n ele = ele.next\n count += 1\n \n \n def remove_by_value(self, val):\n if self.head and self.head.val == val: # handle a match at the head\n self.remove_at_position(0)\n return\n ele = self.head\n while ele.next:\n list_val = ele.next.val\n if list_val == val:\n ele.next = ele.next.next\n if ele.next: # guard against removing the tail\n ele.next.prev = ele\n else:\n self.tail = ele\n break\n \n ele = ele.next\n \n\n \nif __name__ == '__main__':\n dl = doubly_linked_list()\n dl.insert_at_beginning(5)\n dl.insert_at_beginning(625)\n dl.insert_at_end(225)\n dl.insert_at_position(3, \"^v^\")\n dl.insert_multiple_values(['a','b', 100, 'Century'])\n dl.insert_after_value(225, 'XD')\n dl.print_forward()\n dl.print_backward() \n \n dl.remove_at_position(5)\n dl.remove_at_position(4)\n dl.print_forward()\n dl.print_backward() \n \n dl.remove_by_value(100)\n dl.remove_by_value(225) \n dl.print_forward()\n dl.print_backward() \n print(len(dl))\n ","repo_name":"GokulHunt/Data_Structures","sub_path":"LinkedList/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6481099325","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom rest_framework import routers, serializers, viewsets\nfrom rest_framework_simplejwt import views as 
jwt_views\nfrom .views import UserList, UserIdViewSet, UserDetail\nfrom worlds.views import WorldViewSet, StoryViewSet\n\n# Serializers define the API representation.\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ['url', 'username', 'email', 'is_staff']\n\n# ViewSets define the view behavior.\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'profile', UserIdViewSet, basename=\"UserId\")\nrouter.register(r'api/story', StoryViewSet, basename=\"Story\")\nrouter.register(r'api/world', WorldViewSet, basename=\"World\")\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n url(r'^', include(router.urls)),\n path('admin/', admin.site.urls),\n path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),\n path('users/', UserList.as_view()),\n path('users/<int:pk>/', UserDetail.as_view()),\n]","repo_name":"pyritewolf/cosmotecton","sub_path":"backend/cosmotecton/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"127822269","text":"import numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom typing import Union, List, Dict\nimport scipy.sparse as sp\n\nfrom eaopack.assets import Timegrid\n\nclass Results:\n def __init__(self, value:float, x: np.array, duals: dict):\n \"\"\" collection of optimization results\n\n Args:\n value (float): value of optimized target function\n x (np.array): optimal values of variables\n duals (dict): dual values for constraints. Dict with constraint types as keys (mostly interesting 'N' for nodes)\n \"\"\"\n self.value = value\n self.x = x\n self.duals = duals\n\nclass OptimProblem:\n\n def __init__(self, \n c: np.array, \n l:np.array, \n u:np.array, \n A:np.array = None, \n b:np.array = None, \n cType:str = None ,\n mapping:pd.DataFrame = None,\n timegrid:Timegrid = None, # needed if periodic\n periodic_period_length:str = None,\n periodic_duration:str = None,\n map_nodal_restr:list = None \n ):\n \"\"\" Formulated optimization problem. LP problem.\n\n Args:\n c (np.array): cost vector\n l (np.array): lower bound (per time step)\n u (np.array): upper bound (per time step)\n A (np.array): restriction matrix. Optional. Defaults to None (no restrictions given)\n b (np.array): bound of restriction. Optional. Defaults to None (no restrictions given)\n cType (str - one letter per restriction): Logic to define type of restriction: U-pper, L-ower, S-equal or other \n here specific types may be defined:\n sum of dispatch at node zero: N\n Optional. 
Defaults to None (no restrictions given)\n mapping (pd.DataFrame): Mapping of variables to 'asset', 'node', 'type' ('d' - dispatch and 'i' internal variable) and 'time_step' \n map_nodal_restr (list): Mapping to identify node and timepoint for duals in the solution of the LP\n\n --- if periodic\n timegrid (Timegrid) : timegrid underneath optim problem (defaults to None)\n periodic_period_length (str) : pandas freq defining the period length (defaults to None)\n periodic_duration (str) : pandas freq defining the duration of intervals in which periods are repeated (defaults to None - then duration is inf)\n \"\"\"\n self.c = c # cost vector\n self.l = l # lower bound\n self.u = u # upper bound\n self.A = A # restriction matrix\n self.b = b # restriction result vector\n self.cType = cType # GLPK type of restriction (le, ge, ...) \n self.mapping = mapping \n self.map_nodal_restr = map_nodal_restr\n assert not np.isnan(c.sum()), 'nan value in optim problem. Check input data -- c'\n assert not np.isnan(l.sum()), 'nan value in optim problem. Check input data -- l'\n assert not np.isnan(u.sum()), 'nan value in optim problem. Check input data -- u'\n if not b is None: assert not np.isnan(b.sum()), 'nan value in optim problem. Check input data -- b'\n\n # make periodic\n if periodic_period_length is not None:\n assert timegrid is not None, 'for periodic optim problem need timegrid'\n self.__make_periodic__(freq_period = periodic_period_length , freq_duration = periodic_duration, timegrid = timegrid)\n\n def __make_periodic__(self, freq_period:str, freq_duration:str, timegrid:Timegrid):\n \"\"\" Make the optimization problem periodic main purpose is to save resources when optimizing \n granular problems over a long time -- e.g. a year with hourly resolution -- where the\n finer resolution shows periodic behaviour -- e.g. typical load profiles over the day\n\n The routine is typically called during init of optim problem\n\n Args:\n freq_period (str): [description]\n freq_duration (str): [description]\n \"\"\"\n\n # (1) create mapping of timegrid to periodicity intervals #################################\n # We create a numbering 0, 1, ... for each period\n # and identify duration intervals. In case there's an overlap between periods and durations\n # periods are leading. 
However - best choice are frequencies that don't create this problem\n\n # disp factor column needed to assign same dispatch to all related time steps\n if 'disp_factor' not in self.mapping.columns: self.mapping['disp_factor'] = 1.\n self.mapping['disp_factor'].fillna(1., inplace = True) # ensure there's a one where not assigned yet\n tp = timegrid.timepoints\n T = timegrid.T\n try: periods = pd.date_range(tp[0]-pd.Timedelta(1, freq_period), tp[-1]+pd.Timedelta(1, freq_period), freq = freq_period, tz = timegrid.tz)\n except: periods = pd.date_range(tp[0]-pd.Timedelta(freq_period), tp[-1]+pd.Timedelta(freq_period), freq = freq_period, tz = timegrid.tz)\n if freq_duration is None:\n durations = [tp[0], tp[-1]+(tp[-1]-tp[0])] # whole interval - generously extending end\n else:\n try: durations = pd.date_range(tp[0]-pd.Timedelta(1, freq_duration), tp[-1]+pd.Timedelta(1, freq_duration), freq = freq_duration, tz = timegrid.tz)\n except: durations = pd.date_range(tp[0]-pd.Timedelta(freq_duration), tp[-1]+pd.Timedelta(freq_duration), freq = freq_duration, tz = timegrid.tz) \n # gave a bit space in case date ranges do not have the same start - deleting now superfluous (early) start\n if periods[1] <= tp[0]: periods = periods.drop(periods[0])\n if durations[1] <= tp[0]: durations = durations.drop(durations[0])\n # assertions - ensure that all periods are of same length\n d = periods[1:]-periods[0:-1]\n assert all(d==d[0]), 'Error. All periods must have same length. Not given for chosen frequency '+periods\n ### create df that assigns periods and duration intervals to all tp's\n df = pd.DataFrame(index = timegrid.I)\n df['dur'] = np.nan\n df['per'] = np.nan\n df['sub_per'] = np.nan\n i_dur = 0\n i_per = 0\n i_sub_per = 0\n # df['dur'].iloc[0] = 0\n # df['per'].iloc[0] = 0\n ind_dur = df.columns.get_indexer(['dur'])[0]\n ind_per = df.columns.get_indexer(['per'])[0]\n ind_sub_per = df.columns.get_indexer(['sub_per'])[0]\n df.iloc[0, ind_dur] = 0\n df.iloc[0, ind_per] = 0\n for i in range(0,T):\n if tp[i] >= durations[i_dur]: \n i_dur +=1\n # df['dur'].iloc[i] = int(i_dur)\n df.iloc[i, ind_dur] = int(i_dur)\n if tp[i] >= periods[i_per]: \n i_per +=1\n # df['per'].iloc[i] = int(i_per)\n df.iloc[i, ind_per] = int(i_per)\n i_sub_per = 0\n # df['sub_per'].iloc[i] = int(i_sub_per)\n df.iloc[i, ind_sub_per] = int(i_sub_per)\n i_sub_per += 1\n \n df.ffill(inplace = True)\n df['per'] = df['per'].astype(int)\n df['dur'] = df['dur'].astype(int)\n df['sub_per'] = df['sub_per'].astype(int)\n\n self.mapping = pd.merge(self.mapping, df, left_on = 'time_step', right_index = True, how = 'left')\n self.mapping['new_idx'] = self.mapping.index\n idx = np.asarray(self.mapping.index).copy() # get mapping index to change it later on\n # (2) loop through each variable-group and group together all #################################\n # elements that belong to the same period item\n all_out = [] # collect all vars to remove\n for myasset in self.mapping['asset'].unique():\n for mynode in self.mapping['node'].unique():\n for mytype in self.mapping['type'].unique():\n for myvar in list(self.mapping['var_name'].unique()):\n I = (self.mapping['asset'] == myasset)&(self.mapping['node'] == mynode)&(self.mapping['var_name'] == myvar)&(self.mapping['type'] == mytype)\n # loop through durations\n for dur in self.mapping.loc[I].dur.unique():\n # loop through period steps\n for sub_per in self.mapping.loc[I & (self.mapping['dur'] == dur)].sub_per.unique():\n II = I & (self.mapping['dur'] == dur) & (self.mapping['sub_per'] == sub_per)\n if 
II.sum() <= 1:\n pass ## Nothing to do. There is only one variable\n else:\n vars = self.mapping.index[II] # variables to be joined\n leading = vars[0] # this one to remain\n out = vars[1:]\n all_out +=out.to_list()\n # shrink optimization problem\n #### u, l, c\n # bounds should ideally be equal anyhow. here choose average\n self.l[leading] = self.l[vars].mean()\n self.u[leading] = self.u[vars].mean()\n # leading variable takes joint role - thus summing up costs\n self.c[leading] = self.c[vars].sum()\n #### if given, A (b and cType refer to restrictions)\n # need to add up A elements for vars to be deleted in A elements for leading var\n if self.A is not None:\n self.A = self.A.tolil()\n self.A[:,leading] += self.A[:,out].sum(axis = 1)\n # Adjust mapping. \n assert all(self.mapping.loc[II, 'disp_factor'] == self.mapping.loc[II, 'disp_factor'].iloc[0]), 'periodicity cannot be imposed where disp factors are not identical'\n idx[out] = leading\n self.mapping.loc[out, 'new_idx'] = leading\n\n self.l = np.delete(self.l,all_out)\n self.u = np.delete(self.u,all_out) \n self.c = np.delete(self.c,all_out)\n if self.A is not None:\n my_idx = self.mapping.index.unique() # full index\n my_idx = np.delete(my_idx, all_out)\n self.A = self.A[:,my_idx]\n self.mapping.drop(columns = ['dur','per','sub_per'], inplace = True)\n self.mapping.set_index('new_idx', inplace = True)\n\n\n def optimize(self, target = 'value',\n samples = None,\n interface:str = 'cvxpy', \n solver = None,\n make_soft_problem=False,\n rel_tol:float = 1e-3, \n iterations:int = 5000)->Results:\n \"\"\" optimize the optimization problem\n\n Args:\n target (str): Target function. Defaults to 'value' (maximize DCF). \n Alternative: 'robust', maximizing the minimum DCF across given price samples\n samples (List): Samples to be used in specific optimization targets\n - Robust optimization: list of costs arrays (maximizing minimal DCF)\n interface (str, optional): Chosen interface architecture. Defaults to 'cvxpy'.\n solver (str, optional): Solver for interface. Defaults to None\n make_soft_problem (bool, optional): If true, relax the boolean variables and allow float values instead. Defaults to False\n INACTIVE rel_tol (float): relative tolerance for solver\n INACTIVE iterations (int): max number of iterations for solver\n INACTIVE decimals_res (int): rounding results to ... decimals. Defaults to 5\n \"\"\"\n # check optim problem\n if interface == 'cvxpy':\n import cvxpy as CVX\n\n # Construct the problem\n\n # variable to optimize. Note: may add differentiation of variables and constants in case lower and upper bounds are equal\n map = self.mapping.copy() # abbreviation\n\n if make_soft_problem:\n map['bool'] = False\n\n isMIP = False\n if 'bool' in map:\n my_bools = map.loc[(~map.index.duplicated(keep='first'))&(map['bool'])].index.values.tolist()\n my_bools = [(bb,) for bb in my_bools]\n if len(my_bools)==0: \n my_bools = False\n else:\n isMIP = True ### !!! Need to change solver\n # print('...MIP problem configured. 
Beware of potentially long optimization and other issues inherent to MIP')\n else:\n my_bools = False\n x = CVX.Variable(self.c.size, boolean = my_bools)\n ##### put together constraints\n constr_types = {} # dict to remember constraint type and numbering to extract duals\n # lower and upper bound constraints # 0 & 1\n constraints = [ x <= self.u, x>=self.l ]\n\n constr_types['bound_u'] = 0 # first CVX constraint \n constr_types['bound_l'] = 1 # second CVX constraint ...\n counter_constr_type = 1 # keep track of number of constraint types to be able to identify\n\n if not self.A is None: \n assert (len(self.b) == len(self.cType)) and (len(self.b) == self.A.shape[0]) and (len(self.u) == self.A.shape[1])\n #UPPER limit\n my_type = \"U\"\n # identify rows\n myRows = [mya==my_type for mya in self.cType] \n self.A = self.A.tolil() # check - necessary? Leftover?\n if any(myRows):\n counter_constr_type += 1\n constr_types[my_type] = counter_constr_type\n myRows = np.asarray(myRows)\n AU = self.A[myRows, :]\n bU = np.asarray(self.b)\n bU = bU[myRows]\n constraints = constraints + [ AU @ x<=bU ] # matrix/vector multiplication in CVXPY notation\n #LOWER limit \n my_type = \"L\"\n myRows = [mya==my_type for mya in self.cType] \n if any(myRows):\n counter_constr_type += 1\n constr_types[my_type] = counter_constr_type\n myRows = np.asarray(myRows)\n AL = self.A[myRows, :]\n bL = np.asarray(self.b)\n bL = bL[myRows]\n constraints = constraints + [ AL @ x>=bL ]\n #EQUAL constraints \n my_type = \"S\"\n myRows = [mya==my_type for mya in self.cType] \n if any(myRows):\n counter_constr_type += 1\n constr_types[my_type] = counter_constr_type\n myRows = np.asarray(myRows)\n AS = self.A[myRows, :]\n bS = np.asarray(self.b)\n bS = bS[myRows]\n constraints = constraints + [ AS @ x==bS ]\n\n # Nodal constraints (special interpretation, but essentially type EQUAL)\n my_type = \"N\"\n myRows = [mya==my_type for mya in self.cType] \n if any(myRows):\n counter_constr_type += 1\n constr_types[my_type] = counter_constr_type\n myRows = np.asarray(myRows)\n AN = self.A[myRows, :]\n bN = np.asarray(self.b)\n bN = bN[myRows]\n constraints = constraints + [ AN @ x==bN ]\n\n # Target function - alternatives possible\n if target.lower() == 'value':\n objective = -self.c.T @ x # @ is the matrix/vector multiplication in CVXPY notation\n elif target.lower() == 'robust':\n assert (not samples is None) # need samples\n if (isinstance(samples[0], (float, int))):\n raise ValueError('For robust optimization, samples must be list of arrays ')\n # (1) new variable, representing the minimum DCF\n DCF_min = CVX.Variable(1)\n # (2) each price sample represents a new restriction DCF_sample >= DCF_min\n for myc in samples:\n constraints = constraints + [-myc.T @ x >= DCF_min ] # sign: maximize negative costs\n objective = DCF_min\n else:\n raise NotImplementedError('Target function -- '+target+' -- not implemented')\n\n\n prob = CVX.Problem(CVX.Maximize(objective), constraints)\n\n if solver is None:\n prob.solve() # no rel_tol parameter here\n else:\n prob.solve(solver = getattr(CVX, solver)) \n # if isMIP: solver = 'GLPK_MI'\n # else: solver = 'ECOS'\n \n\n if prob.status == 'optimal':\n # print(\"Status: \" +prob.status)\n # print('Portfolio Value: ' + '% 6.0f' %prob.value)\n\n if not isMIP:\n # collect duals in dictionary according to cTypes\n myduals = {}\n for myt in constr_types:\n myduals[myt] = constraints[constr_types[myt]].dual_value\n else:\n myduals = None\n results = Results(value = prob.value,\n x = x.value,\n duals = 
myduals)\n if target.lower() == 'robust':\n # in case of robust target, the optimized value is the minimum\n results.value = -sum(x.value * self.c)\n elif prob.status == 'optimal_inaccurate':\n print('Optimum found, but inaccurate: ' + prob.status) \n results = 'inaccurate'\n else:\n print('Optimization not successful: ' + prob.status) \n results = 'not successful'\n else:\n raise NotImplementedError('Solver - '+str(solver)+ ' -not implemented')\n\n return results\n\n\nclass SplitOptimProblem(OptimProblem):\n def __init__(self, ops, mapping):\n \"\"\" Collection of consecutive OptimProblems\n\n Args:\n ops: List of OptimProblems\n mapping (pd.DataFrame): Mapping of all result variables to 'asset', 'node', 'type' ('d' - dispatch and 'i' internal variable) and 'time_step'\n \"\"\"\n self.ops = ops\n self.mapping = mapping\n self.c = np.hstack([op.c for op in ops])\n if not ops[0].map_nodal_restr is None:\n self.map_nodal_restr = []\n for op in ops:\n self.map_nodal_restr += op.map_nodal_restr\n else:\n self.map_nodal_restr = None\n\n def optimize(self, *args, **kwargs) -> Results:\n \"\"\" Optimize all OptimProblems in self.ops and piece the results together in one Result\n \"\"\"\n res = Results(0, np.array([]), None)\n for op in self.ops:\n res_tmp = op.optimize(*args, **kwargs)\n res.value += res_tmp.value\n res.x = np.hstack((res.x, res_tmp.x))\n if res_tmp.duals:\n if res.duals:\n for key in res_tmp.duals:\n if res.duals[key] is not None:\n if res_tmp.duals[key] is None:\n res.duals[key] = None\n else:\n res.duals[key] = np.hstack((res.duals[key], res_tmp.duals[key]))\n else:\n res.duals = res_tmp.duals\n return res\n\n\n","repo_name":"EnergyAssetOptimization/EAO","sub_path":"eaopack/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":20406,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"62"} +{"seq_id":"75342452678","text":"from pwn import *\nimport time\nimport sys\n\n\ndef exploit():\n raw_input('wait')\n buf = '%9$018p'\n proc.sendline(buf)\n proc.recvuntil('can you tell me their sum?\\n')\n guess = int(proc.recv(18), 16)\n print(guess >> 32)\n print(guess & 0x00000000ffffffff)\n guess = (guess >> 32) + (guess & 0x00000000ffffffff)\n proc.sendline(str(guess))\n\n\nif __name__ == '__main__':\n context.arch = 'amd64'\n connect = 'nc shell.angstromctf.com 1235'\n connect = connect.split(' ')\n if len(sys.argv) > 1:\n proc = remote(connect[1], int(connect[2]))\n else:\n proc = process(['./guessPublic64'], env={'LD_LIBRARY_PATH': './'})\n exploit()\n proc.interactive()\n","repo_name":"JackGrence/ctf-write-ups","sub_path":"2018/angstrom/pwn/number_guess/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30448431644","text":"import utils\nimport os\nimport preferences\n\nAPP_LOCATION = \"/Applications/happymac.app\"\nSETUP_SCRIPT = 'tell application \"System Events\" to make login item at end with properties {path:\"%s\", hidden:false}' % APP_LOCATION\nLAUNCH_AT_LOGIN_KEY = \"ENABLE_LAUNCH_AT_LOGIN\"\n\nif os.path.exists(APP_LOCATION):\n if preferences.get(LAUNCH_AT_LOGIN_KEY):\n preferences.set(LAUNCH_AT_LOGIN_KEY, \"true\")\n 
utils.run_osa_script(SETUP_SCRIPT)\n\n","repo_name":"laffra/happymac","sub_path":"src/versions/v00001/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"62"} +{"seq_id":"4995977303","text":"from math import floor, sqrt\nimport random\nimport torch\n\n\ndef schlichtkrull_std(shape, gain):\n \"\"\"\n a = \\text{gain} \\times \\frac{3}{\\sqrt{\\text{fan\\_in} + \\text{fan\\_out}}}\n \"\"\"\n fan_in, fan_out = shape[0], shape[1]\n return gain * 3.0 / sqrt(float(fan_in + fan_out))\n\ndef schlichtkrull_normal_(tensor, shape, gain=1.):\n \"\"\"Fill the input `Tensor` with values according to the Schlichtkrull method, using a normal distribution.\"\"\"\n std = schlichtkrull_std(shape, gain)\n with torch.no_grad():\n return tensor.normal_(0.0, std)\n\ndef schlichtkrull_uniform_(tensor, gain=1.):\n \"\"\"Fill the input `Tensor` with values according to the Schlichtkrull method, using a uniform distribution.\"\"\"\n std = schlichtkrull_std(tensor, gain)\n with torch.no_grad():\n return tensor.uniform_(-std, std)\n\ndef select_b_init(init):\n \"\"\"Return functions for initialising biases\"\"\"\n init = init.lower()\n if init in ['zeros', 'zero', 0]:\n return torch.nn.init.zeros_\n elif init in ['ones', 'one', 1]:\n return torch.nn.init.ones_\n elif init == 'uniform':\n return torch.nn.init.uniform_\n elif init == 'normal':\n return torch.nn.init.normal_\n else:\n raise NotImplementedError(f'{init} initialisation has not been implemented!')\n\ndef select_w_init(init):\n \"\"\"Return functions for initialising weights\"\"\"\n init = init.lower()\n if init in ['glorot-uniform', 'xavier-uniform']:\n return torch.nn.init.xavier_uniform_\n elif init in ['glorot-normal', 'xavier-normal']:\n return torch.nn.init.xavier_normal_\n elif init == 'schlichtkrull-uniform':\n return schlichtkrull_uniform_\n elif init == 'schlichtkrull-normal':\n return schlichtkrull_normal_\n elif init in ['normal', 'standard-normal']:\n return torch.nn.init.normal_\n elif init == 'uniform':\n return torch.nn.init.uniform_\n else:\n raise NotImplementedError(f'{init} initialisation has not been implemented!')\n\ndef drop_edges(triples, num_nodes, general_edo, self_loop_edo):\n \"\"\" Performs edge dropout by actually removing the triples \"\"\"\n general_keep = 1.0 - general_edo\n self_loop_keep = 1.0 - self_loop_edo\n\n # Notes: self-loop triples were appended to the end of the list in add_inverse_and_self\n nt = triples.size(0) - num_nodes\n\n general_keep_ind = random.sample(range(nt), k=int(floor(general_keep * nt)))\n self_loop_keep_ind = random.sample(range(nt, nt + num_nodes), k=int(floor(self_loop_keep * num_nodes)))\n ind = general_keep_ind + self_loop_keep_ind\n\n return triples[ind, :]\n\ndef sum_sparse(indices, values, size, row_normalisation=True, device='cpu'):\n \"\"\"\n Sum the rows or columns of a sparse matrix, and redistribute the\n results back to the non-sparse row/column entries\n Arguments are interpreted as defining sparse matrix.\n\n Source: https://github.com/pbloem/gated-rgcn/blob/1bde7f28af8028f468349b2d760c17d5c908b58b/kgmodels/util/util.py#L304\n \"\"\"\n\n assert len(indices.size()) == len(values.size()) + 1\n\n k, r = indices.size()\n\n if not row_normalisation:\n # Transpose the matrix for column-wise normalisation\n indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)\n size = size[1], size[0]\n\n ones = torch.ones((size[1], 1), device=device)\n if device == 'cuda':\n 
values = torch.cuda.sparse.FloatTensor(indices.t(), values, torch.Size(size))\n else:\n values = torch.sparse.FloatTensor(indices.t(), values, torch.Size(size))\n sums = torch.spmm(values, ones)\n sums = sums[indices[:, 0], 0]\n\n return sums.view(k)\n\n\ndef generate_inverses(triples, num_rels):\n \"\"\" Generates nverse relations \"\"\"\n\n # Swap around head and tail. Create new relation ids for inverse relations.\n inverse_relations = torch.cat([triples[:, 2, None], triples[:, 1, None] + num_rels, triples[:, 0, None]], dim=1)\n assert inverse_relations.size() == triples.size()\n\n return inverse_relations\n\n\ndef generate_self_loops(triples, num_nodes, num_rels, self_loop_keep_prob, device='cpu'):\n \"\"\" Generates self-loop triples and then applies edge dropout \"\"\"\n\n # Create a new relation id for self loop relation.\n all = torch.arange(num_nodes, device=device)[:, None]\n id = torch.empty(size=(num_nodes, 1), device=device, dtype=torch.long).fill_(2*num_rels)\n self_loops = torch.cat([all, id, all], dim=1)\n assert self_loops.size() == (num_nodes, 3)\n\n # Apply edge dropout\n mask = torch.bernoulli(torch.empty(size=(num_nodes,), dtype=torch.float, device=device).fill_(\n self_loop_keep_prob)).to(torch.bool)\n self_loops = self_loops[mask, :]\n\n return torch.cat([triples, self_loops], dim=0)\n\n\ndef add_inverse_and_self(triples, num_nodes, num_rels, device='cpu'):\n \"\"\" Adds inverse relations and self loops to a tensor of triples \"\"\"\n\n # Swap around head and tail. Create new relation ids for inverse relations.\n inverse_relations = torch.cat([triples[:, 2, None], triples[:, 1, None] + num_rels, triples[:, 0, None]], dim=1)\n assert inverse_relations.size() == triples.size()\n\n # Create a new relation id for self loop relation.\n all = torch.arange(num_nodes, device=device)[:, None]\n id = torch.empty(size=(num_nodes, 1), device=device, dtype=torch.long).fill_(2*num_rels)\n self_loops = torch.cat([all, id, all], dim=1)\n assert self_loops.size() == (num_nodes, 3)\n\n # Note: Self-loops are appended to the end and this makes it easier to apply different edge dropout rates.\n return torch.cat([triples, inverse_relations, self_loops], dim=0)\n\ndef stack_matrices(triples, num_nodes, num_rels, vertical_stacking=True, device='cpu'):\n \"\"\"\n Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all\n relations are stacked vertically).\n \"\"\"\n assert triples.dtype == torch.long\n\n r, n = num_rels, num_nodes\n size = (r * n, n) if vertical_stacking else (n, r * n)\n\n fr, to = triples[:, 0], triples[:, 2]\n offset = triples[:, 1] * n\n if vertical_stacking:\n fr = offset + fr\n else:\n to = offset + to\n\n indices = torch.cat([fr[:, None], to[:, None]], dim=1).to(device)\n\n assert indices.size(0) == triples.size(0)\n assert indices[:, 0].max() < size[0], f'{indices[0, :].max()}, {size}, {r}'\n assert indices[:, 1].max() < size[1], f'{indices[1, :].max()}, {size}, {r}'\n\n return indices, size\n\ndef block_diag(m):\n \"\"\"\n Source: https://gist.github.com/yulkang/2e4fc3061b45403f455d7f4c316ab168\n Make a block diagonal matrix along dim=-3\n EXAMPLE:\n block_diag(torch.ones(4,3,2))\n should give a 12 x 8 matrix with blocks of 3 x 2 ones.\n Prepend batch dimensions if needed.\n You can also give a list of matrices.\n \"\"\"\n\n device = 'cuda' if m.is_cuda else 'cpu' # Note: Using cuda status of m as proxy to decide device\n\n if type(m) is list:\n m = torch.cat([m1.unsqueeze(-3) for m1 in m], -3)\n\n dim = m.dim()\n n = m.shape[-3]\n\n 
siz0 = m.shape[:-3]\n siz1 = m.shape[-2:]\n\n m2 = m.unsqueeze(-2)\n\n eye = attach_dim(torch.eye(n, device=device).unsqueeze(-2), dim - 3, 1)\n\n return (m2 * eye).reshape(\n siz0 + torch.Size(torch.tensor(siz1) * n)\n )\n\ndef attach_dim(v, n_dim_to_prepend=0, n_dim_to_append=0):\n return v.reshape(torch.Size([1] * n_dim_to_prepend) + v.shape + torch.Size([1] * n_dim_to_append))\n\ndef split_spo(triples):\n \"\"\" Splits tensor into subject, predicate and object \"\"\"\n if len(triples.shape) == 2:\n return triples[:, 0], triples[:, 1], triples[:, 2]\n else:\n return triples[:, :, 0], triples[:, :, 1], triples[:, :, 2]\n","repo_name":"thiviyanT/torch-rgcn","sub_path":"torch_rgcn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"62"} +{"seq_id":"5314529716","text":"from etc.ClientBase import ClientBase\nfrom etc.settings import db_string\n\n\n# Database Client (Controller to postgres db)\nclass DbClient(ClientBase):\n\n def __init__(self):\n ClientBase.__init__(self, db_string)\n\n def search_artists(self, searchText):\n\n string = \"\"\"\n select *\n from shazamablam.artists\n where name ~* :searchText\n \"\"\"\n params = { 'searchText': searchText }\n return self.execute(string, params)\n","repo_name":"aymather/shazamablam","sub_path":"api/etc/DbClient.py","file_name":"DbClient.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19191459616","text":"import os\r\nimport re\r\nimport struct\r\nimport base64\r\n\r\n\r\ndef read_proto(file):\r\n try:\r\n f = open(file, \"r\")\r\n lines = f.readlines()\r\n f.close()\r\n except FileNotFoundError:\r\n print(\"找不到文件:\" + file)\r\n return False\r\n proto_name = os.path.basename(file).split(\".\")[0]\r\n need_import = []\r\n enum_dict = {}\r\n return_dict = {}\r\n prop_name = {}\r\n message_return_dict = {}\r\n message_prop_name = {}\r\n other_message = {}\r\n save = False\r\n for line in lines:\r\n if line.startswith(\"import\"):\r\n file_whole_name = re.findall(r'\"(.*)\"', re.split(\" \", line)[1])[0]\r\n file_name = re.sub(\".proto\", \"\", file_whole_name)\r\n need_import.append(file_name)\r\n else:\r\n split_line = re.split(\" \", line)\r\n data_type = re.sub(\"\\\\t\", \"\", split_line[0])\r\n if data_type == \"}\\n\":\r\n save = False\r\n return_dict = {}\r\n prop_name = {}\r\n continue\r\n elif data_type == \"message\" or data_type == \"enum\":\r\n save = False\r\n return_dict = {}\r\n prop_name = {}\r\n if save:\r\n if save == \"enum\": # 1个proto2个enum?自己改吧。\r\n data_id = int(re.findall(\"\\d+\", split_line[2])[0])\r\n enum_dict[data_id] = data_type\r\n else:\r\n if len(split_line) > 3: # 空行,忽略oneof\r\n if len(split_line) == 4:\r\n prop = split_line[1]\r\n data_id = int(re.findall(\"\\d+\", split_line[3])[0])\r\n return_dict[data_id] = data_type\r\n prop_name[data_id] = prop\r\n elif len(split_line) == 5: # repeated and map\r\n wire_type = re.sub(\"\\\\t\", \"\", split_line[0])\r\n if wire_type == \"repeated\":\r\n data_type = split_line[1]\r\n prop = split_line[2]\r\n data_id = int(re.findall(\"\\d+\", split_line[4])[0])\r\n return_dict[data_id] = \"repeated_\" + data_type\r\n prop_name[data_id] = prop\r\n else:\r\n data_type = wire_type + split_line[1]\r\n prop = split_line[2]\r\n data_id = int(re.findall(\"\\d+\", split_line[4])[0])\r\n return_dict[data_id] = data_type\r\n prop_name[data_id] = prop\r\n if save == 
\"message\":\r\n message_return_dict = return_dict\r\n message_prop_name = prop_name\r\n else:\r\n if save not in other_message:\r\n other_message[save] = [{}, {}]\r\n other_message[save][0].update(return_dict)\r\n other_message[save][1].update(prop_name)\r\n\r\n else:\r\n if data_type == \"message\":\r\n if split_line[1] == proto_name:\r\n save = \"message\"\r\n else:\r\n save = split_line[1]\r\n continue\r\n elif data_type == \"enum\":\r\n save = \"enum\"\r\n else:\r\n continue\r\n return need_import, enum_dict, message_return_dict, message_prop_name, other_message\r\n\r\n\r\ndef judge_type(prop_name):\r\n zero = [\"int32\", \"int64\", \"uint32\", \"uint64\", \"sint32\", \"sint64\", \"bool\", \"enum\"]\r\n one = [\"fixed64\", \"sfixed64\", \"double\"]\r\n five = [\"fixed32\", \"sfixed32\", \"float\"]\r\n if prop_name in zero:\r\n return 0\r\n elif prop_name in one:\r\n return 1\r\n elif prop_name in five:\r\n return 5\r\n else:\r\n return 2\r\n\r\n\r\ndef varint(now_location, byte_str):\r\n offset = 0\r\n data = byte_str[now_location] & 0b1111111\r\n while True:\r\n if byte_str[now_location] >> 7:\r\n offset += 1\r\n now_location += 1\r\n data = ((byte_str[now_location] & 0b1111111) << (7 * offset)) | data\r\n else:\r\n break\r\n return data, offset\r\n\r\n\r\ndef parse(byte_str, proto_name, *args):\r\n # len(args) == 2 传map的类型或嵌套message\r\n # len(args) == 3 传repeated的类型和data_id = 1\r\n # print(byte_str)\r\n # print(proto_name)\r\n file_path = os.getcwd()\r\n proto_name = file_path + \"\\proto\\\\\" + proto_name + \".proto\"\r\n need_import, enum_dict, encoding_rules, prop_name, other_message = read_proto(proto_name)\r\n # if not need_import and not need_import == []:\r\n # return False\r\n if args:\r\n encoding_rules, prop_name = args[0], args[1]\r\n # else:\r\n # encoding_rules, prop_name = read_proto(proto_name)\r\n decode_data = {}\r\n if len(args) == 3:\r\n list_decode_data = {\"1\": []}\r\n i = 0\r\n while i < len(byte_str) - 1:\r\n if len(args) == 3:\r\n data_id = args[2]\r\n data_type = judge_type(encoding_rules[data_id])\r\n else:\r\n data_type = byte_str[i] & 0b111\r\n data_id, offset = varint(i, byte_str)\r\n data_id >>= 3\r\n i += offset\r\n i += 1\r\n if data_id in encoding_rules:\r\n if data_type == 0:\r\n data, offset = varint(i, byte_str)\r\n int_type_list = [\"int32\", \"int64\", \"uint32\", \"uint64\", \"sint32\", \"sint64\"]\r\n if encoding_rules[data_id] == \"bool\":\r\n data = bool(data)\r\n elif encoding_rules[data_id] in int_type_list:\r\n pass\r\n else:\r\n if encoding_rules[data_id] in need_import:\r\n proto_name = file_path + \"\\proto\\\\\" + encoding_rules[data_id] + \".proto\"\r\n enum_dict = read_proto(proto_name)[1]\r\n data = enum_dict[data]\r\n decode_data[prop_name[data_id]] = data\r\n i += offset\r\n i += 1\r\n elif data_type == 1:\r\n if encoding_rules[data_id] == \"double\":\r\n decode_data[prop_name[data_id]] = struct.unpack(\"\", encoding_rules[data_id])[0]\r\n type1, type2 = re.split(\",\", type_name)\r\n type_dict[1] = type1\r\n type_dict[2] = type2\r\n map_private_prop_name[1] = \"first\"\r\n map_private_prop_name[2] = \"second\"\r\n proto_name = os.path.basename(proto_name).split(\".\")[0]\r\n data = parse(byte_str[i:i + length], proto_name, type_dict, map_private_prop_name)\r\n decode_data[prop_name[data_id]].append({data[\"first\"]: data[\"second\"]})\r\n elif encoding_rules[data_id].startswith(\"repeated_\"):\r\n rule = {}\r\n repeated_name = {}\r\n data_type = re.sub(\"repeated_\", \"\", encoding_rules[data_id])\r\n if data_type in 
need_import:\r\n proto_name = data_type\r\n data = parse(byte_str[i: i + length], proto_name)\r\n else:\r\n rule[1] = data_type\r\n repeated_name[1] = \"1\"\r\n proto_name = os.path.basename(proto_name).split(\".\")[0]\r\n data = parse(byte_str[i: i + length], proto_name, rule, repeated_name, 1)\r\n decode_data[prop_name[data_id]] = data\r\n elif encoding_rules[data_id] in need_import:\r\n decode_data[prop_name[data_id]] = []\r\n decode_data[prop_name[data_id]].append(parse(byte_str[i: i + length], encoding_rules[data_id]))\r\n elif encoding_rules[data_id] in other_message:\r\n decode_data[prop_name[data_id]] = []\r\n proto_name = os.path.basename(proto_name).split(\".\")[0]\r\n decode_data[prop_name[data_id]].append(parse(byte_str[i: i + length], proto_name,\r\n other_message[encoding_rules[data_id]][0],\r\n other_message[encoding_rules[data_id]][1]))\r\n i += length\r\n else:\r\n print(\"protobuf该处字节解析失败:\" + str(i))\r\n if len(args) == 3:\r\n list_decode_data[\"1\"].append(decode_data[\"1\"])\r\n else:\r\n return decode_data\r\n if len(args) == 3:\r\n decode_data = list_decode_data[\"1\"]\r\n return decode_data\r\n\r\n# b_data = b'\\x89\\xab\\xc2\\xb6\\xcd\\x05\\x96@\\xc2\\x0cR\"\\xcfa\\xa8\\x9a\\x10\\xf1\\xc6{\\x90\\x10\\xc9\\x0f\\xd3\\x94\\x9c\\nr\\x8cse\\xa4\\xf6\\xb4\\xed\\xfbC@\\x8a\\xf3\\xb6\\xe2\\xfe\\xf1{\\xfc\\x0fVq\\x8ct\"\\xea&*\\x9d\\xff/\\x1c\\xd5\\x94O\\xa0\\x06\\xee\\xae\\xd7\\t\\x8f\\xd5l\\xc2\\x0c\\x81O\\x99\\xa5\\x8a\\xb8_?x+\\xb2@\\xae\\x05\\xd1\\xb9Z J\\xb5\\x96\\x1e\\xd8\\xfa\\x85\\x14\\xf4Y\\x9a\\xcb\\x8dUZV\\xc8\\xa1\\xf1}\\xb1q\\xe4A\\xe5\\x01pE\\xa6u\\xb3\\xe4l\\x04\\x19\\xc3\\x1c2\\xdf\\xf4e\\x1f5v\\x08\\xa5\\xd0\\x86%i\\xe7?\\xb7\\xe8R\\xe5;1\\r-y\\x7f\\xeaD\\x15\\x85\\x9c\\xff\\xfd\\x96&\\xfc;\\xce'\r\n#\r\n# for i in range(len(b_data)):\r\n# try:\r\n# print(parse(b_data[i:], \"GetAreaExplorePointReq\"))\r\n# print(i)\r\n# input()\r\n# except Exception:\r\n# pass\r\n\r\n\r\n# b_data = b'r\\x07\\x08\\x97\\xe8\\xbcW\\x10%\\x89\\xab\\xba\\xa2\\xe4\\xd5\\xbe}\\x85\\xd6\\xfa\\xd0q\\xfep\\x85;\\xd5\\x17IW\\xef\\xd8\\x12\\xd3\\xa0\\xb9\\x12\\x84g\\x8f\\xedG\\t!\\xd8\\x85\\xfa\\x06\\xba\\x94\\x9a\\xb4\\xc6V\\x9e5A\\x18\\xd1?M\\x9b\\x1eIq\\xc3,\\xec\\x8d\\xc1}\\xba\\x82\\x1b\\x8d\\xd0gZ\\xaeb\\x94p(\\x9b\\xe8\\xcb \\xef\\xaa\\x1e\\x89\\x812(\\x96\\xd42\\xb3\\xdcn\\xba\\xb0\\xb5a\"_.>\\xbfe\\xd5\\xa6\\xf7\\xc2C\\xb9\\xfa\\xe5K\\xb1m\\xac\\xffoK\\xb8\\xc4[\\xeb\\xe7\\x184\\x87k\\xf0\\x8a\\x93\\x7f\\xe5\\xe3\\x8f\\xb1-'\r\n# data_type = b_data[2] & 0b111\r\n# data_id, offset = varint(2, b_data)\r\n# data_id >>= 3\r\n# print(data_id, data_type, offset)\r\n\r\n# data, offset = varint(1, b_data)\r\n# print(data, offset)\r\n\r\n# print(parse(b_data, \"WorldPlayerRTTNotify\"))\r\n\r\n","repo_name":"c2c3vsfac/Iridium-py","sub_path":"parse_proto.py","file_name":"parse_proto.py","file_ext":"py","file_size_in_byte":12409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"73585168837","text":"from typing import Dict, Callable\nimport pandas as pd\nfrom io import BytesIO\nfrom datetime import datetime\n\nfrom src.db import DBModel\nfrom .model import ReportParams\nfrom .queries import QueryFactory\n\nfrom src.utils import Utils\n\n\nclass _ReportService:\n def __init__(self):\n self.DB = DBModel.getInstance()\n self.Factory = QueryFactory()\n self.datetime_format = '%d-%m-%Y'\n \n def get_stop_ridership_report(self, params: ReportParams):\n report_as_dict = self.DB.run(\n self.Factory.get_stop_ridership_report(\n 
params.date,\n params.stop,\n params.line,\n params.ticket,\n params.start_hour,\n params.end_hour,\n params.preview,\n )\n )\n report_as_df = pd.DataFrame(report_as_dict)\n if (params.preview):\n report_as_df[\"ridership\"] = \"-\"\n else:\n report_as_df = pd.DataFrame(report_as_df)\n ridership_sum = report_as_df[\"ridership\"].sum()\n report_as_df = report_as_df.append({\"line_id\": \"Toplam Biniş\", \"ridership\": ridership_sum}, ignore_index=True)\n return report_as_df.fillna(\"\")\n\n def get_ridership_by_tickets(self, params: ReportParams):\n riderships = self.DB.run(\n self.Factory.ridership_by_ticket_type(params.date, params.line, params.stop, params.preview)\n )\n ridership_df = pd.DataFrame(riderships)\n ridership_sum = ridership_df[\"ridership\"].sum()\n ridership_df = ridership_df.append({\"ticket_type\": \"Toplam Biniş\", \"ridership\": ridership_sum}, ignore_index=True)\n return ridership_df.fillna(\"\")\n\n def get_line_ridership(self, params: ReportParams):\n riderships = self.DB.run(\n self.Factory.ridership_by_lines(params.date, params.stop, params.ticket, params.preview)\n )\n return riderships\n\nreport_service = _ReportService()","repo_name":"dev-paraboly/python-interview-project","sub_path":"src/v1/report/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"11385905359","text":"'''\nmock:\n1. Test scenarios for interface testing are hard to simulate and take a lot of work to set up properly\n2. The interface under test may depend on modules of other interfaces, and those interfaces may not be developed yet\nHow do you carry out interface testing when the test conditions are insufficient?\nUse Mock to simulate the interface's return values\n'''\nfrom unittest import mock\n\nimport requests\n\n'''\nPayment interface: http://www.zhifu.com/\nMethod: post\nParameters: {\"订单号\":\"12345\",\"支付金额\":20.56,\"支付方式\":\"支付宝/微信/余额宝/银行卡\"}\nReturn values: {\"code\":200, \"msg\":\"支付成功\"},{\"code\":201, \"msg\":\"支付失败\"}\nThe interface has not been developed yet\n'''\n\n\nclass Pay:\n def zhifu(self, data):\n r = requests.post(\"http://www.zhifu.com/\", data=data)\n return r.json()\n\n\n\n\ndef test_001():\n pay = Pay()\n # Simulate the interface's return value with mock\n pay.zhifu = mock.Mock(return_value={\"code\": 200, \"msg\": \"支付成功\"})\n canshu = {\"订单号\": \"12345\", \"支付金额\": 20.56, \"支付方式\": \"支付宝\"}\n r = pay.zhifu(canshu)\n print(r)\n assert r['msg'] == \"支付成功\"\n\n\ndef test_002():\n pay = Pay()\n # Simulate the interface's return value with mock\n pay.zhifu = mock.Mock(return_value={\"code\": 201, \"msg\": \"支付失败\"})\n canshu = {\"订单号\": \"12345\", \"支付金额\": -20.56, \"支付方式\": \"支付宝\"}\n r = pay.zhifu(canshu)\n print(r)\n assert r['msg'] == \"支付失败\"\n\n\n# module name.class name.method name\n@mock.patch(\"test_001.Pay.zhifu\", return_value={\"code\": 200, \"msg\": \"支付成功\"})\ndef test_003(n):\n pay = Pay()\n n.return_value = {\"code\": 200, \"msg\": \"支付超时\"}\n canshu = {\"订单号\": \"12345\", \"支付金额\": 2000.56, \"支付方式\": \"微信\"}\n r = pay.zhifu(canshu)\n print(r)\n assert r['msg'] == \"支付超时\"\n\n\n# The withdrawal interface is not implemented; write a test case for a successful withdrawal\ndef quxian(data):\n r = requests.post(\"http://jy001:8081/futureloan/mvc/api/member/withdraw\", data=data)\n print(\"actual return value\", r.text)\n return r.json()\n\n\ndef test_quxian():\n canshu = {\"mobilephone\": \"18312332166\", \"amount\": 1001}\n quxian = mock.Mock(return_value={'status': 1, 'code': '20001', 'data': None, 'msg': '取现成功'})\n r = quxian(canshu)\n print(\"return value after mock\", r)\n\n","repo_name":"chenp123123/ApiAutoTest","sub_path":"day05/test_001.py","file_name":"test_001.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}{"seq_id":"10656256302","text":"import copy\nimport math\n\nimport numpy as np\n\n\nclass NodeAlpha:\n def __init__(self, 
game, args, state, parent=None, action_taken=None, prior=0, visit_count=0):\n self.game = game\n self.args = args\n self.state = state\n self.parent = parent\n self.action_taken = action_taken\n self.prior = prior\n\n self.children = []\n self.expandable_moves = game.get_valid_moves(state)\n\n self.visit_count = visit_count\n self.value_sum = 0\n\n def is_fully_expanded(self):\n return len(self.children) > 0\n\n def select(self):\n best_child = None\n best_ucb = -np.inf\n\n for child in self.children:\n ucb = self.get_ucb(child)\n if ucb > best_ucb:\n best_child = child\n best_ucb = ucb\n\n return best_child\n\n def get_ucb(self, child):\n child_wins = child.value_sum\n\n if child.visit_count == 0:\n q_value = 0\n else:\n q_value = child_wins / child.visit_count\n\n return q_value + self.args['C'] * child.prior * (math.sqrt(self.visit_count) / (child.visit_count + 1))\n\n def expand(self, policy):\n for action, prob in enumerate(policy):\n if prob > 0:\n child_state = copy.deepcopy(self.state)\n child_state = self.game.get_next_state(child_state, action, child_state.next_to_move)\n\n child = NodeAlpha(self.game, self.args, child_state, self, action, prob)\n self.children.append(child)\n\n def backpropagate(self, value):\n self.value_sum += value\n self.visit_count += 1\n\n value *= -1\n if self.parent is not None:\n self.parent.backpropagate(value)\n","repo_name":"GabrielScinteie/BachelorThesis","sub_path":"Agent/AlphaGoZero/NodeAlpha.py","file_name":"NodeAlpha.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18038525530","text":"import math\n\n\nclass Version:\n def __init__(self, text: str, release: bool = False):\n # test: 4.5.301 -> 4.5.302\n # release: 4.5.3 -> 4.6.4\n self.__release = release\n self.__current = text[text.rfind(' ') + 1:].replace('\\'', '')\n segments = self.__current.split('.')\n if release:\n fix = math.floor((int(segments[2]) * math.pow(10, 3 - len(segments[2])) + 100) / 100)\n minor = int(segments[1])\n if fix >= 10:\n minor += 1\n fix = 0\n if minor >= 10:\n segments[1] = '0'\n segments[0] = str(int(segments[0]) + 1)\n else:\n segments[1] = str(minor)\n segments[2] = str(fix)\n else:\n fix = int(int(segments[2]) * math.pow(10, 3 - len(segments[2]))) + 1\n temp = str(fix)\n # 补齐\n segments[2] = '0' * (3 - len(temp)) + temp\n self.__next = '.'.join(segments)\n # print(f\"{release} {self.__current} -> {segments} -> {'.'.join(segments)}\")\n\n def next(self) -> str:\n return self.__next\n\n def __str__(self) -> str:\n return '{}: {} -> {}'.format('release' if self.__release else 'test ', self.__current, self.__next)\n\n\ndef run_version_test():\n lines = ['4.5.9', '4.5.340', '4.5.30', '4.5.378', '4.9.901', '9.9.9', '6.0.0', '4.5.001']\n for i in range(len(lines)):\n print(Version(lines[i], False))\n print(Version(lines[i], True))\n\n\ndef run_num_test():\n print(int('-19'))\n try:\n print(int('-19.0'))\n except ValueError as e:\n print(e)\n\n\nif __name__ == '__main__':\n run_version_test()\n # run_num_test()\n","repo_name":"sleticalboy/Dailearn","sub_path":"sub-python/etools/auto-version.py","file_name":"auto-version.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"70005775237","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtGui\nfrom views.ddim_panel import DDIMPanel\nfrom 
views.blending_panel import BlendingPanel\nfrom views.paint_panel import PaintPanel\nfrom views.preference_view import PreferenceView\n\nfrom resources import icons\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow, args):\n\n # Get monitor resolution\n self.screen_size = QtWidgets.QDesktopWidget().screenGeometry(-1)\n MainWindow.setGeometry((int)(self.screen_size.width()*.2), (int)(self.screen_size.height()*.1), \n (int)(self.screen_size.width()*.6), (int)(self.screen_size.height()*.8))\n\n # Workspace layout\n self.main_splitter = QtWidgets.QSplitter(Qt.Vertical)\n MainWindow.setCentralWidget(self.main_splitter)\n\n # section -- paint panel \n self.paint_label = QtWidgets.QLabel(\"Paint Panel\")\n self.paint_label.setAlignment(Qt.AlignCenter)\n self.paint_label.setFont(QtGui.QFont(\"AnyStyle\", 24))\n # paint scene\n self.paint_scene = PaintPanel(args.canvas, args.width, args.height)\n # paint panel run button\n self.run_btn = QtWidgets.QPushButton(\"Run\")\n self.run_btn.setFont(QtGui.QFont(\"AnyStyle\", 18))\n # paint panel \n self.paint_panel = QtWidgets.QWidget()\n # paint panel layout\n self.paint_layout = QtWidgets.QVBoxLayout()\n self.paint_panel.setLayout(self.paint_layout)\n self.paint_layout.addWidget(self.paint_label)\n self.paint_layout.addWidget(self.paint_scene)\n self.paint_layout.addWidget(self.run_btn)\n\n # ddim panel, or you can create your QWidget\n # section -- ddim panel \n self.ddim_label = QtWidgets.QLabel(\"Diffusion Results\")\n self.ddim_label.setAlignment(Qt.AlignHCenter | Qt.AlignTop)\n self.ddim_label.setFont(QtGui.QFont(\"AnyStyle\", 24))\n self.ddim_label.setMaximumHeight(80)\n\n # ddim scene\n self.ddim_scene = DDIMPanel(args.canvas, args.width, args.height)\n # ddim panel \n self.ddim_panel = QtWidgets.QWidget()\n # ddim panel layout\n self.ddim_layout = QtWidgets.QVBoxLayout()\n self.ddim_panel.setLayout(self.ddim_layout)\n self.ddim_layout.addWidget(self.ddim_label)\n self.ddim_layout.addWidget(self.ddim_scene)\n\n # image_blending panel, or you can create your QWidget\n # section -- blending panel \n self.blending_label = QtWidgets.QLabel(\"Blending Results\")\n self.blending_label.setAlignment(Qt.AlignHCenter | Qt.AlignTop)\n self.blending_label.setFont(QtGui.QFont(\"AnyStyle\", 24))\n self.blending_label.setMaximumHeight(80)\n # blending scene\n self.blending_scene = BlendingPanel(out_width = args.out_width)\n # blending panel \n self.blending_panel = QtWidgets.QWidget()\n # blending panel layout\n self.blending_layout = QtWidgets.QVBoxLayout()\n self.blending_panel.setLayout(self.blending_layout)\n self.blending_layout.addWidget(self.blending_label)\n self.blending_layout.addWidget(self.blending_scene)\n\n # console panel\n # self.process = QtWidgets.QTextEdit()\n # self.process.setMaximumHeight(self.screen_size.height() - (int)(args.height*1.3))\n # self.process.moveCursor(QtGui.QTextCursor.Start)\n # self.process.ensureCursorVisible()\n # self.process.setLineWrapColumnOrWidth(500)\n # self.process.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth)\n\n # add ddim panel and image_blending panel(sample code)\n self.main_splitter.addWidget(self.paint_panel)\n self.main_splitter.addWidget(self.ddim_panel)\n self.main_splitter.addWidget(self.blending_panel)\n # self.main_splitter.addWidget(self.process)\n\n # creating menu bar\n self.main_menu = QtWidgets.QMenuBar()\n\n # Adding menu options\n self.file_menu = self.main_menu.addMenu(\"File\")\n self.edit_menu = self.main_menu.addMenu(\"Edit\")\n self.run_menu = 
self.main_menu.addMenu(\"Run\")\n MainWindow.setMenuBar(self.main_menu)\n\n # creating tool bar\n self.edit_tool_bar = QtWidgets.QToolBar()\n self.edit_tool_bar.setIconSize(QtCore.QSize(96, 96))\n MainWindow.addToolBar(Qt.LeftToolBarArea, self.edit_tool_bar)\n \n # creating export option\n self.export_action = QtWidgets.QAction(\"&Export\")\n self.file_menu.addAction(self.export_action)\n\n # creating clear option\n self.clear_all_action = QtWidgets.QAction(\"&Clear all\")\n self.file_menu.addAction(self.clear_all_action)\n\n # creating preference option\n self.preference_action = QtWidgets.QAction(\"Preference\")\n self.edit_menu.addAction(self.preference_action)\n self.preference_view = PreferenceView(args)\n\n # creating run option\n self.run_action = QtWidgets.QAction(\"&Run\")\n self.run_menu.addAction(self.run_action)\n\n # creating edit option -- brush\n self.brush_action = QtWidgets.QAction(QtGui.QIcon(\":brush_white.png\"), \"&Brush\")\n self.edit_tool_bar.addAction(self.brush_action)\n\n # creating edit option -- palette\n self.palette_action = QtWidgets.QAction(QtGui.QIcon(\":palette_white.png\"), \"&Palette\")\n self.edit_tool_bar.addAction(self.palette_action)\n\n # creating edit option -- eraser\n self.eraser_action = QtWidgets.QAction(QtGui.QIcon(\":eraser_white.png\"), \"&Eraser\")\n self.edit_tool_bar.addAction(self.eraser_action)\n\n # creating edit option -- brush size\n self.increment_action = QtWidgets.QAction(QtGui.QIcon(\":increment_white.png\"), \"&Increment\")\n self.edit_tool_bar.addAction(self.increment_action)\n\n self.size_slider = QtWidgets.QSlider(Qt.Vertical)\n self.size_slider.setRange(20, 250)\n self.size_slider.setValue(30)\n self.size_slider.setTickInterval(10)\n self.size_slider.setSingleStep(10)\n self.size_slider.setMaximumHeight((int)(self.screen_size.height()*.1))\n self.edit_tool_bar.addWidget(self.size_slider)\n\n self.decrement_action = QtWidgets.QAction(QtGui.QIcon(\":decrement_white.png\"), \"&Decrement\")\n self.edit_tool_bar.addAction(self.decrement_action)\n\n self.color_dialog = QtWidgets.QColorDialog()\n\n","repo_name":"donglinwu6066/2022-NYCU-EVA-lab-project-demo-app","sub_path":"src/views/main_view_ui.py","file_name":"main_view_ui.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5811488384","text":"import math\nimport numpy as np\nfrom termcolor import colored\nfrom structure.minibatch import *\nfrom structure.sample import *\n\n'''\nClass that provides batch selection functions on the finally constructed training & test dataset\n'''\nclass BatchPatcher(object):\n def __init__(self, loaded_data, batch_size, replacement = False):\n # Assign data\n self.loaded_data = loaded_data\n self.size_of_data = len(loaded_data)\n\n # Setup meta info for training\n self.batch_size = batch_size\n self.num_iters_per_epoch = int(math.ceil(float(self.size_of_data) / float(self.batch_size)))\n\n # Replacement in mini-batch for random batch selection\n self.replacement = replacement\n\n print(colored(\"[LOG]\", \"blue\"), \"[Train patcher] # training samples:\", self.size_of_data, \"# iters per epoch:\", self.num_iters_per_epoch)\n\n def set_batch_size(self, batch_size):\n self.batch_size = batch_size\n self.num_iters_per_epoch = int(math.ceil(float(self.size_of_data) / float(self.batch_size)))\n\n\n # Randomly select next mini-batch samples\n def get_next_random_mini_batch(self):\n selected_sample_ids = np.random.choice(self.size_of_data, 
self.batch_size, self.replacement)\n\n # Fetch mini-batch samples from loaded_data in main memory\n mini_batch = MiniBatch()\n for id in selected_sample_ids:\n sample = self.loaded_data[id]\n mini_batch.append(sample.id, sample.image, sample.label)\n\n return mini_batch.ids, mini_batch.images, mini_batch.labels\n\n #Iterate all training samples sequentially to evaluate the loss and error\n def get_eval_mini_batch(self, init_id):\n # init_id from 0~self.num_iters_per_epoch\n selected_sample_ids = list(range(init_id*self.batch_size, init_id*self.batch_size + self.batch_size))\n\n # Fetch mini-batch samples from loaded_data in main memory\n mini_batch = MiniBatch()\n for id in selected_sample_ids:\n if id >= self.size_of_data:\n continue\n else:\n sample = self.loaded_data[id]\n mini_batch.append(sample.id, sample.image, sample.label)\n\n return mini_batch.ids, mini_batch.images, mini_batch.labels","repo_name":"anirudhganguly44/team-tinker","sub_path":"Prestopping_Colab/src/reader/batch_patcher.py","file_name":"batch_patcher.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"20543343239","text":"#Importing libraries\r\nfrom patchify import patchify\r\nimport cv2\r\n\r\n#Reading the image that is going to be patched\r\nimg = cv2.imread(\"image_.jpg\")\r\n\r\n#Resizing the img to 4000 x 3000 \r\nimg = cv2.resize(img, (4000, 3000))\r\n\r\n#Patchifying the image\r\npatches_img = patchify(img, (1000,1000,3), step=500)\r\nfor i in range(patches_img.shape[0]):\r\n for j in range(patches_img.shape[1]):\r\n single_patch_img = patches_img[i, j, 0, :, :, :]\r\n if not cv2.imwrite('patches/' + 'image_' + '_'+ str(i)+str(j)+'.jpg', single_patch_img):\r\n raise Exception(\"Could not write the image\")","repo_name":"RoboneClub/NaturLab-analysis","sub_path":"patching.py","file_name":"patching.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72028018756","text":"from dataclasses import dataclass\nfrom decimal import Decimal\nfrom typing import List\nfrom uuid import UUID\n\nfrom arbeitszeit.repositories import DatabaseGateway\n\n\n@dataclass\nclass ShowMyAccountsRequest:\n current_user: UUID\n\n\n@dataclass\nclass ShowMyAccountsResponse:\n balances: List[Decimal]\n\n\n@dataclass\nclass ShowMyAccounts:\n database: DatabaseGateway\n\n def __call__(self, request: ShowMyAccountsRequest) -> ShowMyAccountsResponse:\n accounts = dict(\n (account.id, balance)\n for account, balance in self.database.get_accounts()\n .owned_by_company(request.current_user)\n .joined_with_balance()\n )\n company = self.database.get_companies().with_id(request.current_user).first()\n assert company\n balances = [\n accounts[company.means_account],\n accounts[company.raw_material_account],\n accounts[company.work_account],\n accounts[company.product_account],\n ]\n return ShowMyAccountsResponse(balances=balances)\n","repo_name":"arbeitszeit/arbeitszeitapp","sub_path":"arbeitszeit/use_cases/show_my_accounts.py","file_name":"show_my_accounts.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"62"} +{"seq_id":"23198120375","text":"import json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom 
django.urls import reverse\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom .models import User, Post, Profile\n\n\ndef index(request):\n page_posts = Paginator(Post.objects.all(), 10)\n page_number = request.GET.get(\"page\")\n if page_number != None:\n try:\n required_page = page_posts.page(page_number)\n except:\n required_page = page_posts.page(1)\n else: \n required_page = page_posts.page(1)\n return render(request, \"network/index.html\", {\n \"posts\": required_page\n })\n\n@login_required\ndef profile(request, user_id):\n key_user = User.objects.get(username=user_id)\n key = Profile.objects.get(user=key_user)\n print(key)\n page_posts = Paginator(Post.objects.filter(user=key_user), 10)\n page_number = request.GET.get(\"page\")\n if page_number != None:\n try:\n required_page = page_posts.page(page_number)\n except:\n required_page = page_posts.page(1)\n else: \n required_page = page_posts.page(1)\n return render(request, \"network/profile.html\", {\n \"posts\": required_page,\n \"profile\": key\n })\n\ndef get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n firstName = request.POST[\"firstname\"]\n lastName = request.POST[\"lastname\"]\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.first_name = firstName\n user.last_name = lastName \n user.save()\n profile = Profile(user=user)\n profile.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n\n@login_required\n@csrf_exempt\ndef new_post(request):\n if request.method == \"POST\":\n post_data = json.loads(request.body)\n post_body = post_data.get(\"body\", \"\")\n post = Post(\n user = request.user,\n body = post_body \n )\n post.save()\n content = {\n 'status': 201,\n 'post_id': post.id,\n 'username': request.user.username,\n 'body': post.body,\n 'likes': post.likes.all().count(),\n 'timestamp': post.timestamp.strftime(\"%B %d, %Y, %I:%M %p\")\n }\n return JsonResponse(content, status=201)\n return JsonResponse({\"error\":\"Not POST method\" }, status=400)\n\n@login_required\n@csrf_exempt\ndef follow(request):\n if request.method == \"PUT\":\n put_data = 
json.loads(request.body)\n follow_username = put_data.get(\"user\", \"\")\n follow_user = User.objects.get(username=follow_username)\n follow_profile = Profile.objects.get(user=follow_user)\n following_profile = Profile.objects.get(user=request.user)\n follow_action = put_data.get(\"follow_action\",\"\")\n if(follow_action == True):\n follow_profile.follower.add(request.user)\n following_profile.following.add(follow_user)\n print(\"follow\")\n else:\n follow_profile.follower.remove(request.user)\n following_profile.following.remove(follow_user)\n print(follow_profile.follower)\n follow_profile.save()\n response = {\n \"status\": 201,\n \"action\": follow_action,\n \"message\": \"Followed success\"\n }\n return JsonResponse(response, status=201)\n return JsonResponse({\"error\":\"Followed Failure\"}, status=400)\n\n@login_required\ndef following(request):\n profile = Profile.objects.get(user=request.user)\n following_users = profile.following.all()\n page_posts = Paginator(Post.objects.filter(user__in=following_users), 10)\n page_number = request.GET.get(\"page\")\n if page_number != None:\n try:\n required_page = page_posts.page(page_number)\n except:\n required_page = page_posts.page(1)\n else: \n required_page = page_posts.page(1)\n return render(request, \"network/following.html\", {\n \"posts\": required_page,\n })\n\n@login_required\n@csrf_exempt\ndef edit(request):\n if request.method == \"PUT\":\n put_data = json.loads(request.body)\n post_ID = put_data.get(\"ID\", \"\")\n target_post = Post.objects.get(pk=post_ID)\n if target_post.user != request.user:\n return JsonResponse({\"error\":\"Restricted Action\"}, status=401)\n else:\n target_post.body = put_data.get(\"body\", \"\")\n target_post.save()\n return JsonResponse({\"message\":\"Successfully edited\",\n \"body\": target_post.body}, status=201)\n return JsonResponse({\"error\":\"Followed Failure\"}, status=400)\n\n\n@login_required\n@csrf_exempt\ndef like(request):\n if request.method == \"PUT\":\n put_data = json.loads(request.body)\n post_ID = put_data.get(\"ID\", \"\")\n target_post = Post.objects.get(pk=post_ID)\n if request.user not in target_post.likes.all():\n target_post.likes.add(request.user)\n return JsonResponse({\"isLiked\": True}, status=201)\n else:\n target_post.likes.remove(request.user)\n return JsonResponse({\"isLiked\": False}, status=201)\n return JsonResponse({\"error\":\"Forbidden Request\"}, status=401)\n","repo_name":"amith2368/cs50-network","sub_path":"network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"16946740772","text":"from sklearn.datasets import load_iris\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass MyMLP():\r\n def __init__(self, params=None):\r\n 'If params is None the MLP is initialized with default values.'\r\n if params == None:\r\n self.alpha = 0.25\r\n self.n_nlayer = np.array([4, 10, 6, 3, 3]) # numero de neuronas por layer\r\n self.w = []\r\n self.bias = []\r\n for i in range(len(self.n_nlayer[1:])): # ctdad de layer -1\r\n self.w.append(np.random.randn(self.n_nlayer[i], self.n_nlayer[i + 1]) * 0.05)\r\n self.bias.append(np.random.randn(self.n_nlayer[i + 1]))\r\n self.func_act = 'sigmoide'\r\n else:\r\n self.alpha = params[0]\r\n self.n_nlayer = params[1]\r\n self.w = params[2]\r\n self.bias = params[3]\r\n self.func_act = params[4]\r\n # Build layers\r\n self.net = []\r\n self.out_layers = []\r\n self.dE = []\r\n 
[self.net.append(np.transpose(np.zeros(i))) for i in self.n_nlayer[1:]] # ctdad de neuronas a partir de la 2da\r\n [self.out_layers.append(np.transpose(np.zeros(i))) for i in self.n_nlayer] # ctdad de neuronas en cada layer\r\n [self.dE.append(np.zeros(self.n_nlayer[i + 1])) for i in range(len(self.n_nlayer[1:]))]\r\n self.E = []\r\n\r\n def propagate(self, X):\r\n ''' Propagate data from input layer to output layer. '''\r\n self.out_layers[0] = X # Capa de entrada\r\n for i in range(len(self.n_nlayer[1:])): # cada layer (a partir de la 1ra hidden)\r\n self.net[i] = self.w[i].T.dot(self.out_layers[i]) + self.bias[i] # salida de cada neuron\r\n self.out_layers[i + 1] = self.fnc_act(self.net[i], self.func_act) # salida de cada capa\r\n\r\n def learn(self, X, y):\r\n error = abs(y - self.out_layers[-1]) # |y - y_hat|\r\n self.carga_Error(error) # .5|y - y_hat|^2\r\n self.dE[-1] = np.array(\r\n [o * (1 - o) * error[i] for i, o in enumerate(self.out_layers[-1])]) # delta de la capa de salida\r\n # self.dE[-1] = self.der_func_act(self.net[-1], self.func_act) * error\r\n # self.E.append(0.5 * np.sum((error * self.dE[-1]) ** 2))\r\n\r\n # Paso del error hacia atras (Backpropagation)\r\n for lh in reversed(range(len(self.n_nlayer[2:]))): # - capa de entrada y capa de salida\r\n der_func = self.out_layers[lh + 1] * (\r\n 1 - self.out_layers[lh + 1]) # self.der_func_act(self.net[lh], self.func_act)\r\n for n_lh in range(len(self.dE[lh])): # nodo en cada capa lh\r\n sumatoria = np.dot(self.w[lh + 1][n_lh, :].T, self.dE[lh + 1])\r\n self.dE[lh][n_lh] = der_func[n_lh] * sumatoria\r\n # Actualizacion de los pesos\r\n for i in range(len(self.w)):\r\n self.w[i] = self.w[i] - self.alpha * (np.dot(self.out_layers[i + 1].T, self.dE[i]))\r\n self.bias[i] = self.bias[i] + self.alpha * self.dE[i]\r\n\r\n def carga_Error(self, error):\r\n self.E.append(0.5 * np.sum((error) ** 2)) # .5|y - y_hat|^2\r\n\r\n def load_dataset(self, params=None):\r\n if params == None:\r\n iris_data = load_iris()\r\n self.n_samples, self.n_features = iris_data.data.shape\r\n self.X = iris_data.data\r\n self.Y = iris_data.target\r\n else:\r\n self.X = params[0]\r\n self.Y = params[1]\r\n self.n_samples, self.n_features = params[2]\r\n\r\n (ind_train, ind_valid, ind_teste) = self.dataset_Divided(self.X, self.Y, self.n_samples)\r\n self.X_1 = self.X[ind_train]\r\n self.X_2 = self.X[ind_valid]\r\n self.X_3 = self.X[ind_teste]\r\n self.Y_1 = self.Y[ind_train]\r\n self.Y_2 = self.Y[ind_valid]\r\n self.Y_3 = self.Y[ind_teste]\r\n\r\n def dataset_Divided(self, X, Y, n_samples, pc_train=75, pc_valid=15, pc_teste=5):\r\n indices = list(range(n_samples))\r\n [np.random.shuffle(indices) for i in range(3)]\r\n top_train = int(np.ceil(n_samples * pc_train / 100))\r\n top_valid = int(np.ceil(n_samples * (pc_train + pc_valid) / 100))\r\n ind_train = indices[:top_train]\r\n ind_valid = indices[top_train:top_valid]\r\n ind_teste = indices[top_valid:]\r\n return (ind_train, ind_valid, ind_teste)\r\n\r\n def fnc_act(self, n, func):\r\n n = np.array(n)\r\n if func == 'degrau': # degrau\r\n return (n >= 0) * 1\r\n elif func == 'sigmoide': # sigmoide\r\n return (1 / (1 + np.exp(-5 * n)))\r\n elif func == 'lineal':\r\n return n\r\n elif func == 'ReLU':\r\n return np.maximum(0, n)\r\n elif func == 'tanh':\r\n return np.tanh(n)\r\n\r\n def der_func_act(self, n, func):\r\n dn = np.array(n)\r\n if func == 'degrau': # degrau\r\n return (dn >= 0) * 1\r\n elif func == 'sigmoide': # sigmoide\r\n y = self.fnc_act(dn, 'sigmoide')\r\n return 5 * y * (1 - y)\r\n elif 
func == 'lineal':\r\n return 1\r\n elif func == 'ReLU':\r\n return (dn >= 0) * 1\r\n elif func == 'tanh':\r\n return (1 - (np.tanh(dn) ** 2))\r\n\r\n\r\n# INICIO\r\n\r\nMLP = MyMLP()\r\nMLP.load_dataset()\r\n\r\narr_Y = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\r\nindX1 = list(range(len(MLP.Y_1)))\r\nindX2 = list(range(len(MLP.Y_2)))\r\nindX3 = list(range(len(MLP.Y_3)))\r\n\r\nepochs = 100\r\nE_X1 = []\r\nE_X2 = []\r\nE_X3 = []\r\n# Train\r\nfor e in range(epochs):\r\n #print('Epoch: ', e)\r\n # n = np.random.randint(MLP.n_samples)\r\n # TRAIN\r\n # np.random.shuffle(indX1)\r\n for i in indX1:\r\n MLP.propagate(MLP.X_1[i])\r\n MLP.learn(MLP.X_1[i], arr_Y[MLP.Y_1[i]])\r\n E_X1.append(np.average(MLP.E))\r\n MLP.E = []\r\n # VALIDATION\r\n # np.random.shuffle(indX2)\r\n for i in indX2:\r\n MLP.propagate(MLP.X_2[i])\r\n MLP.learn(MLP.X_2[i], arr_Y[MLP.Y_2[i]])\r\n E_X2.append(np.average(MLP.E))\r\n MLP.E = []\r\n # TESTE\r\n # np.random.shuffle(indX3)\r\n for i in indX3:\r\n MLP.propagate(MLP.X_3[i])\r\n MLP.carga_Error(abs(arr_Y[MLP.Y_3[i]] - MLP.out_layers[-1]))\r\n #print(MLP.X_3[i], \" |\", arr_Y[MLP.Y_3[i]], \" | \", MLP.out_layers[-1], \" | \", MLP.dE[-1], \" | \", MLP.E[-1])\r\n E_X3.append(np.average(MLP.E))\r\n MLP.E = []\r\n\r\nplt.plot(range(len(E_X1)), E_X1)\r\nplt.plot(range(len(E_X2)), E_X2)\r\nplt.plot(range(len(E_X3)), E_X3)\r\nplt.legend(['training', 'validation', 'teste'])\r\nplt.ylabel('Loss')\r\nplt.xlabel('Epochs')\r\nplt.show()\r\n","repo_name":"ang3lvd/-MachingLearning-T1","sub_path":"AM_T1.py","file_name":"AM_T1.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17886209744","text":"import json\nfrom typing import Union, Any\n\n\ndef get_sum_list_elements(element: Any, ignore: str = None) -> int:\n total_sum = 0\n if isinstance(element, list) or \\\n (isinstance(element, dict) and ignore not in (list(element.keys()) + list(element.values()))):\n total_sum += get_sum_elements(element, ignore)\n elif isinstance(element, int):\n total_sum += element\n return total_sum\n\n\ndef get_sum_elements(input_json: Union[list, dict], ignore: str = None) -> int:\n total_sum = 0\n if isinstance(input_json, list):\n for element in input_json:\n total_sum += get_sum_list_elements(element, ignore)\n elif isinstance(input_json, dict):\n for key, element in input_json.items():\n if isinstance(key, int):\n total_sum += key\n total_sum += get_sum_list_elements(element, ignore)\n return total_sum\n\n\ndef main():\n input_json = json.load(open('day_12_input.txt'))\n print('Sum of all the numbers is %s' % get_sum_elements(input_json))\n print('Sum of all the numbers without red is %s' % get_sum_elements(input_json, 'red'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Javitronxo/AdventOfCode","sub_path":"2015/day_12.py","file_name":"day_12.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"11047535726","text":"import filters as ft\nimport numpy as np\nimport os as os\n\nfilt_folder = \"/Users/pmarti/Dropbox/Tesi/filt/filters_120_20120612/\" #Folder where the computed bands will be located \neff_filt_folder = \"/Users/pmarti/Dropbox/Tesi/filt/filters_120_20120612_eff/\"\ntrans_folder = \"/Users/pmarti/Dropbox/Tesi/filt/PAU_trans_curves/\"\n\nfiles = os.listdir(filt_folder)\n\n#Compute filters..........................................................\nfor name in files:\n\tlam, R = 
np.loadtxt(filt_folder + name, unpack = True)\n\teff_R = ft.gen_effect_filt(lam, R, trans_folder)\n\tfilt = np.array([lam, eff_R])\n\tnp.savetxt(eff_filt_folder + name[:-4] + \".res\", filt.T, fmt = [\"%2.2f\", \"%5.5f\"])\n\n#Plot set of narrow filt.................................................\nft.plot_set_filt(eff_filt_folder)\n","repo_name":"polmartisanahuja/PAU-Genfilt","sub_path":"gen_effect_filt.py","file_name":"gen_effect_filt.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29817034938","text":"import logging\nimport os\nimport sys\nfrom typing import Dict, List, Optional\n\nimport torch\nfrom fairseq.models import (\n FairseqIncrementalDecoder,\n FairseqLanguageModel,\n register_model,\n register_model_architecture,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_MAX_TARGET_POSITIONS = 1024\n\n\n@register_model(\"hf_gpt2\")\nclass HuggingFaceGPT2LanguageModel(FairseqLanguageModel):\n def __init__(self, decoder):\n super().__init__(decoder)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--embed-dim', type=int, metavar='N',\n help='embedding dimension')\n parser.add_argument('--num-attention-heads', type=int, metavar='N',\n help='num attention heads')\n parser.add_argument('--num-layers', type=int, metavar='N',\n help='num layers')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability for all fully connected layers '\n 'in the embeddings, encoder, and pooler')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n default_architecture(args)\n return cls(HuggingFaceGPT2Decoder(args, task))\n\n\nclass HuggingFaceGPT2Decoder(FairseqIncrementalDecoder):\n def __init__(self, args, task):\n try:\n from transformers import GPT2Config, GPT2LMHeadModel\n except ImportError:\n raise ImportError(\n \"\\n\\nPlease install huggingface/transformers with:\"\n \"\\n\\n pip install transformers\"\n )\n\n super().__init__(task.target_dictionary)\n\n config = GPT2Config(\n vocab_size=len(task.target_dictionary),\n n_positions=args.max_target_positions + 1,\n n_ctx=args.max_target_positions,\n n_embd=args.embed_dim,\n n_layer=args.num_layers,\n n_head=args.num_attention_heads,\n resid_pdrop=args.dropout,\n embd_pdrop=args.dropout,\n attn_pdrop=args.attention_dropout,\n layer_norm_epsilon=1e-6,\n )\n self.model = GPT2LMHeadModel(config)\n\n # set zero embedding for padding symbol\n self.pad_idx = task.target_dictionary.pad()\n self.model.transformer.wte.weight.data[self.pad_idx].zero_()\n self.model.transformer.wpe.weight.data[0].zero_()\n\n def forward(\n self,\n prev_output_tokens,\n src_lengths=None,\n incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,\n encoder_out=None,\n ):\n features = self.extract_features(prev_output_tokens, incremental_state)\n lm_logits = self.model.lm_head(features)\n return (lm_logits,)\n\n def extract_features(\n self,\n prev_output_tokens,\n incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,\n ):\n if incremental_state:\n past = self.get_incremental_state(\"past\")\n else:\n past = None\n\n # don't attend to padding symbols\n attention_mask = prev_output_tokens.ne(self.pad_idx).int()\n\n # set position ids to exclude padding 
symbols\n position_ids = attention_mask * (\n torch.arange(1, 1 + prev_output_tokens.size(1))\n .to(prev_output_tokens)\n .repeat(prev_output_tokens.size(0), 1)\n )\n\n outputs = self.model.transformer(\n input_ids=prev_output_tokens,\n past=past,\n attention_mask=attention_mask,\n position_ids=position_ids,\n )\n last_hidden_states = outputs[0]\n\n if incremental_state:\n self.set_incremental_state(incremental_state, \"past\", outputs[1])\n\n return last_hidden_states\n\n def max_positions(self):\n return self.model.config.n_positions - 1\n\n\n@register_model_architecture(\"hf_gpt2\", \"hf_gpt2\")\ndef default_architecture(args):\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n args.embed_dim = getattr(args, \"embed_dim\", 768)\n args.num_attention_heads = getattr(args, \"num_attention_heads\", 12)\n args.num_layers = getattr(args, \"num_layers\", 12)\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n\n\n@register_model_architecture(\"hf_gpt2\", \"hf_gpt2_medium\")\ndef hf_gpt2_medium(args):\n args.embed_dim = getattr(args, \"embed_dim\", 1024)\n args.num_attention_heads = getattr(args, \"num_attention_heads\", 16)\n args.num_layers = getattr(args, \"num_layers\", 24)\n default_architecture(args)\n\n\n@register_model_architecture(\"hf_gpt2\", \"hf_gpt2_large\")\ndef hf_gpt2_large(args):\n args.embed_dim = getattr(args, \"embed_dim\", 1280)\n args.num_attention_heads = getattr(args, \"num_attention_heads\", 20)\n args.num_layers = getattr(args, \"num_layers\", 36)\n default_architecture(args)\n\n\n@register_model_architecture(\"hf_gpt2\", \"hf_gpt2_xl\")\ndef hf_gpt2_xl(args):\n args.embed_dim = getattr(args, \"embed_dim\", 1600)\n args.num_attention_heads = getattr(args, \"num_attention_heads\", 25)\n args.num_layers = getattr(args, \"num_layers\", 48)\n default_architecture(args)\n","repo_name":"facebookresearch/fairseq","sub_path":"fairseq/models/huggingface/hf_gpt2.py","file_name":"hf_gpt2.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":28050,"dataset":"github-code","pt":"62"} +{"seq_id":"75213342596","text":"\"\"\"\nThis module implements some standard regression models:\n\nGeneralized Least Squares (GLS),\nOrdinary Least Squares (OLS),\nand Weighted Least Squares (WLS),\nas well as an GLS model with autoregressive error terms GLSAR(p)\n\nModels are specified with an endogenous response variable and an\nexogenous design matrix and are fit using their `fit` method.\n\nSubclasses that have more complicated covariance matrices\nshould write over the 'whiten' method as the fit method\nprewhitens the response by calling 'whiten'.\n\nGeneral reference for regression models:\n\nD. C. Montgomery and E.A. Peck. \"Introduction to Linear Regression\n Analysis.\" 2nd. Ed., Wiley, 1992.\n\nEconometrics references for regression models:\n\nR. Davidson and J.G. MacKinnon. \"Econometric Theory and Methods,\" Oxford,\n 2004.\n\nW. Green. 
\"Econometric Analysis,\" 5th ed., Pearson, 2003.\n\"\"\"\n\n__docformat__ = 'restructuredtext en'\n\n__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']\n\nimport numpy as np\nfrom scipy.linalg import toeplitz\nfrom scipy import stats\nfrom scipy.stats.stats import ss\nfrom statsmodels.tools.tools import (add_constant, rank,\n recipr, chain_dot)\nfrom statsmodels.tools.decorators import (resettable_cache,\n cache_readonly, cache_writable)\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.emplike.elregress import _ELRegOpts\nfrom scipy import optimize\nfrom scipy.stats import chi2\n\ndef _get_sigma(sigma, nobs):\n \"\"\"\n Returns sigma for GLS and the inverse of its Cholesky decomposition.\n Handles dimensions and checks integrity. If sigma is None, returns\n None, None. Otherwise returns sigma, cholsigmainv.\n \"\"\"\n if sigma is None:\n return None, None\n sigma = np.asarray(sigma).squeeze()\n if sigma.ndim == 0:\n sigma = np.repeat(sigma, nobs)\n if sigma.ndim == 1:\n if sigma.shape != (nobs,):\n raise ValueError(\"Sigma must be a scalar, 1d of length %s or a 2d \"\n \"array of shape %s x %s\" % (nobs, nobs))\n cholsigmainv = np.diag(1/sigma**.5)\n sigma = np.diag(sigma)\n else:\n if sigma.shape != (nobs, nobs):\n raise ValueError(\"Sigma must be a scalar, 1d of length %s or a 2d \"\n \"array of shape %s x %s\" % (nobs, nobs))\n cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T\n\n return sigma, cholsigmainv\n\nclass RegressionModel(base.LikelihoodModel):\n \"\"\"\n Base class for linear regression models not used by users.\n\n Intended for subclassing.\n \"\"\"\n def __init__(self, endog, exog, **kwargs):\n super(RegressionModel, self).__init__(endog, exog, **kwargs)\n self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])\n\n def initialize(self):\n #print \"calling initialize, now whitening\" #for debugging\n self.wexog = self.whiten(self.exog)\n self.wendog = self.whiten(self.endog)\n # overwrite nobs from class Model:\n self.nobs = float(self.wexog.shape[0])\n self.rank = rank(self.exog)\n self.df_model = float(self.rank - self.k_constant)\n self.df_resid = self.nobs - self.rank\n self.df_model = float(rank(self.exog) - self.k_constant)\n\n def fit(self, method=\"pinv\", **kwargs):\n \"\"\"\n Full fit of the model.\n\n The results include an estimate of covariance matrix, (whitened)\n residuals and an estimate of scale.\n\n Parameters\n ----------\n method : str\n Can be \"pinv\", \"qr\". \"pinv\" uses the Moore-Penrose pseudoinverse\n to solve the least squares problem. 
\"qr\" uses the QR\n factorization.\n\n Returns\n -------\n A RegressionResults class instance.\n\n See Also\n ---------\n regression.RegressionResults\n\n Notes\n -----\n The fit method uses the pseudoinverse of the design/exogenous variables\n to solve the least squares minimization.\n \"\"\"\n exog = self.wexog\n endog = self.wendog\n\n if method == \"pinv\":\n if ((not hasattr(self, 'pinv_wexog')) or\n (not hasattr(self, 'normalized_cov_params'))):\n #print \"recalculating pinv\" #for debugging\n self.pinv_wexog = pinv_wexog = np.linalg.pinv(self.wexog)\n self.normalized_cov_params = np.dot(pinv_wexog,\n np.transpose(pinv_wexog))\n beta = np.dot(self.pinv_wexog, endog)\n\n elif method == \"qr\":\n if ((not hasattr(self, 'exog_Q')) or\n (not hasattr(self, 'normalized_cov_params'))):\n Q, R = np.linalg.qr(exog)\n self.exog_Q, self.exog_R = Q, R\n self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))\n else:\n Q, R = self.exog_Q, self.exog_R\n\n # used in ANOVA\n self.effects = effects = np.dot(Q.T, endog)\n beta = np.linalg.solve(R, effects)\n\n # no upper triangular solve routine in numpy/scipy?\n if isinstance(self, OLS):\n lfit = OLSResults(self, beta,\n normalized_cov_params=self.normalized_cov_params)\n else:\n lfit = RegressionResults(self, beta,\n normalized_cov_params=self.normalized_cov_params)\n return RegressionResultsWrapper(lfit)\n\n def predict(self, params, exog=None):\n \"\"\"\n Return linear predicted values from a design matrix.\n\n Parameters\n ----------\n params : array-like, optional after fit has been called\n Parameters of a linear model\n exog : array-like, optional.\n Design / exogenous data. Model exog is used if None.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n If the model as not yet been fit, params is not optional.\n \"\"\"\n #JP: this doesn't look correct for GLMAR\n #SS: it needs its own predict method\n if exog is None:\n exog = self.exog\n return np.dot(exog, params)\n\nclass GLS(RegressionModel):\n __doc__ = \"\"\"\n Generalized least squares model with a general covariance structure.\n\n %(params)s\n sigma : scalar or array\n `sigma` is the weighting matrix of the covariance.\n The default is None for no scaling. If `sigma` is a scalar, it is\n assumed that `sigma` is an n x n diagonal matrix with the given\n scalar, `sigma` as the value of each diagonal element. If `sigma`\n is an n-length vector, then `sigma` is assumed to be a diagonal\n matrix with the given `sigma` on the diagonal. 
This should be the\n same as WLS.\n %(extra_params)s\n\n Attributes\n ----------\n pinv_wexog : array\n `pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.\n cholsimgainv : array\n The transpose of the Cholesky decomposition of the pseudoinverse.\n df_model : float\n p - 1, where p is the number of regressors including the intercept.\n of freedom.\n df_resid : float\n Number of observations n less the number of parameters p.\n llf : float\n The value of the likelihood function of the fitted model.\n nobs : float\n The number of observations n.\n normalized_cov_params : array\n p x p array :math:`(X^{T}\\Sigma^{-1}X)^{-1}`\n results : RegressionResults instance\n A property that returns the RegressionResults class if fit.\n sigma : array\n `sigma` is the n x n covariance structure of the error terms.\n wexog : array\n Design matrix whitened by `cholsigmainv`\n wendog : array\n Response variable whitened by `cholsigmainv`\n\n Notes\n -----\n If sigma is a function of the data making one of the regressors\n a constant, then the current postestimation statistics will not be correct.\n\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load()\n >>> data.exog = sm.add_constant(data.exog)\n >>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid\n >>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()\n >>> rho = res_fit.params\n\n `rho` is a consistent estimator of the correlation of the residuals from\n an OLS fit of the longley data. It is assumed that this is the true rho\n of the AR process data.\n\n >>> from scipy.linalg import toeplitz\n >>> order = toeplitz(np.arange(16))\n >>> sigma = rho**order\n\n `sigma` is an n x n matrix of the autocorrelation structure of the\n data.\n\n >>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)\n >>> gls_results = gls_model.fit()\n >>> print gls_results.summary()\n\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n\n def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None):\n #TODO: add options igls, for iterative fgls if sigma is None\n #TODO: default is sigma is none should be two-step GLS\n sigma, cholsigmainv = _get_sigma(sigma, len(endog))\n super(GLS, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, sigma=sigma,\n cholsigmainv=cholsigmainv)\n\n #store attribute names for data arrays\n self._data_attr.extend(['sigma', 'cholsigmainv'])\n\n\n def whiten(self, X):\n \"\"\"\n GLS whiten method.\n\n Parameters\n -----------\n X : array-like\n Data to be whitened.\n\n Returns\n -------\n np.dot(cholsigmainv,X)\n\n See Also\n --------\n regression.GLS\n \"\"\"\n X = np.asarray(X)\n if np.any(self.sigma) and not self.sigma.shape == ():\n return np.dot(self.cholsigmainv, X)\n else:\n return X\n\n def loglike(self, params):\n \"\"\"\n Returns the value of the gaussian loglikelihood function at params.\n\n Given the whitened design matrix, the loglikelihood is evaluated\n at the parameter vector `params` for the dependent variable `endog`.\n\n Parameters\n ----------\n params : array-like\n The parameter estimates\n\n Returns\n -------\n loglike : float\n The value of the loglikelihood function for a GLS Model.\n\n\n Notes\n -----\n The loglikelihood function for the normal distribution is\n\n .. 
math:: -\\\\frac{n}{2}\\\\log\\\\left(Y-\\\\hat{Y}\\\\right)-\\\\frac{n}{2}\\\\left(1+\\\\log\\\\left(\\\\frac{2\\\\pi}{n}\\\\right)\\\\right)-\\\\frac{1}{2}\\\\log\\\\left(\\\\left|\\\\Sigma\\\\right|\\\\right)\n\n Y and Y-hat are whitened.\n\n \"\"\"\n #TODO: combine this with OLS/WLS loglike and add _det_sigma argument\n nobs2 = self.nobs / 2.0\n SSR = ss(self.wendog - np.dot(self.wexog,params))\n llf = -np.log(SSR) * nobs2 # concentrated likelihood\n llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant\n if np.any(self.sigma) and self.sigma.ndim == 2:\n #FIXME: robust-enough check? unneeded if _det_sigma gets defined\n llf -= .5*np.log(np.linalg.det(self.sigma))\n # with error covariance matrix\n return llf\n\nclass WLS(RegressionModel):\n __doc__ = \"\"\"\n A regression model with diagonal but non-identity covariance structure.\n\n The weights are presumed to be (proportional to) the inverse of the\n variance of the observations. That is, if the variables are to be\n transformed by 1/sqrt(W) you must supply weights = 1/W.\n\n %(params)s\n weights : array-like, optional\n 1d array of weights. If you supply 1/W then the variables are pre-\n multiplied by 1/sqrt(W). If no weights are supplied the default value\n is 1 and WLS reults are the same as OLS.\n %(extra_params)s\n\n Attributes\n ----------\n weights : array\n The stored weights supplied as an argument.\n\n See regression.GLS\n\n Examples\n ---------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> Y = [1,3,4,5,2,3,4]\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>> wls_model = sm.WLS(Y,X, weights=range(1,8))\n >>> results = wls_model.fit()\n >>> results.params\n array([ 2.91666667, 0.0952381 ])\n >>> results.tvalues\n array([ 2.0652652 , 0.35684428])\n >>> print results.t_test([1, 0])\n \n >>> print results.f_test([0, 1])\n \n\n Notes\n -----\n If the weights are a function of the data, then the postestimation\n statistics such as fvalue and mse_model might not be correct, as the\n package does not yet support no-constant regression.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n\n def __init__(self, endog, exog, weights=1., missing='none', hasconst=None):\n weights = np.array(weights)\n if weights.shape == ():\n weights = np.repeat(weights, len(endog))\n weights = weights.squeeze()\n super(WLS, self).__init__(endog, exog, missing=missing,\n weights=weights, hasconst=hasconst)\n nobs = self.exog.shape[0]\n weights = self.weights\n if len(weights) != nobs and weights.size == nobs:\n raise ValueError('Weights must be scalar or same length as design')\n\n def whiten(self, X):\n \"\"\"\n Whitener for WLS model, multiplies each column by sqrt(self.weights)\n\n Parameters\n ----------\n X : array-like\n Data to be whitened\n\n Returns\n -------\n sqrt(weights)*X\n \"\"\"\n #print self.weights.var()\n X = np.asarray(X)\n if X.ndim == 1:\n return X * np.sqrt(self.weights)\n elif X.ndim == 2:\n return np.sqrt(self.weights)[:,None]*X\n\n def loglike(self, params):\n \"\"\"\n Returns the value of the gaussian loglikelihood function at params.\n\n Given the whitened design matrix, the loglikelihood is evaluated\n at the parameter vector `params` for the dependent variable `Y`.\n\n Parameters\n ----------\n params : array-like\n The parameter estimates.\n\n Returns\n -------\n The value of the loglikelihood function for a WLS Model.\n\n Notes\n --------\n .. 
math:: -\\\\frac{n}{2}\\\\log\\\\left(Y-\\\\hat{Y}\\\\right)-\\\\frac{n}{2}\\\\left(1+\\\\log\\\\left(\\\\frac{2\\\\pi}{n}\\\\right)\\\\right)-\\\\frac{1}{2}log\\\\left(\\\\left|W\\\\right|\\\\right)\n\n where :math:`W` is a diagonal matrix\n \"\"\"\n nobs2 = self.nobs / 2.0\n SSR = ss(self.wendog - np.dot(self.wexog,params))\n llf = -np.log(SSR) * nobs2 # concentrated likelihood\n llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant\n return llf\n\n\nclass OLS(WLS):\n __doc__ = \"\"\"\n A simple ordinary least squares model.\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n weights : scalar\n Has an attribute weights = array(1.0) due to inheritance from WLS.\n\n See regression.GLS\n\n Examples\n --------\n >>> import numpy as np\n >>>\n >>> import statsmodels.api as sm\n >>>\n >>> Y = [1,3,4,5,2,3,4]\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>>\n >>> model = sm.OLS(Y,X)\n >>> results = model.fit()\n >>> results.params\n array([ 2.14285714, 0.25 ])\n >>> results.tvalues\n array([ 1.87867287, 0.98019606])\n >>> print results.t_test([1, 0])\n \n >>> print results.f_test(np.identity(2))\n \n\n Notes\n -----\n No constant is added by the model unless you are using formulas.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n #TODO: change example to use datasets. This was the point of datasets!\n def __init__(self, endog, exog=None, missing='none', hasconst=None):\n super(OLS, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst)\n\n def loglike(self, params):\n '''\n The likelihood function for the clasical OLS model.\n\n Parameters\n ----------\n params : array-like\n The coefficients with which to estimate the loglikelihood.\n\n Returns\n -------\n The concentrated likelihood function evaluated at params.\n '''\n nobs2 = self.nobs/2.\n return -nobs2*np.log(2*np.pi)-nobs2*np.log(1/(2*nobs2) *\\\n np.dot(np.transpose(self.endog -\n np.dot(self.exog, params)),\n (self.endog - np.dot(self.exog,params)))) -\\\n nobs2\n\n def whiten(self, Y):\n \"\"\"\n OLS model whitener does nothing: returns Y.\n \"\"\"\n return Y\n\nclass GLSAR(GLS):\n __doc__ = \"\"\"\n A regression model with an AR(p) covariance structure.\n\n %(params)s\n rho : int\n Order of the autoregressive covariance\n %(extra_params)s\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>> Y = [1,3,4,5,8,10,9]\n >>> model = sm.GLSAR(Y, X, rho=2)\n >>> for i in range(6):\n ... results = model.fit()\n ... print \"AR coefficients:\", model.rho\n ... rho, sigma = sm.regression.yule_walker(results.resid,\n ... order=model.order)\n ... model = sm.GLSAR(Y, X, rho)\n ...\n AR coefficients: [ 0. 
0.]\n AR coefficients: [-0.52571491 -0.84496178]\n AR coefficients: [-0.6104153 -0.86656458]\n AR coefficients: [-0.60439494 -0.857867 ]\n AR coefficients: [-0.6048218 -0.85846157]\n AR coefficients: [-0.60479146 -0.85841922]\n >>> results.params\n array([-0.66661205, 1.60850853])\n >>> results.tvalues\n array([ -2.10304127, 21.8047269 ])\n >>> print results.t_test([1, 0])\n \n >>> print results.f_test(np.identity(2))\n \n\n Or, equivalently\n\n >>> model2 = sm.GLSAR(Y, X, rho=2)\n >>> res = model2.iterative_fit(maxiter=6)\n >>> model2.rho\n array([-0.60479146, -0.85841922])\n\n Notes\n -----\n GLSAR is considered to be experimental.\n The linear autoregressive process of order p--AR(p)--is defined as:\n TODO\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n def __init__(self, endog, exog=None, rho=1, missing='none'):\n #this looks strange, interpreting rho as order if it is int\n if isinstance(rho, np.int):\n self.order = rho\n self.rho = np.zeros(self.order, np.float64)\n else:\n self.rho = np.squeeze(np.asarray(rho))\n if len(self.rho.shape) not in [0,1]:\n raise ValueError(\"AR parameters must be a scalar or a vector\")\n if self.rho.shape == ():\n self.rho.shape = (1,)\n self.order = self.rho.shape[0]\n if exog is None:\n #JP this looks wrong, should be a regression on constant\n #results for rho estimate now identical to yule-walker on y\n #super(AR, self).__init__(endog, add_constant(endog))\n super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),\n missing=missing)\n else:\n super(GLSAR, self).__init__(endog, exog, missing=missing)\n\n def iterative_fit(self, maxiter=3):\n \"\"\"\n Perform an iterative two-stage procedure to estimate a GLS model.\n\n The model is assumed to have AR(p) errors, AR(p) parameters and\n regression coefficients are estimated iteratively.\n\n Parameters\n ----------\n maxiter : integer, optional\n the number of iterations\n \"\"\"\n #TODO: update this after going through example.\n for i in range(maxiter-1):\n if hasattr(self, 'pinv_wexog'):\n del self.pinv_wexog\n self.initialize()\n results = self.fit()\n self.rho, _ = yule_walker(results.resid,\n order=self.order, df=None)\n #why not another call to self.initialize\n if hasattr(self, 'pinv_wexog'):\n del self.pinv_wexog\n self.initialize()\n results = self.fit() #final estimate\n return results # add missing return\n\n def whiten(self, X):\n \"\"\"\n Whiten a series of columns according to an AR(p)\n covariance structure. This drops initial p observations.\n\n Parameters\n ----------\n X : array-like\n The data to be whitened,\n\n Returns\n -------\n whitened array\n\n \"\"\"\n #TODO: notation for AR process\n X = np.asarray(X, np.float64)\n _X = X.copy()\n\n #the following loops over the first axis, works for 1d and nd\n for i in range(self.order):\n _X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]\n return _X[self.order:]\n\n\ndef yule_walker(X, order=1, method=\"unbiased\", df=None, inv=False, demean=True):\n \"\"\"\n Estimate AR(p) parameters from a sequence X using Yule-Walker equation.\n\n Unbiased or maximum-likelihood estimator (mle)\n\n See, for example:\n\n http://en.wikipedia.org/wiki/Autoregressive_moving_average_model\n\n Parameters\n ----------\n X : array-like\n 1d array\n order : integer, optional\n The order of the autoregressive process. Default is 1.\n method : string, optional\n Method can be \"unbiased\" or \"mle\" and this determines denominator in\n estimate of autocorrelation function (ACF) at lag k. 
If \"mle\", the\n denominator is n=X.shape[0], if \"unbiased\" the denominator is n-k.\n The default is unbiased.\n df : integer, optional\n Specifies the degrees of freedom. If `df` is supplied, then it is assumed\n the X has `df` degrees of freedom rather than `n`. Default is None.\n inv : bool\n If inv is True the inverse of R is also returned. Default is False.\n demean : bool\n True, the mean is subtracted from `X` before estimation.\n\n Returns\n -------\n rho\n The autoregressive coefficients\n sigma\n TODO\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> from statsmodels.datasets.sunspots import load\n >>> data = load()\n >>> rho, sigma = sm.regression.yule_walker(data.endog,\n order=4, method=\"mle\")\n\n >>> rho\n array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])\n >>> sigma\n 16.808022730464351\n\n \"\"\"\n #TODO: define R better, look back at notes and technical notes on YW.\n #First link here is useful\n #http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm\n method = str(method).lower()\n if method not in [\"unbiased\", \"mle\"]:\n raise ValueError(\"ACF estimation method must be 'unbiased' or 'MLE'\")\n X = np.array(X)\n if demean:\n X -= X.mean() # automatically demean's X\n n = df or X.shape[0]\n\n if method == \"unbiased\": # this is df_resid ie., n - p\n denom = lambda k: n - k\n else:\n denom = lambda k: n\n if X.ndim > 1 and X.shape[1] != 1:\n raise ValueError(\"expecting a vector to estimate AR parameters\")\n r = np.zeros(order+1, np.float64)\n r[0] = (X**2).sum() / denom(0)\n for k in range(1,order+1):\n r[k] = (X[0:-k]*X[k:]).sum() / denom(k)\n R = toeplitz(r[:-1])\n\n rho = np.linalg.solve(R, r[1:])\n sigmasq = r[0] - (r[1:]*rho).sum()\n if inv == True:\n return rho, np.sqrt(sigmasq), np.linalg.inv(R)\n else:\n return rho, np.sqrt(sigmasq)\n\nclass RegressionResults(base.LikelihoodModelResults):\n \"\"\"\n This class summarizes the fit of a linear regression model.\n\n It handles the output of contrasts, estimates of covariance, etc.\n\n Returns\n -------\n **Attributes**\n\n aic\n Aikake's information criteria. For a model with a constant\n :math:`-2llf + 2(df_model + 1)`. For a model without a constant\n :math:`-2llf + 2(df_model)`.\n bic\n Bayes' information criteria For a model with a constant\n :math:`-2llf + \\log(n)(df_model+1)`. For a model without a constant\n :math:`-2llf + \\log(n)(df_model)`\n bse\n The standard errors of the parameter estimates.\n pinv_wexog\n See specific model class docstring\n centered_tss\n The total (weighted) sum of squares centered about the mean.\n cov_HC0\n See HC0_se below. Only available after calling HC0_se.\n cov_HC1\n See HC1_se below. Only available after calling HC1_se.\n cov_HC2\n See HC2_se below. Only available after calling HC2_se.\n cov_HC3\n See HC3_se below. Only available after calling HC3_se.\n df_model :\n Model degress of freedom. The number of regressors `p`. Does not\n include the constant if one is present\n df_resid\n Residual degrees of freedom. `n - p - 1`, if a constant is present.\n `n - p` if a constant is not included.\n ess\n Explained sum of squares. If a constant is present, the centered\n total sum of squares minus the sum of squared residuals. If there is\n no constant, the uncentered total sum of squares is used.\n fvalue\n F-statistic of the fully specified model. 
Calculated as the mean\n squared error of the model divided by the mean squared error of the\n residuals.\n f_pvalue\n p-value of the F-statistic\n fittedvalues\n The predicted the values for the original (unwhitened) design.\n het_scale\n Only available if HC#_se is called. See HC#_se for more information.\n HC0_se\n White's (1980) heteroskedasticity robust standard errors.\n Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)\n where e_i = resid[i]\n HC0_se is a property. It is not evaluated until it is called.\n When it is called the RegressionResults instance will then have\n another attribute cov_HC0, which is the full heteroskedasticity\n consistent covariance matrix and also `het_scale`, which is in\n this case just resid**2. HCCM matrices are only appropriate for OLS.\n HC1_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as sqrt(diag(n/(n-p)*HC_0)\n HC1_se is a property. It is not evaluated until it is called.\n When it is called the RegressionResults instance will then have\n another attribute cov_HC1, which is the full HCCM and also `het_scale`,\n which is in this case n/(n-p)*resid**2. HCCM matrices are only\n appropriate for OLS.\n HC2_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)\n where h_ii = x_i(X.T X)^(-1)x_i.T\n HC2_se is a property. It is not evaluated until it is called.\n When it is called the RegressionResults instance will then have\n another attribute cov_HC2, which is the full HCCM and also `het_scale`,\n which is in this case is resid^(2)/(1-h_ii). HCCM matrices are only\n appropriate for OLS.\n HC3_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)\n where h_ii = x_i(X.T X)^(-1)x_i.T\n HC3_se is a property. It is not evaluated until it is called.\n When it is called the RegressionResults instance will then have\n another attribute cov_HC3, which is the full HCCM and also `het_scale`,\n which is in this case is resid^(2)/(1-h_ii)^(2). HCCM matrices are\n only appropriate for OLS.\n model\n A pointer to the model instance that called fit() or results.\n mse_model\n Mean squared error the model. This is the explained sum of squares\n divided by the model degrees of freedom.\n mse_resid\n Mean squared error of the residuals. The sum of squared residuals\n divided by the residual degrees of freedom.\n mse_total\n Total mean squared error. Defined as the uncentered total sum of\n squares divided by n the number of observations.\n nobs\n Number of observations n.\n normalized_cov_params\n See specific model class docstring\n params\n The linear coefficients that minimize the least squares criterion. This\n is usually called Beta for the classical linear model.\n pvalues\n The two-tailed p values for the t-stats of the params.\n resid\n The residuals of the model.\n rsquared\n R-squared of a model with an intercept. This is defined here as\n 1 - `ssr`/`centered_tss` if the constant is included in the model and\n 1 - `ssr`/`uncentered_tss` if the constant is omitted.\n rsquared_adj\n Adjusted R-squared. This is defined here as\n 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included\n and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.\n scale\n A scale factor for the covariance matrix.\n Default value is ssr/(n-p). 
Note that the square root of `scale` is\n often called the standard error of the regression.\n ssr\n Sum of squared (whitened) residuals.\n uncentered_tss\n Uncentered sum of squares. Sum of the squared values of the\n (whitened) endogenous response variable.\n wresid\n The residuals of the transformed/whitened regressand and regressor(s)\n \"\"\"\n\n # For robust covariance matrix properties\n _HC0_se = None\n _HC1_se = None\n _HC2_se = None\n _HC3_se = None\n\n _cache = {} # needs to be a class attribute for scale setter?\n\n def __init__(self, model, params, normalized_cov_params=None, scale=1.):\n super(RegressionResults, self).__init__(model, params,\n normalized_cov_params,\n scale)\n self._cache = resettable_cache()\n\n def __str__(self):\n self.summary()\n\n def conf_int(self, alpha=.05, cols=None):\n \"\"\"\n Returns the confidence interval of the fitted parameters.\n\n Parameters\n ----------\n alpha : float, optional\n The `alpha` level for the confidence interval.\n ie., The default `alpha` = .05 returns a 95% confidence interval.\n cols : array-like, optional\n `cols` specifies which confidence intervals to return\n\n Notes\n -----\n The confidence interval is based on Student's t-distribution.\n \"\"\"\n bse = self.bse\n params = self.params\n dist = stats.t\n q = dist.ppf(1 - alpha / 2, self.df_resid)\n\n if cols is None:\n lower = self.params - q * bse\n upper = self.params + q * bse\n else:\n cols = np.asarray(cols)\n lower = params[cols] - q * bse[cols]\n upper = params[cols] + q * bse[cols]\n return np.asarray(zip(lower, upper))\n\n @cache_readonly\n def df_resid(self):\n return self.model.df_resid\n\n @cache_readonly\n def df_model(self):\n return self.model.df_model\n\n @cache_readonly\n def nobs(self):\n return float(self.model.wexog.shape[0])\n\n @cache_readonly\n def fittedvalues(self):\n return self.model.predict(self.params, self.model.exog)\n\n @cache_readonly\n def wresid(self):\n return self.model.wendog - self.model.predict(self.params,\n self.model.wexog)\n\n @cache_readonly\n def resid(self):\n return self.model.endog - self.model.predict(self.params,\n self.model.exog)\n\n #TODO: fix writable example\n @cache_writable()\n def scale(self):\n wresid = self.wresid\n return np.dot(wresid, wresid) / self.df_resid\n\n @cache_readonly\n def ssr(self):\n wresid = self.wresid\n return np.dot(wresid, wresid)\n\n @cache_readonly\n def centered_tss(self):\n model = self.model\n weights = getattr(model, 'weights', None)\n if weights is not None:\n return np.sum(weights*(model.endog - np.average(model.endog,\n weights=weights))**2)\n else: # this is probably broken for GLS\n centered_endog = model.wendog - model.wendog.mean()\n return np.dot(centered_endog, centered_endog)\n\n @cache_readonly\n def uncentered_tss(self):\n wendog = self.model.wendog\n return np.dot(wendog, wendog)\n\n @cache_readonly\n def ess(self):\n if self.k_constant:\n return self.centered_tss - self.ssr\n else:\n return self.uncentered_tss - self.ssr\n\n @cache_readonly\n def rsquared(self):\n if self.k_constant:\n return 1 - self.ssr/self.centered_tss\n else:\n return 1 - self.ssr/self.uncentered_tss\n\n @cache_readonly\n def rsquared_adj(self):\n return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)\n\n @cache_readonly\n def mse_model(self):\n return self.ess/self.df_model\n\n @cache_readonly\n def mse_resid(self):\n return self.ssr/self.df_resid\n\n @cache_readonly\n def mse_total(self):\n if self.k_constant:\n return self.centered_tss / (self.df_resid + 
self.df_model)\n else:\n return self.uncentered_tss/ (self.df_resid + self.df_model)\n\n @cache_readonly\n def fvalue(self):\n return self.mse_model/self.mse_resid\n\n @cache_readonly\n def f_pvalue(self):\n return stats.f.sf(self.fvalue, self.df_model, self.df_resid)\n\n @cache_readonly\n def bse(self):\n return np.sqrt(np.diag(self.cov_params()))\n\n @cache_readonly\n def pvalues(self):\n return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2\n\n @cache_readonly\n def aic(self):\n return -2 * self.llf + 2 * (self.df_model + self.k_constant)\n\n @cache_readonly\n def bic(self):\n return (-2 * self.llf + np.log(self.nobs) * (self.df_model +\n self.k_constant))\n\n #TODO: make these properties reset bse\n def _HCCM(self, scale):\n H = np.dot(self.model.pinv_wexog,\n scale[:,None]*self.model.pinv_wexog.T)\n return H\n\n @property\n def HC0_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n if self._HC0_se is None:\n self.het_scale = self.resid**2 # or whitened residuals? only OLS?\n self.cov_HC0 = self._HCCM(self.het_scale)\n self._HC0_se = np.sqrt(np.diag(self.cov_HC0))\n return self._HC0_se\n\n @property\n def HC1_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n if self._HC1_se is None:\n self.het_scale = self.nobs/(self.df_resid)*(self.resid**2)\n self.cov_HC1 = self._HCCM(self.het_scale)\n self._HC1_se = np.sqrt(np.diag(self.cov_HC1))\n return self._HC1_se\n\n @property\n def HC2_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n if self._HC2_se is None:\n # probably could be optimized\n h = np.diag(chain_dot(self.model.exog,\n self.normalized_cov_params,\n self.model.exog.T))\n self.het_scale = self.resid**2/(1-h)\n self.cov_HC2 = self._HCCM(self.het_scale)\n self._HC2_se = np.sqrt(np.diag(self.cov_HC2))\n return self._HC2_se\n\n @property\n def HC3_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n if self._HC3_se is None:\n # above probably could be optimized to only calc the diag\n h = np.diag(chain_dot(self.model.exog,\n self.normalized_cov_params,\n self.model.exog.T))\n self.het_scale=(self.resid/(1-h))**2\n self.cov_HC3 = self._HCCM(self.het_scale)\n self._HC3_se = np.sqrt(np.diag(self.cov_HC3))\n return self._HC3_se\n\n #TODO: this needs a test\n def norm_resid(self):\n \"\"\"\n Residuals, normalized to have unit length and unit variance.\n\n Returns\n -------\n An array wresid/sqrt(scale)\n\n Notes\n -----\n This method is untested\n \"\"\"\n if not hasattr(self, 'resid'):\n raise ValueError('need normalized residuals to estimate standard '\n 'deviation')\n return self.wresid * recipr(np.sqrt(self.scale))\n\n def compare_f_test(self, restricted):\n '''use F test to test whether restricted model is correct\n\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current\n model. The result instance of the restricted model is required to\n have two attributes, residual sum of squares, `ssr`, residual\n degrees of freedom, `df_resid`.\n\n Returns\n -------\n f_value : float\n test statistic, F distributed\n p_value : float\n p-value of the test statistic\n df_diff : int\n degrees of freedom of the restriction, i.e. 
difference in df between\n models\n\n Notes\n -----\n See mailing list discussion October 17,\n\n '''\n ssr_full = self.ssr\n ssr_restr = restricted.ssr\n df_full = self.df_resid\n df_restr = restricted.df_resid\n\n df_diff = (df_restr - df_full)\n f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full\n p_value = stats.f.sf(f_value, df_diff, df_full)\n return f_value, p_value, df_diff\n\n def compare_lr_test(self, restricted):\n '''\n Likelihood ratio test to test whether restricted model is correct\n\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current model.\n The result instance of the restricted model is required to have two\n attributes, residual sum of squares, `ssr`, residual degrees of\n freedom, `df_resid`.\n\n Returns\n -------\n lr_stat : float\n likelihood ratio, chisquare distributed with df_diff degrees of\n freedom\n p_value : float\n p-value of the test statistic\n df_diff : int\n degrees of freedom of the restriction, i.e. difference in df between\n models\n\n Notes\n -----\n\n .. math:: D=-2\\\\log\\\\left(\\\\frac{\\\\mathcal{L}_{null}}\n {\\\\mathcal{L}_{alternative}}\\\\right)\n\n where :math:`\\mathcal{L}` is the likelihood of the model. With :math:`D`\n distributed as chisquare with df equal to difference in number of\n parameters or equivalently difference in residual degrees of freedom\n\n TODO: put into separate function, needs tests\n '''\n # See mailing list discussion October 17,\n llf_full = self.llf\n llf_restr = restricted.llf\n df_full = self.df_resid\n df_restr = restricted.df_resid\n\n lrdf = (df_restr - df_full)\n lrstat = -2*(llf_restr - llf_full)\n lr_pvalue = stats.chi2.sf(lrstat, lrdf)\n\n return lrstat, lr_pvalue, lrdf\n\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"Summarize the Regression Results\n\n Parameters\n -----------\n yname : string, optional\n Default is `y`\n xname : list of strings, optional\n Default is `var_##` for ## in p the number of regressors\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n\n #TODO: import where we need it (for now), add as cached attributes\n from statsmodels.stats.stattools import (jarque_bera,\n omni_normtest, durbin_watson)\n jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)\n omni, omnipv = omni_normtest(self.wresid)\n\n #TODO: reuse condno from somewhere else ?\n #condno = np.linalg.cond(np.dot(self.wexog.T, self.wexog))\n wexog = self.model.wexog\n eigvals = np.linalg.linalg.eigvalsh(np.dot(wexog.T, wexog))\n eigvals = np.sort(eigvals) #in increasing order\n condno = np.sqrt(eigvals[-1]/eigvals[0])\n\n self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,\n omni=omni, omnipv=omnipv, condno=condno,\n mineigval=eigvals[0])\n\n #TODO not used yet\n #diagn_left_header = ['Models stats']\n #diagn_right_header = ['Residual stats']\n\n #TODO: requiring list/iterable is a bit annoying\n #need more control over formatting\n #TODO: default don't work if it's not identically spelled\n\n top_left = [('Dep. 
Variable:', None),\n ('Model:', None),\n ('Method:', ['Least Squares']),\n ('Date:', None),\n ('Time:', None),\n ('No. Observations:', None),\n ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n ('Df Model:', None), #[self.df_model])\n ]\n\n top_right = [('R-squared:', [\"%#8.3f\" % self.rsquared]),\n ('Adj. R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n ('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n ('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n ('AIC:', [\"%#8.4g\" % self.aic]),\n ('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n diagn_left = [('Omnibus:', [\"%#6.3f\" % omni]),\n ('Prob(Omnibus):', [\"%#6.3f\" % omnipv]),\n ('Skew:', [\"%#6.3f\" % skew]),\n ('Kurtosis:', [\"%#6.3f\" % kurtosis])\n ]\n\n diagn_right = [('Durbin-Watson:', [\"%#8.3f\" % durbin_watson(self.wresid)]),\n ('Jarque-Bera (JB):', [\"%#8.3f\" % jb]),\n ('Prob(JB):', [\"%#8.3g\" % jbpv]),\n ('Cond. No.', [\"%#8.3g\" % condno])\n ]\n\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Regression Results\"\n\n #create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=True)\n\n smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n yname=yname, xname=xname,\n title=\"\")\n\n #add warnings/notes, added to text format only\n etext =[]\n if self.model.exog.shape[0] < self.model.exog.shape[1]:\n wstr = \"The input rank is higher than the number of observations.\"\n etext.append(wstr)\n if eigvals[0] < 1e-10:\n wstr = \"The smallest eigenvalue is %6.3g. This might indicate \"\n wstr += \"that there are\\n\"\n wstr += \"strong multicollinearity problems or that the design \"\n wstr += \"matrix is singular.\"\n wstr = wstr % eigvals[0]\n etext.append(wstr)\n elif condno > 1000: #TODO: what is recommended\n wstr = \"The condition number is large, %6.3g. This might \"\n wstr += \"indicate that there are\\n\"\n wstr += \"strong multicollinearity or other numerical \"\n wstr += \"problems.\"\n wstr = wstr % condno\n etext.append(wstr)\n\n if etext:\n etext = [\"[{0}] {1}\".format(i + 1, text) for i, text in enumerate(etext)]\n etext.insert(0, \"Warnings:\")\n smry.add_extra_txt(etext)\n\n return smry\n\n #top = summary_top(self, gleft=topleft, gright=diagn_left, #[],\n # yname=yname, xname=xname,\n # title=self.model.__class__.__name__ + ' ' +\n # \"Regression Results\")\n #par = summary_params(self, yname=yname, xname=xname, alpha=.05,\n # use_t=False)\n #\n #diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,\n # yname=yname, xname=xname,\n # title=\"Linear Model\")\n #\n #return summary_return([top, par, diagn], return_fmt=return_fmt)\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental summary function to summarize the regression results\n\n Parameters\n -----------\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n yname : string\n Name of the dependent variable (optional)\n title : string, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n # Diagnostics\n from statsmodels.stats.stattools import (jarque_bera,\n omni_normtest,\n durbin_watson)\n from numpy.linalg import (cond, eigvalsh)\n from statsmodels.compatnp.collections import OrderedDict\n jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)\n omni, omnipv = omni_normtest(self.wresid)\n dw = durbin_watson(self.wresid)\n condno = cond(self.model.wexog)\n eigvals = eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))\n eigvals = np.sort(eigvals) #in increasing order\n diagnostic = OrderedDict([\n ('Omnibus:', \"%.3f\" % omni),\n ('Prob(Omnibus):', \"%.3f\" % omnipv),\n ('Skew:', \"%.3f\" % skew),\n ('Kurtosis:', \"%.3f\" % kurtosis),\n ('Durbin-Watson:', \"%.3f\" % dw),\n ('Jarque-Bera (JB):', \"%.3f\" % jb),\n ('Prob(JB):', \"%.3f\" % jbpv),\n ('Condition No.:', \"%.0f\" % condno)\n ])\n\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n smry.add_dict(diagnostic)\n\n # Warnings\n if eigvals[0] < 1e-10:\n warn = \"The smallest eigenvalue is %6.3g. This might indicate that\\\n there are strong multicollinearity problems or that the design\\\n matrix is singular.\" % eigvals[0]\n smry.add_text(warn)\n if condno > 1000:\n warn = \"* The condition number is large (%.g). This might indicate \\\n strong multicollinearity or other numerical problems.\" % condno\n smry.add_text(warn)\n\n return smry\n\n\nclass OLSResults(RegressionResults):\n \"\"\"\n Results class for for an OLS model.\n\n Most of the methods and attributes are inherited from RegressionResults.\n The special methods that are only available for OLS are:\n\n - get_influence\n - outlier_test\n - el_test\n - conf_int_el\n\n See Also\n --------\n RegressionResults\n\n \"\"\"\n\n def get_influence(self):\n \"\"\"\n get an instance of Influence with influence and outlier measures\n\n Returns\n -------\n infl : Influence instance\n the instance has methods to calculate the main influence and\n outlier measures for the OLS regression\n\n \"\"\"\n from statsmodels.stats.outliers_influence import OLSInfluence\n return OLSInfluence(self)\n\n def outlier_test(self, method='bonf', alpha=.05):\n \"\"\"\n Test observations for outliers according to method\n\n Parameters\n ----------\n method : str\n\n - `bonferroni` : one-step correction\n - `sidak` : one-step correction\n - `holm-sidak` :\n - `holm` :\n - `simes-hochberg` :\n - `hommel` :\n - `fdr_bh` : Benjamini/Hochberg\n - `fdr_by` : Benjamini/Yekutieli\n\n See `statsmodels.stats.multitest.multipletests` for details.\n alpha : float\n familywise error rate\n\n Returns\n -------\n table : ndarray or DataFrame\n Returns either an ndarray or a DataFrame if labels is not None.\n Will attempt to get labels from model_results if available. 
The\n columns are the Studentized residuals, the unadjusted p-value,\n and the corrected p-value according to method.\n\n Notes\n -----\n The unadjusted p-value is stats.t.sf(abs(resid), df) where\n df = df_resid - 1.\n \"\"\"\n from statsmodels.stats.outliers_influence import outlier_test\n return outlier_test(self, method, alpha)\n\n def el_test(self, b0_vals, param_nums, return_weights=0,\n ret_params=0, method='nm',\n stochastic_exog=1, return_params=0):\n \"\"\"\n Tests single or joint hypotheses of the regression parameters.\n\n Parameters\n ----------\n\n b0_vals : 1darray\n The hypothesized value of the parameter to be tested\n\n param_nums : 1darray\n The parameter number to be tested\n\n return_weights : bool\n If true, returns the weights that optimize the likelihood\n ratio at b0_vals. Default is False\n\n ret_params : bool\n If true, returns the parameter vector that maximizes the likelihood\n ratio at b0_vals. Also returns the weights. Default is False\n\n method : string\n Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The\n optimization method that optimizes over nuisance parameters.\n Default is 'nm'\n\n stochastic_exog : bool\n When True, the exogenous variables are assumed to be stochastic.\n When the regressors are nonstochastic, moment conditions are\n placed on the exogenous variables. Confidence intervals for\n stochastic regressors are at least as large as non-stochastic\n regressors. Default is True\n\n Returns\n -------\n\n res : tuple\n The p-value and -2 times the log likelihood ratio for the\n hypothesized values.\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.stackloss.load()\n >>> endog = data.endog\n >>> exog = sm.add_constant(data.exog)\n >>> model = sm.OLS(endog, exog)\n >>> fitted = model.fit()\n >>> fitted.params\n array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])\n >>> fitted.rsquared\n 0.91357690446068196\n >>> # Test that the slope on the first variable is 0\n >>> fitted.el_test([0], [1])\n (1.7894660442330235e-07, 27.248146353709153)\n \"\"\"\n params = np.copy(self.params)\n opt_fun_inst = _ELRegOpts() # to store weights\n if len(param_nums) == len(params):\n llr = opt_fun_inst._opt_nuis_regress(b0_vals,\n param_nums=param_nums,\n endog=self.model.endog,\n exog=self.model.exog,\n nobs=self.model.nobs,\n nvar=self.model.exog.shape[1],\n params=params,\n b0_vals=b0_vals,\n stochastic_exog=stochastic_exog)\n pval = 1 - chi2.cdf(llr, len(param_nums))\n if return_weights:\n return llr, pval, opt_fun_inst.new_weights\n else:\n return llr, pval\n x0 = np.delete(params, param_nums)\n args = (param_nums, self.model.endog, self.model.exog,\n self.model.nobs, self.model.exog.shape[1], params,\n b0_vals, stochastic_exog)\n if method == 'nm':\n llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,\n maxiter=10000, full_output=1, disp=0,\n args=args)[1]\n if method == 'powell':\n llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,\n full_output=1, disp=0,\n args=args)[1]\n\n pval = 1 - chi2.cdf(llr, len(param_nums))\n if ret_params:\n return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params\n elif return_weights:\n return llr, pval, opt_fun_inst.new_weights\n else:\n return llr, pval\n\n def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,\n method='nm', stochastic_exog=1):\n \"\"\"\n Computes the confidence interval for the parameter given by param_num\n\n Parameters\n ----------\n\n param_num : int\n The parameter whose confidence interval is desired\n\n sig : float\n The significance level. Default is .05\n\n upper_bound : float\n The maximum value the upper limit can be. Default is the\n 99.9% confidence value under OLS assumptions.\n\n lower_bound : float\n The minimum value the lower limit can be. Default is the 99.9%\n confidence value under OLS assumptions.\n\n method : string\n Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The\n optimization method that optimizes over nuisance parameters.\n Default is 'nm'\n\n Returns\n -------\n\n ci : tuple\n The confidence interval\n\n See Also\n --------\n\n el_test\n\n Notes\n -----\n\n This function uses brentq to find the value of beta where\n el_test([beta], [param_num])[0] is equal to the critical\n value.\n\n The function returns the results of each iteration of brentq at\n each value of beta.\n\n The current function value of the last printed optimization\n should be the critical value at the desired significance level.\n For alpha=.05, the value is 3.841459.\n\n To ensure optimization terminated successfully, it is suggested to\n do el_test([lower_limit], [param_num])\n\n If the optimization does not terminate successfully, consider switching\n optimization algorithms.\n\n If optimization is still not successful, try changing the values of\n start_int_params. If the current function value repeatedly jumps\n from a number between 0 and the critical value and a very large number\n (>50), the starting parameters of the interior minimization need\n to be changed.\n \"\"\"\n r0 = chi2.ppf(1 - sig, 1)\n if upper_bound is None:\n upper_bound = self.conf_int(.01)[param_num][1]\n if lower_bound is None:\n lower_bound = self.conf_int(.01)[param_num][0]\n f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),\n method=method,\n stochastic_exog=stochastic_exog)[0]-r0\n lowerl = optimize.brenth(f, lower_bound,\n self.params[param_num])\n upperl = optimize.brenth(f, self.params[param_num],\n upper_bound)\n # ^ Seems to be faster than brentq in most cases\n return (lowerl, upperl)\n\n\nclass RegressionResultsWrapper(wrap.ResultsWrapper):\n\n _attrs = {\n 'chisq' : 'columns',\n 'sresid' : 'rows',\n 'weights' : 'rows',\n 'wresid' : 'rows',\n 'bcov_unscaled' : 'cov',\n 'bcov_scaled' : 'cov',\n 'HC0_se' : 'columns',\n 'HC1_se' : 'columns',\n 'HC2_se' : 'columns',\n 'HC3_se' : 'columns'\n }\n\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,\n _attrs)\n\n _methods = {\n 'norm_resid' : 'rows',\n }\n\n _wrap_methods = wrap.union_dicts(\n base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(RegressionResultsWrapper,\n RegressionResults)\n\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n data = sm.datasets.longley.load()\n data.exog = add_constant(data.exog, prepend=False)\n ols_results = OLS(data.endog, data.exog).fit() #results\n gls_results = GLS(data.endog, data.exog).fit() #results\n print(ols_results.summary())\n tables = ols_results.summary(returns='tables')\n csv = ols_results.summary(returns='csv')\n\"\"\"\n Summary of Regression Results\n=======================================\n| Dependent Variable: ['y']|\n| Model: OLS|\n| Method: Least Squares|\n| Date: Tue, 29 Jun 2010|\n| Time: 22:32:21|\n| # obs: 16.0|\n| Df residuals: 9.0|\n| Df model: 6.0|\n===========================================================================\n| coefficient std. 
error t-statistic prob.|\n---------------------------------------------------------------------------\n| x1 15.0619 84.9149 0.1774 0.8631|\n| x2 -0.0358 0.0335 -1.0695 0.3127|\n| x3 -2.0202 0.4884 -4.1364 0.002535|\n| x4 -1.0332 0.2143 -4.8220 0.0009444|\n| x5 -0.0511 0.2261 -0.2261 0.8262|\n| x6 1829.1515 455.4785 4.0159 0.003037|\n| const -3482258.6346 890420.3836 -3.9108 0.003560|\n===========================================================================\n| Models stats Residual stats |\n---------------------------------------------------------------------------\n| R-squared: 0.995479 Durbin-Watson: 2.55949 |\n| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |\n| F-statistic: 330.285 Prob(Omnibus): 0.687765 |\n| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |\n| Log likelihood: -109.617 Prob(JB): 0.838294 |\n| AIC criterion: 233.235 Skew: 0.419984 |\n| BIC criterion: 238.643 Kurtosis: 2.43373 |\n---------------------------------------------------------------------------\n\"\"\"\n\n","repo_name":"yarikoptic/pystatsmodels","sub_path":"statsmodels/regression/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":61033,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"62"} +{"seq_id":"19618117901","text":"from utils import mnist_reader\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport knn\nimport cnn_tensorflow as cnn\n\n# Importing data from local files\nX_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')\nX_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nprint(\"---------------------------------------------------------------------\")\nprint(\"| Dimensions of Train Set\")\nprint(\"| Dimension(X_train)=\", np.shape(X_train))\nprint(\"| There are\", np.shape(X_train)[0], \"images where each image is\", np.shape(X_train)[1:], \"pixels in size\")\nprint(\"| There are\", np.shape(np.unique(y_train))[0], \"unique image labels\")\nprint(\"---------------------------------------------------------------------\")\nprint(\"| Dimensions of Test Set\")\nprint(\"| Dimension(X_test)=\", np.shape(X_test), \"Dimension(y_test)=\", np.shape(y_test)[0])\nprint(\"---------------------------------------------------------------------\")\n\n\ndef show_sample_dataset():\n \"\"\"\n Showing first 40 images of train dataset\n \"\"\"\n xTrain = np.reshape(X_train, (np.shape(X_train)[0], 28, 28))\n plt.figure(figsize=(14, 10))\n plt.style.use('default')\n\n for i in range(40):\n plt.subplot(5, 8, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(xTrain[i], cmap=plt.cm.gist_yarg)\n plt.xlabel(class_names[y_train[i]])\n\n plt.draw()\n plt.waitforbuttonpress()\n plt.close()\n\n\ndef knn_algorithm():\n\n start = time.time()\n\n # Running the KNN algorithm\n (best_err, best_k, errors) = knn.model_selection_knn(X_test, X_train, y_test, y_train, range(1, 15))\n\n end = time.time()\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n # Presenting results\n print(\"Time: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds))\n print(\"Best k: {}\".format(best_k))\n print(\"Best (lowest) error: {}\".format(best_err))\n print(\"Accuracy: {}\".format(1 - best_err))\n print(\"Error table for each k: \", errors)\n print(\"\\n\\n------------------- CLOSE THE CHART TO CONTINUE--------------------\")\n\n # 
Plotting the result\n knn.plot_knn_errors(errors)\n\n\ndef cnn_tensorflow():\n\n start = time.time()\n\n # Running the CNN Tensorflow algorithm\n cnn.show_sample_dataset(X_train, y_train)\n (xTrain, yTrain, xVal, yVal, xTest, yTest) = cnn.prepare_dataset(X_train, y_train, X_test, y_test)\n model = cnn.cnn_model()\n train_model = cnn.run_model(model, xTrain, yTrain, xVal, yVal, xTest, yTest, class_names)\n\n end = time.time()\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"Time: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds))\n print(\"\\n\\n------------------- PRESS ANY KEY TO CONTINUE--------------------\")\n\n # Plotting results\n cnn.plot_model_evaluation(train_model)\n\n\nif __name__ == \"__main__\":\n\n show_sample_dataset()\n\n print(\"\\n---------------\")\n print(\"| RUNNING KNN |\")\n print(\"---------------\")\n knn_algorithm()\n\n print(\"\\n--------------------------\")\n print(\"| RUNNING CNN TENSORFLOW |\")\n print(\"--------------------------\")\n cnn_tensorflow()\n","repo_name":"sm00k3y/Fashion-MNIST-recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17685656794","text":"import sys\r\nimport csv\r\nfrom decimal import Decimal\r\n\r\ntimestep = list()\r\nvacf = list()\r\n\r\nfileName = sys.argv[1]\r\n\r\nwith open(fileName) as csv_file:\r\n csvReader = csv.reader(csv_file, delimiter=',')\r\n line = 0\r\n for row in csvReader:\r\n if(line != 0):\r\n timestep.append(int(row[0]))\r\n vacf.append(float(row[1]))\r\n line += 1\r\n else:\r\n line += 1\r\n\r\ntrapAreas = 0\r\nfor x in range(len(vacf)-1):\r\n trapAreas += (vacf[x] + vacf[x+1])\r\n\r\narea = 10*trapAreas/2\r\n\r\ndiffCoeff = area/3\r\n\r\ndiffCoeff = diffCoeff * 1e-7\r\n\r\nprint('Coefficient of diffusion: ', '%.2E'%Decimal(str(diffCoeff)))\r\n\r\n","repo_name":"parthvshah/VACF","sub_path":"utils/diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23306667422","text":"import os\nimport json\nimport configparser\nimport numpy as np\nimport pandas as pd\n\nimport scipy.stats\n\nfrom lib.diagnostics import *\nfrom lib.generic import *\n\n\n# ================================================================= #\n# Epoch related functions\n#\n# ================================================================= #\n\n\ndef save_simulation(t0, h_dt, v_dt, t_h, t_v, out_dir):\n \"\"\"\n Save the current state of the simulation\n \n Parameters\n t0 : float\n Current time of the simulation in days\n h_dt : dict\n keys : int\n Index of host.\n values : ndarray, float32, shape (nph, nsnps)\n Parasite genomes infecting host.\n v_dt : dict\n keys : int\n Index of vector.\n values : ndarray, float32, shape (npv, nsnps)\n Parasite genomes infecting vector.\n t_h : ndarray, float, shape(n_hosts)\n Array giving the last time (in days) that\n a given host's state was updated.\n t_v : ndarray, float, shape(n_vectors)\n Array giving the last time (in days) that\n a given vector's state was updated.\n Returns\n Saves the state of the simulation in six arrays:\n \n h_ixs : ndarray, int, shape (n_inf_hosts, )\n Indexes of the infected hosts, corresponding to\n `t_h`.\n h_genomes : ndarray, float, shape (nph, nsnps, n_inf_hosts)\n The genetic material carried by each infected host. 
Note\n that hosts are indexed by the last dimension.\n t_h : as in Parameters\n v_ixs : same as h_ixs but for vectors\n v_genomes : same as h_genomes but for vectors\n t_v : as in Parameters\n \n The time is saved as a dictionary `t0.json`.\n \n \"\"\"\n \n # Save the current time\n json.dump({\"t0\" : t0}, open(os.path.join(out_dir, \"t0.json\"), \"w\"), default=default)\n \n # Save the time since last update\n np.save(os.path.join(out_dir, \"t_h.npy\"), t_h)\n np.save(os.path.join(out_dir, \"t_v.npy\"), t_v)\n \n # Save the host state\n h_ixs = []\n h_genomes = []\n for ix, genome in h_dt.items():\n h_ixs.append(ix)\n h_genomes.append(genome)\n h_ixs = np.array(h_ixs)\n h_genomes = np.dstack(h_genomes)\n np.save(os.path.join(out_dir, \"h_ixs.npy\"), h_ixs)\n np.save(os.path.join(out_dir, \"h_genomes.npy\"), h_genomes)\n \n # Save the vector state\n v_ixs = []\n v_genomes = []\n for ix, genome in v_dt.items():\n v_ixs.append(ix)\n v_genomes.append(genome)\n v_ixs = np.array(v_ixs)\n v_genomes = np.dstack(v_genomes)\n np.save(os.path.join(out_dir, \"v_ixs.npy\"), v_ixs)\n np.save(os.path.join(out_dir, \"v_genomes.npy\"), v_genomes)\n \n return 0\n\n\ndef parse_parameters(config):\n \"\"\"\n Pass the parameters in an `.ini` file\n specified by a `config` file\n \n TODO\n - Could check that the length of all of these is correct\n \n Parameters\n config : ConfigParser class\n Object that contains all of the simulation\n parameters (configuration values) loaded \n from the '.ini' file.\n Returns\n params : dict\n Dictionary of all parameter values\n required for the simulation.\n \n \"\"\"\n \n params = {}\n \n demography = {param: int(val) for param, val in config.items('Demography')}\n transmission = {param: float(val) for param, val in config.items('Transmission')}\n genome = {param: int(val) for param, val in config.items('Genome')}\n evolution = {param: float(val) for param, val in config.items('Evolution')}\n \n params.update(demography)\n params.update(transmission)\n params.update(genome)\n params.update(evolution)\n \n return params\n\n\ndef update_vectors(nv, v, t_v, v_dt):\n \"\"\"\n Update the current vector population such that\n the number of vectors `nv` and the vector population\n state data structures (`v`, `t_v` and `v_dt`) match\n \n This may involve either killing or creating vectors.\n \n Parameters\n nv: int\n The number of vectors that the simulation *should*\n currently contain; i.e. the number given by the\n parameter file as `params['nv']`. 
Note this may\n change as the simulation passes through Epochs.\n v: ndarray, int8, shape(n_vectors)\n The infection status of all vectors.\n t_v: ndarray, float32, shape(n_vectors)\n The last time each vector's infection was\n updated.\n v_dt: dict, shape(n_infected_vectors)\n keys: int\n Indices for infected vectors.\n values: ndarray, float32, shape(npv, nsnps)\n Parasite genomes held by infected vectors.\n \n Returns:\n v: ndarray, int8, shape(n_vectors)\n The infection status of all vectors.\n t_v: ndarray, float32, shape(n_vectors)\n The last time each vector's infection was\n updated.\n v_dt: dict, shape(n_infected_vectors)\n keys: int\n Indices for infected vectors.\n values: ndarray, float32, shape(npv, nsnps)\n Parasite genomes held by infected vectors.\n \n \"\"\"\n \n nv = int(nv)\n if nv > len(v): # Create vectors\n n_missing_v = nv - len(v)\n v = np.concatenate((v, np.zeros(n_missing_v, dtype='int8')))\n t_v = np.concatenate((t_v, np.zeros(n_missing_v)))\n \n elif nv < len(v): # Kill vectors\n v = v[:nv] # Random order, so this is a random subset\n v_dt = {ix: genomes for ix, genomes in v_dt.items() if ix < nv}\n t_v = t_v[:nv]\n \n return v, t_v, v_dt\n\n\n# ================================================================= #\n# class Epoch and Epochs\n# \n#\n# ================================================================= #\n\n\nclass Epoch(object):\n \"\"\"\n Store information about a single Epoch\n in fwd-dream \n \n Example section from a `params_.ini`:\n \n [Epoch_Crash]\n duration = 36500\n adj_params = gamma\n adj_vals = 0.012195\n approach = logistic\n approach_t = 30\n div_samp_freq = 5\n div_samp_t = 365\n prev_samp_freq = 5\n prev_samp_t = 365\n calc_genetics = True\n save_state = True\n \n Example Usage:\n \n epoch = Epoch(config, \"Epoch_Crash\")\n epoch.set_params(entry_params)\n epoch.set_timings(start_time)\n epoch.set_approach()\n epoch.set_sampling()\n \n \n \"\"\"\n \n def __init__(self, config, section):\n \n # Epoch name\n self.config = config\n self.section = section\n \n if not section.startswith(\"Epoch_\"):\n raise ValueError(\"Epochs sections must begin with 'Epoch_'.\")\n \n self.name = section.split(\"_\")[1]\n \n # Epoch time \n self.duration = eval(config.get(section, \"duration\")) # evaluate to parse 'None'\n self.t0 = None\n self.tdelta = None\n self.t1 = None\n\n # Epoch entry and equilibrium parameters\n self.begun = False\n self.entry_params = None\n self.epoch_params = None\n self.x_h = None\n self.x_v = None\n \n # Parameter changes entering epoch\n self.adj_keys = [s.strip() for s in config.get(section, \"adj_params\").split(\",\")]\n self.adj_vals = [float(val) for val in config.get(section, \"adj_vals\").split(\",\")]\n self.adj_params = {key: val for key, val in zip(self.adj_keys, self.adj_vals)}\n \n # Timing of parameter changes\n self.approach = [s.strip() for s in config.get(section, \"approach\").split(\",\")]\n self.approach_ts = [float(val) for val in config.get(section, \"approach_t\").split(\",\")]\n self.approach_t1 = None # The last time we will update parameters\n self.tparam = None # Last time the parameters were updated\n self.param_update_freq = None\n \n if not len(self.adj_params) == len(self.adj_vals):\n raise ValueError(\"The number of parameters adjusted by `adj_params` must equal \" + \\\n \"the number of values given by `adj_vals`.\")\n \n if not len(self.approach) == len(self.approach_ts):\n raise ValueError(\"The number of approach functions given by `approach` must equal \" + \\\n \"the number of approach times 
given by `approach_ts`.\")\n \n # Longitudinal sampling of genetic diversity\n self.adj_div_samp = None\n self.div_samp_freq = None\n self.div_samp_t = None\n \n # Longitudinal sampling of prevalence\n self.adj_prev_samp = None\n self.prev_samp_freq = None\n self.prev_samp_t = None\n \n # Storage\n self.calc_genetics = config.getboolean(section, \"calc_genetics\")\n self.save_state = config.getboolean(section, \"save_state\")\n \n \n def set_params(self, entry_params):\n \"\"\"\n Set entry parameters and equilibrium parameters\n for the Epoch\n \n Parameters\n entry_params: dict\n Simulation parameters upon entry to the\n epoch.\n Returns\n Null\n \n \"\"\"\n # Set entry parameters\n self.entry_params = entry_params.copy()\n \n # Compute epoch equilibrium parameters\n self.epoch_params = entry_params.copy()\n self.epoch_params.update(self.adj_params)\n \n # Compute epoch host and vector prevalence\n derived_params = calc_derived_params(self.epoch_params)\n equil_params = calc_equil_params(self.epoch_params, derived_params)\n self.x_h = calc_x_h(**equil_params)\n self.x_v = calc_x_v(**equil_params)\n \n \n def set_timings(self, start_time):\n \"\"\"\n Set the start and end time of the Epoch\n using the `start_time` and duration\n information from `self.duration`\n \n Parameters\n start_time: float\n The start time of the Epoch.\n \n Returns\n Null\n \n \"\"\"\n if self.entry_params is None:\n raise ValueError(\"Must run `.set_params()` before running `.set_timings()`.\")\n \n # Start time\n self.t0 = start_time\n \n # Duration\n if self.duration is None:\n # Calculate approximate equilibrium time\n derived_params = calc_derived_params(self.epoch_params)\n approx_ne = self.x_h * self.epoch_params[\"nh\"]\n approx_generation_t = derived_params[\"h_v\"] + derived_params[\"v_h\"]\n self.tdelta = 4.21 * approx_ne * approx_generation_t # Covers TMRCA 95% of time\n else:\n self.tdelta = int(self.duration) # assuming an int has been passed\n \n # End time\n self.t1 = self.t0 + self.tdelta\n \n \n def set_approach(self, n_updates=50.0):\n \"\"\"\n Prepare the approach times for the Epoch's parameter \n changes; the time over which they transition from their \n entry values to their adjusted values.\n \n Parameters\n n_updates: int\n The number of times parameters are updated\n across the approach window.\n \n Returns\n Null\n \n \"\"\"\n if self.t0 is None:\n raise ValueError(\"Must run `.set_timings()` before running `.set_approach()`.\")\n \n # Generate a dictionary that holds functions for each parameter to be updated,\n # These functions return the parameter's value at a given time.\n self.approach_funcs = {key: self.gen_approach_func(key, a, a_t)\n for (key, a, a_t) in zip(self.adj_params,\n self.approach,\n self.approach_ts)}\n \n # We don't continuously update parameters, but at a frequency defined below\n self.approach_t1 = self.t0 + max(self.approach_ts)\n self.param_update_freq = max(self.approach_ts) / n_updates\n \n \n def set_sampling(self):\n \"\"\"\n Set the sampling rate of prevalence and\n genetic diversity data during the Epoch\n \n Parameters\n Null\n Returns\n Null\n \n \"\"\"\n if self.t1 is None:\n raise ValueError(\"Must run `.set_timings()` before running `.set_sampling()`.\")\n \n # Prevalence\n self.adj_prev_samp = self.config.has_option(self.section, \"prev_samp_freq\")\n if self.adj_prev_samp:\n self.prev_samp_freq = self.config.getfloat(self.section, \"prev_samp_freq\")\n if self.config.has_option(self.section, \"prev_samp_t\"):\n self.prev_samp_t = self.config.getfloat(self.section, \"prev_samp_t\")\n else:\n self.prev_samp_t = self.tdelta # 
until end of epoch\n \n # Diversity\n self.adj_div_samp = self.config.has_option(self.section, \"div_samp_freq\")\n if self.adj_div_samp:\n self.div_samp_freq = self.config.getfloat(self.section, \"div_samp_freq\")\n if self.config.has_option(self.section, \"div_samp_t\"):\n self.div_samp_t = self.config.getfloat(self.section, \"div_samp_t\")\n else:\n self.div_samp_t = self.tdelta # until end of epoch\n \n \n def gen_approach_func(self, key, approach, approach_t):\n \"\"\"\n Generate functions that define gradual updates\n \n Parameters:\n key: str\n Parameter for which we will generate an\n update function.\n approach: str\n The functional form that will be used to\n set the parameter updates. Can be one of\n 'step', 'linear', 'logistic'.\n approach_t: float\n The time frame over which the parameter\n will be updated.\n \n Returns\n approach_function: function\n This is a function that, given a time,\n will return the parameter value.\n\n \"\"\"\n entry_val = self.entry_params[key]\n epoch_val = self.epoch_params[key]\n\n if approach == \"step\":\n def approach_func(t):\n if t <= self.t0 + approach_t/2:\n val = entry_val\n else:\n val = epoch_val\n return val if key not in (\"nv\", \"nh\") else int(val)\n\n elif approach == \"linear\":\n def approach_func(t):\n if t <= self.t0:\n val = entry_val\n elif t > self.t0 + approach_t:\n val = epoch_val\n else:\n b = entry_val\n m = (epoch_val - entry_val)/approach_t\n val = b + m * (t - self.t0)\n return val if key not in (\"nv\", \"nh\") else int(val)\n\n elif approach == \"logistic\":\n def approach_func(t, correct=False):\n mu = self.t0 + approach_t / 2\n n_sds = 10.0\n scale = approach_t / n_sds\n unscaled_func = scipy.stats.logistic(mu, scale)\n val = entry_val + (epoch_val - entry_val) * unscaled_func.cdf(t)\n if correct: # logistic is asymptotic, linear adjustment to get boundaries exact\n offset = (epoch_val - entry_val) * unscaled_func.cdf(self.t0)\n m = 2 * offset / approach_t\n val += m * (t - mu)\n return val if key not in (\"nv\", \"nh\") else int(val)\n else:\n raise ValueError(\"Approach is unrecognized. Must be one of: 'step', 'linear', 'logistic'.\")\n\n return approach_func\n
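 \n # A rough feel for the logistic approach (illustrative numbers only): with\n # entry_val=1.0, epoch_val=2.0, t0=0 and approach_t=30, approach_func(15)\n # returns 1.5 and approach_func(30) is ~1.99; pass correct=True to make\n # the boundary values exact.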
 \n \n def adjust_params(self, t):\n \"\"\"\n This method will determine whether or not it's time to \n update the parameters again\n \n Parameters\n t : float\n Current time in the simulation in days.\n Returns\n _ : bool\n True if parameters should be updated.\n \n \"\"\"\n if t >= self.approach_t1:\n return False\n elif (t - self.tparam) >= self.param_update_freq:\n return True\n else:\n return False\n \n \n def get_params(self, t):\n \"\"\"\n Return the value of all adjusted parameters\n at time `t`, as a dictionary\n \n Parameters\n t : float\n Current time in the simulation in days.\n Returns\n _ : dict\n keys : str, parameter names\n values : appropriate value of parameter at time t\n \n \"\"\"\n self.tparam = t\n return {key: f(t) for key, f in self.approach_funcs.items()}\n\n \nclass Epochs(object):\n \"\"\"\n Co-ordinate multiple Epoch classes\n \n \"\"\"\n def __init__(self, params, config):\n \n # Parse\n self.params = params.copy() # Need to copy, as will change during simulation\n self.config = config\n \n # Define initialisation variables\n self.derived_params = calc_derived_params(self.params)\n self.equil_params = calc_equil_params(self.params, self.derived_params)\n self.init_x_h = calc_x_h(**self.equil_params)\n self.init_x_v = calc_x_v(**self.equil_params)\n \n # Coordinate the Epochs\n self.init_duration = None\n self.epoch_sections = None\n self.exist = None # Are there any Epochs?\n self.max_t0 = None # What is the total runtime in days\n self.current = None # Points to the current epoch\n\n \n def set_initialisation(self, verbose=False):\n \"\"\"\n Set the initialisation duration of the simulation\n \n Parameters\n verbose : bool\n Returns\n Null\n \"\"\"\n \n self.init_duration = eval(self.config.get('Options', 'init_duration'))\n if self.init_duration is None:\n if verbose:\n print(\"Initialising simulation to approximate equilibrium.\")\n ne = self.init_x_h*self.params['nh']\n g = (self.derived_params['h_v'] + self.derived_params['v_h']) \n time_to_equil = 4.21 * ne * g # Covers TMRCA 95% of time\n self.init_duration = time_to_equil\n else:\n if verbose:\n print(\"Initialising simulation to a user-specified duration.\")\n if verbose:\n print(\" Initialisation duration: %d days = %d years\" % (self.init_duration, self.init_duration/365))\n \n \n def prepare_epochs(self, verbose=False):\n \"\"\"\n With this method we will prepare all of the epochs\n \n Parameters\n verbose : bool\n Returns\n Null\n \"\"\"\n \n # Collect 'Epoch_' sections\n self.epoch_sections = [s for s in self.config.sections() if \"Epoch\" in s]\n \n # If Epochs exist, prepare them\n if len(self.epoch_sections) > 0:\n self.exist = True\n self.epochs = [Epoch(self.config, s) for s in self.epoch_sections]\n if verbose: print(\"Epochs\")\n for (i, epoch) in enumerate(self.epochs):\n if i == 0:\n epoch.set_params(self.params)\n epoch.set_timings(self.init_duration) # begins at end of initialization\n epoch.set_approach()\n epoch.set_sampling()\n else:\n epoch.set_params(entry_params=self.epochs[i-1].epoch_params)\n epoch.set_timings(start_time=self.epochs[i-1].t1) # begins at end `.t1` of previous epoch\n epoch.set_approach()\n epoch.set_sampling()\n if verbose:\n print(\" \", i+1, \":\", epoch.name)\n print(\" Begins: %d, Ends: %d\" % (epoch.t0, epoch.t1))\n print(\" Duration: %d days = %d years\" % (epoch.tdelta, epoch.tdelta/365))\n print(\" Adjusting Parameter(s):\", epoch.adj_keys)\n print(\" To Value(s):\", epoch.adj_vals)\n print(\" 
via.:\", epoch.approach)\n print(\" Approach Time(s):\", epoch.approach_ts)\n print(\" Host Prevalence: %.03f, Vector: %.03f\" % (epoch.x_h, epoch.x_v))\n print(\" Adjust Prevalence Sampling:\", epoch.adj_prev_samp)\n if epoch.adj_prev_samp:\n print(\" ...to every %d days for %d days.\" \\\n % (epoch.prev_samp_freq, epoch.prev_samp_t))\n print(\" Adjust Diversity Sampling:\", epoch.adj_div_samp)\n if epoch.adj_div_samp:\n print(\" ...to every %d days for %d days.\" \\\n % (epoch.div_samp_freq, epoch.div_samp_t))\n \n self.max_t0 = self.epochs[-1].t1 # the end of the simulation\n if verbose:\n print(\" Total Duration: %d days = %d years\" % (self.max_t0, self.max_t0/365))\n \n def update_time(self, t):\n \"\"\"\n Check if current epoch needs to be changed,\n given the time `t`\n \n If we have passed the start time of an Epoch,\n but it has not yet begun, we assign it as\n the current epoch.\n \n \"\"\"\n \n for epoch in self.epochs:\n if t > epoch.t0 and not epoch.begun:\n self.current = epoch\n \n \n def write_epochs(self, out_dir, verbose=False):\n \"\"\"\n Write a dataframe `epoch_df.csv`, each row\n of which contains information about and Epoch\n within Epochs\n \n Parameters\n out_dir : str\n Path to output direcftory.\n verbose : bool\n Print to stdout?\n Returns\n Null\n \n \"\"\"\n \n if self.exist:\n print(\"Writing Epochs dataframe...\")\n\n epoch_dt = {\n \"name\": [\"init\"],\n \"t0\": [0],\n \"t1\": [self.init_duration],\n \"param\": [\"\"],\n \"val\": [\"\"],\n \"x_h\": [self.init_x_h],\n \"x_v\": [self.init_x_v]}\n \n for epoch in self.epochs:\n epoch_dt[\"name\"].append(epoch.name)\n epoch_dt[\"t0\"].append(epoch.t0)\n epoch_dt[\"t1\"].append(epoch.t1)\n epoch_dt[\"param\"].append(epoch.adj_keys)\n epoch_dt[\"val\"].append(epoch.adj_vals)\n epoch_dt[\"x_h\"].append(epoch.x_h)\n epoch_dt[\"x_v\"].append(epoch.x_v)\n \n epoch_df = pd.DataFrame(epoch_dt)\n epoch_df.to_csv(os.path.join(out_dir, \"epoch_df.csv\"), index=False)\n print(\"Done.\")\n print(\"\")\n else:\n print(\"No Epochs to write.\")\n\n ","repo_name":"JasonAHendry/fwd-dream","sub_path":"lib/epochs.py","file_name":"epochs.py","file_ext":"py","file_size_in_byte":23134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"9876850328","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 8/25/21\n\n@author: Chant\n\"\"\"\n\n\ndef find_max_sell(arr):\n \"\"\"\n 京东APP拥有百亿级商品。已知某日京东所有销售商品中,有一件商品荣登销量王,其销量占比超过总销量的一半,请找出这个商品。\n 假设商品销售日志每条记录一件商品,如【iPhone12,iPhone12,macbookpro16,iPhone12,红米K40】。\n\n 1,2,1,2,1 (1,0)(2,0)(1,0)(2,0)(1,0)\n 1,1,1,2,4 (1,0)(1,1)(1,2)(1,1)(1,0)\n \"\"\"\n counter = [arr[0], 0]\n for i in arr[1:]:\n prev = counter[0]\n if i == prev:\n counter[1] += 1\n else:\n if counter[1] > 0:\n counter[1] -= 1\n else:\n counter[1] = 0\n counter[0] = i\n return counter[0]\n\n\ndef online_distribute(log, login_dict, logout_dict):\n \"\"\"\n 京东APP的活跃用户有4个亿,每个用户从登陆到退出会在一个日志文件中记下登陆时间和退出时间,要求写一个算法,统计一天中京东APP的用户在线分布,粒度为秒。\n [\n (uid, login_time, logout_time),\n (uid, login_time, logout_time),\n (uid, login_time, logout_time),\n ]\n \"\"\"\n login_dic, logout_dic, = dict(), dict()\n for uid, login_time, logout_time in log:\n login_dic[login_time] = login_dic.get(login_time, 0) + 1\n logout_dic[logout_time] = logout_dic.get(logout_time, 0) + 1\n ans = []\n prev = 0\n for i in range(24 * 60 * 60):\n prev += login_dict[i] - logout_dict[i]\n ans.append(prev)\n return ans\n\n\ndef find_max_sub(nums):\n \"\"\"\n 
\n\ndef find_max_sub(nums):\n \"\"\"\n 1. Given a list of positive integers, find the maximum sum of a subsequence\n in which no two chosen elements are adjacent in the original array.\n\n Input: [2, 7, 9, 3, 1]\n Output: 12\n\n 2. Also return the subsequence achieving that maximum sum.\n Input: [2, 7, 9, 3, 1]\n Output: [2, 9, 1]\n [2, 7, 9, 3, 1]\n 2, 7, 11, 11, 12\n \"\"\"\n n = len(nums)\n dp = [0] * n\n for i in range(n):\n if i == 0:\n dp[i] = nums[0]\n elif i == 1:\n dp[i] = max(dp[i - 1], nums[i])\n else:\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n\n ans = []\n for i in range(2, n):\n if dp[i] != dp[i - 1]:\n ans.append(nums[i])\n if i == 2:\n if dp[i] != dp[i - 1]:\n ans.append(nums[0])\n else:\n ans.append(nums[1])\n return ans\n","repo_name":"Chant00/coding","sub_path":"其他/jd.py","file_name":"jd.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} {"seq_id":"27019026129","text":"import serial\nimport csv\n\ndef open_serial_port(port, baudrate, timeout):\n \"\"\"Open the serial port with the specified parameters.\"\"\"\n return serial.Serial(port=port, baudrate=baudrate, timeout=timeout)\n\ndef read_data(arduino, num_samples):\n \"\"\"Read data from the serial port and store it in a list.\"\"\"\n data = []\n for _ in range(num_samples):\n data.append(str(arduino.readline()))\n return data\n\ndef clean_data(data):\n \"\"\"Clean the data by removing unnecessary characters.\"\"\"\n cleaned_data = []\n for item in data:\n cleaned_data.append(item[2:-5])\n return cleaned_data\n\ndef write_data_to_csv(data, file_path):\n \"\"\"Write the cleaned data to a CSV file.\"\"\"\n with open(file_path, mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['hart'])\n for item in data:\n writer.writerow([item])\n\nif __name__ == \"__main__\":\n arduino = open_serial_port(port=\"COM3\", baudrate=9600, timeout=1)\n raw_data = read_data(arduino, num_samples=10)\n print(raw_data)\n cleaned_data = clean_data(raw_data)\n print(cleaned_data)\n write_data_to_csv(cleaned_data, file_path=\"Sample_Dataset.csv\")","repo_name":"hamidrezamaneshti/HeathCare-monitoring-IoT-Based","sub_path":"GetingRAWdata.py","file_name":"GetingRAWdata.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"18055940674","text":"from abc import abstractmethod, ABC\nfrom typing import List, Optional\n\nimport aiohttp as aiohttp\nfrom bs4 import BeautifulSoup\n\nfrom utils.common import log_exception, post\n\n\ndef create_regex_dict(timeout_secs: int = 10):\n \"\"\"RegexDictionary factory\"\"\"\n return ViscaRegexDictionary(timeout_secs=timeout_secs)\n\n\nclass RegexDictionary(ABC):\n\n def __init__(self, timeout_secs: int):\n self.timeout_secs = timeout_secs\n\n # noinspection PyTypeChecker\n @abstractmethod\n async def get_word_list(self, pattern) -> Optional[List[str]]:\n \"\"\"Returns a list of dictionary words matching the pattern\n given by `pattern`.\"\"\"\n log_exception(__name__, NotImplementedError())\n return None\n\n\nclass ViscaRegexDictionary(RegexDictionary):\n\n def __init__(self, timeout_secs: int):\n super().__init__(timeout_secs)\n self._url = \"https://www.visca.com/regexdict/\"\n\n async def get_word_list(self, pattern) -> Optional[List[str]]:\n \"\"\"Returns a list of dictionary words matching the pattern\n given by `pattern`.\"\"\"\n async with aiohttp.ClientSession() as session:\n data = {\n 'str': f'{pattern}',\n 'fstr': '',\n 'ifun': 'if',\n 'ccg': 'all',\n 'search': 'Search'}\n try:\n html = await post(\n session, self._url, data=data,\n timeout=self.timeout_secs, ssl=False)\n except 
Exception as e:\n log_exception(__name__, e)\n else:\n soup = BeautifulSoup(html, features='html.parser')\n a_texts = []\n for a in soup.find_all('a'):\n if 'http://www.yourdictionary.com/' in a.attrs['href']:\n a_texts.append(a.text)\n return a_texts\n return None\n","repo_name":"timurhamzin/wordle_telegram_bot","sub_path":"wordle/regex_dict.py","file_name":"regex_dict.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"72268880516","text":"\nfrom tkinter import *\n\nfrom matplotlib.pyplot import text\n\n\nroot = Tk()\n\n\ndef myClick():\n myLabel = Label(root, text=\"I pressed this button\")\n myLabel.pack()\n # myLabel.grid(row=1,column=1)\n\n\nmyButton = Button(root, text=\"Button\", padx=50,pady=10,command=myClick, fg=\"green\",bg=\"#ff0000\")\nmyButton.pack()\n\n\nroot.mainloop()","repo_name":"zuqingxie/ROS_learning","sub_path":"Tkinter/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"30336506291","text":"# D - Flipping Signs\n# ----------------------------------------\n# Problem\n# https://atcoder.jp/contests/abc125/tasks/abc125_d\n# Editorial\n# https://img.atcoder.jp/abc125/editorial.pdf\n\n# This one is hard\n\n# AC (following the editorial)\n# ----------------------------------------\n\n# Solve it with DP. Being able to do this makes you strong\n\nN = int(input())\nA = list(map(int, input().split()))\n\nINF = 1e10\ndp0 = [-INF for _ in range(N+1)] # dp0[i] := elements i-1, i are not flipped\ndp1 = [-INF for _ in range(N+1)] # dp1[i] := elements i-1, i are flipped\ndp0[0] = 0\n\n# Transitions\nfor i in range(N):\n dp0[i+1] = max(\n dp0[i] + A[i], # add as is\n dp1[i] - A[i] # flip and add\n )\n\n dp1[i+1] = max(\n dp0[i] - A[i], # flip and add\n dp1[i] + A[i] # add as is\n )\n\nprint(dp0[-1])\n","repo_name":"kentakom1213/kyopro","sub_path":"atcoder_training/abc125/D_FlippingSigns_DP.py","file_name":"D_FlippingSigns_DP.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} {"seq_id":"5315452386","text":"# Build paths inside the project like this: path.join(BASE_DIR, ...)\nfrom os import path\n\n\nPROJECT_DIR = path.dirname(path.dirname(path.abspath(__file__)))\nBASE_DIR = path.dirname(PROJECT_DIR)\n\nSITE_ID = 1\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\nSTATICFILES_DIRS = [path.join(PROJECT_DIR, 'static')]\nSTATIC_ROOT = path.join(BASE_DIR, 'public/static')\nMEDIA_ROOT = path.join(BASE_DIR, 'public/media')\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [path.join(PROJECT_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.template.context_processors.i18n',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nAUTH_USER_MODEL = 'uzantoj.Uzanto'\nLOGIN_URL = '/uzanto/konekti/'\nLOGIN_REDIRECT_URL = '/'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'esperantio',\n }\n}\n\nADMINS = (\n ('Baptiste Darthenay', 'bonvenon@esperant.io'),\n)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 
'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n 'django_extensions',\n 'django_gravatar',\n 'django_countries',\n 'markitup',\n 'markdown_deux',\n 'braces',\n 'taggit',\n 'leaflet',\n 'uzantoj',\n 'eventoj',\n 'organizoj',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'\n)\n\n\nROOT_URLCONF = 'esperantio.urls'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'eo'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLEAFLET_CONFIG = {\n 'DEFAULT_CENTER': (45.1, 3.9),\n 'DEFAULT_ZOOM': 4,\n 'MIN_ZOOM': 3,\n 'MAX_ZOOM': 16,\n 'TILES': 'http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',\n # 'TILES': 'http://api.tiles.mapbox.com/v3/batisteo.hknl8e1c/{z}/{x}/{y}.png',\n 'ATTRIBUTION_PREFIX': 'Mapaj datumoj © OpenStreetMap kontribuantoj',\n 'RESET_VIEW': False,\n}\n\nGRAVATAR_SECURE = True\nGRAVATAR_DEFAULT_IMAGE = \"mm\"\n\nMARKITUP_SET = 'markitup/sets/markdown'\nMARKITUP_SKIN = 'markitup/skins/simple'\nMARKITUP_FILTER = ('markdown2.markdown', {'safe_mode': True})\n","repo_name":"batisteo/esperantio","sub_path":"esperantio/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"18554787636","text":"import torch\nimport numpy as np\nimport logging\nimport models, os\nfrom work import *\nfrom torch.autograd import Variable\n\n\n# [cyp2c9, vkorc1]\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\ntarget_str = \"vkorc1\"\ndata_folder = 'data'\nsave_path = './checkpoint'\n\n# [reg, vib]\nmodel_name = 'reg'\natt_epochs = 1000\natt_lr = 2e-1\neps=0.4\nt_val_min=-1\nt_val_max=1\n\ndef get_logger():\n logger_name = \"main-logger\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler()\n fmt = \"[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s\"\n handler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(handler)\n return logger\n\n\ndef main():\n # [0, 9, 16]\n global logger\n logger = get_logger()\n\n logger.info(\"=> load data ...\")\n x, y, featnames = load_iwpc(data_folder)\n y = trans_project(y)\n\n t, target_cols = extract_target(x, target_str, featnames)\n\n target_model = models.MLP(input_dim=x.shape[1]).cuda()\n ckpt_name = './checkpoint/model_latest.pth'\n if os.path.isfile(ckpt_name):\n checkpoint = torch.load(ckpt_name)\n target_model.load_state_dict(checkpoint['state_dict'])\n logger.info(\"=> loaded target model checkpoint '{}'\".format(ckpt_name))\n else:\n logger.info(\"=> no checkpoint found at '{}'\".format(ckpt_name))\n\n logger.info(\"=> begin attacking ...\")\n \n \n target_model.eval()\n\n if issparse(x): #deal with sparse matrices correctly\n stack = vstack\n else:\n stack = np.stack\n\n assert len(target_cols) > 0\n one_hot = (len(target_cols) > 1) #whether the target attribute was one-hot encoded (binary otherwise)\n logger.info(\"=> target attribute is one-hot? 
{}\".format(one_hot))\n num_variants = len(target_cols) if one_hot else 2 #number of possible values of the targ\n guesses = []\n \n \n for i in range(x.shape[0]): #iterate over the rows of X and y\n row_x = stack([x[i] for _ in range(num_variants)]) #create copies of x[i]\n if one_hot:\n row_x[:, target_cols] = np.eye(num_variants) #fill in with all possible values of target (one-hot encoded)\n else: #fill in with all possible values of target (binary)\n row_x[0, target_cols] = 0\n row_x[1, target_cols] = 1\n \n row_y = np.repeat(y[i], num_variants)\n row_y = torch.from_numpy(row_y).float().cuda()\n row_x = torch.from_numpy(trans_norm(row_x)).float().cuda()\n \n Ipp = torch.eye(row_x.shape[1]).float().cuda()\n lam = 0.0001\n tmp1 = torch.inverse(row_x.t().mm(row_x) + lam * Ipp) # Ridge regression estimator, Hoerl 1970\n # tmp1 = torch.pinverse(row_x.t().mm(row_x)) # use psudo inverse instead: x^Tx is singular\n tmp2 = row_x.t().mm(row_y.unsqueeze(1))\n c_bar = tmp1.mm(tmp2)\n h_adv = row_x.mm(c_bar)\n cost = ((row_y.unsqueeze(1) - h_adv) ** 2)\n # cost = (row_y.unsqueeze(1) - h_adv).abs()\n target_out = target_model(row_x)\n true_cost = ((row_y.unsqueeze(1) - target_out) ** 2)\n # true_cost = (row_y.unsqueeze(1) - target_out).abs()\n loss = (true_cost - cost).abs()\n guess = torch.argmin(loss).cpu().numpy()\n guesses.append(guess)\n # if i == 4780: import pdb; pdb.set_trace()\n \n print(\"person{}\\t true:{}\\t estimated:{}\\t {}\".format(i, t[i], guess, (guess==t[i])))\n\n # result = np.concatenate((t.unsqueeze(1), guesses.unsqueeze(1)), axis=1)\n # np.savetxt('result.csv', result)\n \n # attack acc\n num_correct = np.count_nonzero(guesses == t)\n num_rows = x.shape[0]\n attack_acc = num_correct / num_rows\n \n \n print(\"Attack Acc:{:.2f} \".format(attack_acc * 100))\n\n logger.info(\"=> Attack Finished.\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"SCccc21/mi","sub_path":"privacy/toy2.py","file_name":"toy2.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35226498582","text":"from django.core.handlers.wsgi import WSGIRequest\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here.\nfrom django.template import loader\n\nfrom birthdays.forms import BirthdayForm\nfrom birthdays.models import Birthday\nfrom django.shortcuts import render\n\n\n# def detail(request, birthday_id):\n# try:\n# birthday = Birthday.objects.get(pk=birthday_id)\n# except Birthday.DoesNotExist:\n# raise Http404(\"Question does not exist\")\n# return render(request, 'birthdays/datail.html', {'birthday': birthday})\ndef detail(request, birthday_id):\n birthday = get_object_or_404(Birthday, pk=birthday_id)\n if request.method == 'POST':\n form = BirthdayForm(request.POST, instance=birthday)\n if form.is_valid():\n form.save()\n result = \"Изменения успешно внесены\"\n else:\n result = \"Изменения не внесены\"\n return render(request, 'birthdays/detail.html',\n context={'form': form, \"result\": result, \"birthday\": birthday})\n\n else:\n form = BirthdayForm(instance=birthday)\n return render(request, 'birthdays/detail.html', {'form': form, \"birthday\": birthday})\n\n\n# def index(request):\n# user_birthdays_list = Birthday.objects.all()\n# template = loader.get_template('birthdays/birthday.html')\n# context = {\n# 'user_birthdays_list': user_birthdays_list,\n# 
}\n# # output = '----------- '.join([BD.__str__() for BD in user_birthdays_list])\n# return HttpResponse(template.render(context, request))\ndef add(request):\n if request.method == 'POST':\n form = BirthdayForm(request.POST)\n if form.is_valid():\n birthday = form.save(commit=False)\n birthday.user = request.user\n birthday.save()\n return HttpResponseRedirect('/done')\n else:\n form = BirthdayForm()\n return render(request, 'birthdays/add.html', context={'form': form})\n\n\ndef index(request):\n request: WSGIRequest\n if not request.user.is_anonymous:\n user_birthdays_list = Birthday.objects.filter(user=request.user)\n paginator = Paginator(user_birthdays_list, 25)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n 'page_obj': page_obj,\n }\n else:\n context = {}\n return render(request, 'birthdays/index.html', context)\n\n\ndef done(request):\n return render(request, 'birthdays/done.html')\n","repo_name":"PabloGolobar/BirthdayBot3.0","sub_path":"birthdays/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72945745798","text":"import sys\nimport os\nimport time\nimport logging\nfrom shakedown.scout.database import Database\nfrom shakedown.config import config\nfrom shakedown.session import sessions\nfrom shakedown.scout import handlers\nfrom shakedown.util import to_list\n\nlogger = logging.getLogger(__name__)\n#logging.basicConfig(level=logging.DEBUG)\n\n_cached = []\ndatabase = Database()\n\n# def gather(endpoints=r\".*\", tables=[]):\n# tables = to_list(tables)\n#\n# for h_name, handler in handlers.handlers:\n#\n# for key, commands, callback in handler.CMDS:\n#\n# t_name = h_name + \".\" + key\n# #print(\"gathering...\", type(t_name, tables, endpoint)\n# if len(tables) > 0 and t_name not in tables:\n# logger.debug(\"skipping table '{}' due to filter\".format(t_name))\n# continue\n#\n# tbl = database[t_name]\n#\n# # TODO: in order support 'text' encoding add extra args field to\n# # handler.CMDS\n# responses = sessions._send(endpoints, list(commands), encoding='json')\n#\n# for response in responses:\n# hostaddr = response.session.hostaddr\n#\n# if response.status != \"ok\":\n# logger.warning(response.errored)\n# continue\n#\n# #print(type(response.responses))\n# response = callback(response)\n#\n# if not isinstance(response, list):\n# response = [response]\n#\n# for item in response:\n# item = dict(item)\n#\n# if not tbl.find_one(item):\n# tbl.insert_one({\n# \"_dut\": hostaddr,\n# \"_timestamp\": int(time.time()),\n# **item\n# })\n\n# refresh = gather\n\ndef _get_handler(table):\n for handler_name, handler in handlers.handlers:\n for key, cmds, callback in handler.CMDS:\n _table = \".\".join([handler_name, key])\n if _table == table:\n return (database[table], cmds, callback)\n\n raise ValueError(\"table '{}' not found\".format(table))\n\ndef _get_endpoints(filt):\n return [sess.endpoint for sess in sessions.filter(filt)]\n\ndef _prepare_query(endpoints=None, query={}):\n if endpoints:\n query[\"_dut\"] = { \"$in\": endpoints }\n\n return query\n\ndef _get_cache_key(table, endpoint):\n return \"::\".join([table, endpoint])\n\ndef _get_not_cached(table, endpoints):\n return [ep for ep in endpoints if _get_cache_key(table, ep) not in _cached]\n\ndef _set_cached(table, endpoint):\n key = _get_cache_key(table, endpoint)\n\n if key not in _cached:\n _cached.append(key)\n\ndef _cache(table, endpoints):\n 
endpoints = _get_not_cached(table, endpoints)\n if not endpoints:\n return\n table, commands, callback = _get_handler(table)\n responses = sessions.send(endpoints, list(commands), encoding='json')\n\n for response in responses:\n hostaddr = response.session.hostaddr\n\n if response.code != 0:\n logger.warning(response.errored)\n continue\n\n response = callback(response)\n\n if not isinstance(response, list):\n response = [response]\n\n for item in response:\n item = dict(item)\n\n if not table.find_one(item):\n table.insert_one({\n \"_dut\": hostaddr,\n \"_timestamp\": int(time.time()),\n **item\n })\n else:\n table.update_one(item, {\n \"_dut\": hostaddr,\n \"_timestamp\": int(time.time()),\n **item\n })\n\n _set_cached(str(table), hostaddr)\n\ndef find(table, filt=None, query={}):\n endpoints = _get_endpoints(filt)\n\n _cache(table, endpoints)\n\n query = _prepare_query(endpoints, query)\n result = database[table].find(query)\n\n if result:\n result = list(result)\n\n return result\n\ndef find_one(table, filt=None, query={}):\n endpoints = _get_endpoints(filt)\n\n _cache(table, endpoints)\n\n query = _prepare_query(endpoints, query)\n return database[table].find_one(query)\n","repo_name":"arista-northwest/shakedown","sub_path":"shakedown/scout/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} {"seq_id":"5021464884","text":"#Jackson Johnson\r\n#Written for the Hartford Professional Chapter of EWB-USA\r\n#January 2020\r\n#jackson.david.johnson@gmail.com\r\n\r\n#computes the cloud cover and vegetation coverage for a given satellite image\r\n\r\nimport numpy as np\r\nfrom matplotlib.image import imread\r\nimport matplotlib.pyplot as plt\r\nimport imageio as iio\r\n\r\n\r\n#I am using a separate script to crop the raw images as it makes them easier to work with.\r\n#The file names below represent the cropped versions. 
\r\nfilnames = ['2018-12-30.png',\r\n\t\t\t'2019-01-04.png',\r\n\t\t\t'2019-01-09.png',\r\n\t\t\t'2019-01-14.png',\r\n\t\t\t'2019-01-19.png',\r\n\t\t\t'2019-01-24.png',\r\n\t\t\t'2019-01-29.png',\r\n\t\t\t'2019-02-03.png',\r\n\t\t\t'2019-02-08.png',\r\n\t\t\t'2019-03-10.png',\r\n\t\t\t'2019-03-30.png',\r\n\t\t\t'2019-04-04.png',\r\n\t\t\t'2019-04-14.png',\r\n\t\t\t'2019-04-24.png',\r\n\t\t\t'2019-05-04.png',\r\n\t\t\t'2019-05-09.png',\r\n\t\t\t'2019-05-14.png',\r\n\t\t\t'2019-05-19.png',\r\n\t\t\t'2019-05-24.png',\r\n\t\t\t'2019-05-29.png',\r\n\t\t\t'2019-06-03.png',\r\n\t\t\t'2019-06-08.png',\r\n\t\t\t'2019-06-13.png',\r\n\t\t\t'2019-06-23.png',\r\n\t\t\t'2019-07-13.png',\r\n\t\t\t'2019-08-02.png',\r\n\t\t\t'2019-08-12.png',\r\n\t\t\t'2019-08-22.png',\r\n\t\t\t'2019-08-27.png',\r\n\t\t\t'2019-09-01.png',\r\n\t\t\t'2019-09-11.png',\r\n\t\t\t'2019-09-26.png',\r\n\t\t\t'2019-10-01.png',\r\n\t\t\t'2019-10-11.png',\r\n\t\t\t'2019-10-21.png',\r\n\t\t\t'2019-11-10.png',\r\n\t\t\t'2019-11-20.png',\r\n\t\t\t'2019-11-25.png',\r\n\t\t\t'2019-12-05.png',\r\n\t\t\t'2019-12-10.png',\r\n\t\t\t'2019-12-20.png',\r\n\t\t\t'2019-12-30.png',\r\n\t\t\t'2020-01-14.png',\r\n\t\t\t'2020-01-19.png',\r\n\t\t\t'2020-01-24.png']\r\n\r\n\r\n\r\n\r\ndef HSL_Convert(RGBpix,SF):\r\n\tR = RGBpix[0]/SF\r\n\tG = RGBpix[1]/SF\r\n\tB = RGBpix[2]/SF\r\n\t\r\n\tCmax = max(R,G,B)\r\n\tCmin = min(R,G,B)\r\n\tDelta = Cmax-Cmin\r\n\tCmaxPos = np.argmax([R,G,B]) #use only the colour channels, so any alpha channel is ignored\r\n\r\n\t\r\n\t#Lightness Calculation\r\n\tL = (Cmax+Cmin)/2\r\n\t\r\n\t#Saturation Calculation (guard against 0/0 for pure black or white pixels)\r\n\tS = 0 if Delta == 0 else Delta/(1-np.absolute(2*L-1))\r\n\t\r\n\t#Hue Calculation\r\n\tif Delta == 0:\r\n\t\tH = 0\r\n\telse:\r\n\t\tif CmaxPos == 0:\r\n\t\t\tH = (G-B)/(Cmax-Cmin)*60\r\n\t\tif CmaxPos == 1:\r\n\t\t\tH = (2+(B-R)/(Cmax-Cmin))*60\r\n\t\tif CmaxPos == 2:\r\n\t\t\tH = (4+(R-G)/(Cmax-Cmin))*60\r\n\t\r\n\treturn [H,S,L]\r\n\r\n\r\n\r\nfor k in filnames:\r\n\t#The text file is going to contain green coverage and cloud cover percentages\r\n\t#for the pictures in the list above. Handy for plotting in excel.\r\n\ttext_Data = open('PlantActivity.txt','a')\r\n\t\r\n\t#Read in the image as an array that is width x height x 3 and get its shape\r\n\tdata = imread(k)\r\n\tdata_dim = data.shape\r\n\t\r\n\t#initialize the counting variables (or reset after the first loop)\r\n\tcloudCount = 0\r\n\tgreenCount = 0\r\n\tbrownCount = 0\r\n\t\r\n\t#Here is where we start looping through the individual pixels. I need to look \r\n\t#at map functions. Some possibility of speed increases. Let me know if you\r\n\t#get them to work\r\n\tfor i in range(data_dim[0]):\r\n\t\tfor j in range(data_dim[1]):\r\n\t\t\t\r\n\t\t\t#Here is where the individual pixel is converted to HSL with the above function\r\n\t\t\tHSLout = HSL_Convert(data[i,j,:],1)\r\n\t\t\t\r\n\t\t\t#These are the decision gates for determining if a pixel is cloud, green,\r\n\t\t\t#or other (dirt in my case). Feel free to tune the values in the logic \r\n\t\t\t#statements to pick narrower or wider bands of colors. These just worked\r\n\t\t\t#for my situation\r\n\t\t\tif HSLout[2] > .8:\r\n\t\t\t\tcloudCount = cloudCount + 1\r\n\t\t\t\tdata[i,j,:] = [0,0,1]\r\n\t\t\telif 40 < HSLout[0] < 220:\r\n\t\t\t\tdata[i,j,:] = [0,1,0]\r\n\t\t\t\tgreenCount = greenCount + 1\r\n\t\t\telse:\r\n\t\t\t\tbrownCount = brownCount + 1\r\n\t\t\t\tdata[i,j,:] = [1,0,0]\r\n\t\r\n\t#Here is the math for turning the cloud and green counters into percentages. 
Notice\r\n\t#that the green percentage is the percentage of the image remaining after cloud \r\n\t#cover has been removed\r\n\tcloudCover = cloudCount/(data_dim[0]*data_dim[1])*100\r\n\tPlantAvtivity = greenCount/(data_dim[0]*data_dim[1]-cloudCount)*100\r\n\t\r\n\t#write the data out to the text file\r\n\ttext_Data.write(k+','+str(cloudCover)+','+str(PlantAvtivity)+'\\n')\r\n\t\r\n\t# write out the mask to an image. Nice when you are tuning the logic gates above.\r\n\tiio.imwrite('GOM-Mask-'+k,data)\r\n\tdel data\r\n\tdel data_dim\r\n\t\r\n\t#I'm closing the text file (which saves the data) because I was experiencing \r\n\t#computer crashes and didn't want to lose the data. In theory, you should just\r\n\t#be able to close it outside the loop and save a little I/O time.\r\n\ttext_Data.close()\r\n","repo_name":"JacksonJohnsonEWB/EWB-ImageProcessing","sub_path":"GreenOmeter.py","file_name":"GreenOmeter.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"16983171370","text":"import arrow\nimport logging\nfrom tabulate import tabulate\nfrom pydantic import BaseModel, Field\nfrom typing import Dict, List\nfrom .types import Sample, OdooSample, Function\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef monkey_patch_function_call(function: Function, sample_idx: int, halpert: 'Halpert'):\n original_call = function.call\n def call(*args, **kwargs):\n if sample_idx not in halpert.sample_functions_:\n halpert.sample_functions_[sample_idx] = []\n halpert.sample_functions_[sample_idx].append(function.slug)\n\n return original_call(*args, **kwargs)\n function.call = call\n\n\nclass Halpert(BaseModel):\n samples: List[Sample]\n odoo_snapshot_dir: str | None = None\n\n sample_functions_: Dict[int, List[str]] = Field(default_factory=dict)\n sample_quiz_: Dict[int, List[Sample.Evaluation.QuizItem]] = Field(default_factory=dict)\n\n\n def prepare(self, sample: Sample) -> List[Function]:\n if isinstance(sample, OdooSample):\n from halpert.functions.odoo.snapshot.restore import restore as restore_odoo_snapshot\n\n if not self.odoo_snapshot_dir:\n raise ValueError('odoo_snapshot_dir must be set when using OdooSample')\n restore_odoo_snapshot(sample.snapshot, self.odoo_snapshot_dir)\n \n def utcnow():\n return arrow.get(sample.date)\n arrow.utcnow = utcnow\n\n idx = self.samples.index(sample)\n functions = [Function(**f.dict()) for f in sample.functions]\n for f in functions:\n monkey_patch_function_call(f, idx, self)\n return functions\n\n\n def submit(self, sample: Sample, quiz: List[Sample.Evaluation.QuizItem]):\n idx = self.samples.index(sample)\n self.sample_quiz_[idx] = quiz\n\n\n def evaluate(self):\n assert list(range(len(self.samples))) == sorted(self.sample_functions_.keys())\n assert list(range(len(self.samples))) == sorted(self.sample_quiz_.keys())\n \n quiz_answers = []\n results = []\n for index, sample in enumerate(self.samples):\n function_slugs_called = self.sample_functions_[index]\n quiz = self.sample_quiz_[index]\n\n quiz_answers_correct = [\n expected.answer == (quiz[i].answer if i < len(quiz) else False)\n for i, expected in enumerate(sample.expected.quiz)\n ]\n quiz_answers.extend([{\n 'Sample': sample.name,\n 'Question': expected.question,\n 'Expected': expected.answer,\n 'Actual': quiz[i].answer if i < len(quiz) else '',\n 'Correct': quiz_answers_correct[i],\n } for i, expected in enumerate(sample.expected.quiz)])\n\n expected_functions_used = set(function_slugs_called) & 
set(sample.expected.functions)\n\n results.append({\n 'Sample': sample.name,\n 'Quiz Score': sum(quiz_answers_correct) / len(quiz_answers_correct),\n 'Functions Score': len(expected_functions_used) / len(sample.expected.functions),\n 'Steps': len(function_slugs_called),\n })\n \n table = tabulate({ k: [r[k] for r in quiz_answers] for k in quiz_answers[0].keys() }, headers='keys')\n logger.info('Quiz Answers:\\n' + table)\n\n table = tabulate({ k: [r[k] for r in results] for k in results[0].keys() }, headers='keys')\n logger.info('Evaluation:\\n' + table)\n\n","repo_name":"davidfant/evals","sub_path":"halpert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9094399267","text":"#!/usr/bin/python3\n\"\"\"\na function that adds 2 integers\nit takes the inputs and makes sure they are of type int\nif they are not, it raises a TypeError\n\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"\n adding two integers,\n returns the sum.\n adding line for checker\n \"\"\"\n\n if not isinstance(a, (int, float)):\n raise TypeError('a must be an integer')\n\n if not isinstance(b, (int, float)):\n raise TypeError('b must be an integer')\n try:\n return int(a) + int(b)\n except:\n raise\n","repo_name":"dreeseh/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36449114268","text":"import time\nimport logging\nimport logging.handlers\n\nfrom fng_config import config\n\n\nlogger = logging.getLogger()\n\n\ndef setup_logger():\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(process)d-%(threadName)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n file_handler = logging.handlers.RotatingFileHandler(\"{}/{}.{}.log\".format(\n config.get_logger()['path'],\n config.get_logger()['name'],\n time.strftime(\"%Y-%m-%d\")),\n maxBytes=10485760, backupCount=5, encoding=\"utf-8\")\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n\nsetup_logger()\n","repo_name":"ssfzxc/sz_hosp_kill","sub_path":"fng_logger.py","file_name":"fng_logger.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21985401123","text":"from inspect import getmembers, isfunction\nfrom pathlib import Path\nfrom string import Template\nfrom timeit import timeit\n\n\ndef loop(functions_list, stmt_tmpl, setup_tmpl, number):\n # Run\n results = []\n for function_name, _ in functions_list:\n stmt = stmt_tmpl.safe_substitute(dict(f=function_name))\n setup = setup_tmpl.safe_substitute(dict(f=function_name))\n elapsed_time = timeit(stmt, setup, number=number)\n results.append((function_name, elapsed_time))\n\n # Display\n for function, elapsed_time in sorted(results, key=lambda n:n[1]):\n print(\"time %s: %.3f seconds\" % (function, elapsed_time))\n\n\nif __name__ == \"__main__\":\n\n for module_name in filter(Path.is_file, Path(\".\").glob(\"speed_*.py\")):\n print(\">>\", module_name)\n speed_module = __import__(module_name.stem)\n\n loop(\n functions_list=getmembers(speed_module, isfunction),\n 
stmt_tmpl=Template(speed_module.STMT_TMPL),\n setup_tmpl=Template(f\"from {module_name.stem} import $f\"),\n number=speed_module.ITERATION\n )\n","repo_name":"abnmy/learning","sub_path":"python/main_speed.py","file_name":"main_speed.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30169855248","text":"def dq_sorted_arr(nums):\n l=len(nums)\n i=0 # constant pointer\n j=l-1 # moving pointer\n\n while j>0:\n if abs(nums[i])>abs(nums[j]):\n nums[i],nums[j]=nums[j],nums[i]\n j-=1\n\n for n in range(l):\n nums[n]=nums[n]**2\n\n return nums\n\nif __name__==\"__main__\":\n print(dq_sorted_arr([-4,-1,0,3,10]))\n print(dq_sorted_arr([-7,-3,2,3,11]))","repo_name":"noviicee/Banque-De-Questions","sub_path":"Challenges/100DaysofCode/Day-4/Squares of Sorted Array/square_of_sorted_array.py","file_name":"square_of_sorted_array.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"24230155109","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import ArrayType, StringType, TimestampType\n\nspark = SparkSession.builder.config('spark.jars', '../lib/spark-sql-kafka/spark-sql-kafka-0-10_2.12-3.2.1.jar,'\n '../lib/spark-sql-kafka/kafka-clients-2.8.1.jar,'\n '../lib/spark-sql-kafka/spark-token-provider-kafka-0-10_2.12-3.2.1.jar,'\n '../lib/spark-sql-kafka/commons-pool2-2.8.1.jar')\\\n .getOrCreate()\n\ndf = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"10.211.55.9:9092,10.211.55.10:9092,10.211.55.11:9092\") \\\n .option(\"subscribe\", \"mytopic\") \\\n .load() \\\n .selectExpr(\"CAST(value AS STRING)\", \"cast(timestamp as timestamp)\")\n\n'''\n@udf(returnType=ArrayType(StringType()))\ndef my_split2(x):\n return x.split(' ')\n'''\n\n# explode turns each item in an array into a separate row\n# df.value.split(' ') # the warning occurred\n'''\nwords = df \\\n .select(explode(split(df.value, ' ')).alias('word'), 'timestamp') \\\n .groupby(window(df.timestamp, \"10 seconds\"), 'word') \\\n .count() \\\n .writeStream \\\n .trigger(processingTime=\"10 seconds\") \\\n .outputMode('complete') \\\n .format('console') \\\n .foreach(lambda each: print(each)) \\\n .start() \\\n .awaitTermination()\n'''\n\nwords = df \\\n .select(explode(split(df.value, ' ')).alias('word'), 'timestamp') \\\n .groupby(window(df.timestamp, \"10 seconds\"), 'word') \\\n .count() \\\n .orderBy('count', ascending=False) \\\n .writeStream \\\n .trigger(processingTime=\"10 seconds\") \\\n .outputMode('complete') \\\n .option(\"truncate\", False) \\\n .format('console') \\\n .start() \\\n .awaitTermination()\n\n# wordCounts = words.groupby('word').count().orderBy('count',ascending=False)\n'''\nwordCounts.writeStream \\\n .trigger(processingTime=\"10 seconds\") \\\n .outputMode('complete') \\\n .format('console') \\\n .start() \\\n .awaitTermination()\n'''\n\n# Generate running word count\n# wordCounts = words.groupBy('word').count()\n","repo_name":"ergemp/pyspark_reference2","sub_path":"sql_streaming_examples/kafka_word_count_example.py","file_name":"kafka_word_count_example.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1645071198","text":"# 1303. 
War - Battle\r\n# 72ms\r\n\r\nVECTOR = [(0, 1), (0, -1), (1, 0), (-1, 0)]\r\n\r\nh, w = map(int, input().split())\r\nmaps = [list(input()) for _ in range(h)]\r\n\r\npower = {'W': 0, 'B': 0}\r\nfor row in range(h):\r\n for col in range(w):\r\n if maps[row][col]:\r\n size = 1\r\n team = maps[row][col]\r\n maps[row][col] = False\r\n que = [(row, col)]\r\n\r\n while que:\r\n y, x = que.pop(0)\r\n\r\n for dy, dx in VECTOR:\r\n ny, nx = y + dy, x + dx\r\n\r\n if 0 <= ny < h and 0 <= nx < w:\r\n if maps[ny][nx] == team:\r\n size += 1\r\n que.append((ny, nx))\r\n maps[ny][nx] = False\r\n\r\n power[team] += size ** 2\r\n\r\nprint(*power.values())","repo_name":"cerezo00/23_AlgorithmStudy","sub_path":"3_DFS,BFS/D_1303_남수민.py","file_name":"D_1303_남수민.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31638794395","text":"def dp_palindrome(X):\n \n # Create a reverse of the list to compare for longest common subsequence\n Y = X[::-1]\n\n # Initialize empty matrix\n n = len(X)\n C = [[0 for _ in range(len(X)+1)] for __ in range(len(X)+1)]\n D = [[0 for _ in range(len(X)+1)] for __ in range(len(X)+1)]\n\n # Simple case check\n if X == Y:\n return X\n\n elif X == \"\":\n return \"\"\n\n # Filling the matrix\n for i in range(1,n+1):\n for j in range(1,n+1):\n\n # Alter index values to counter offset for row and column of 0s\n ii = i-1\n jj = j-1\n\n if Y[ii] == X[jj]:\n C[i][j] = C[i-1][j-1] + 1\n D[i][j] = \"up-left\"\n\n else:\n m = max(C[i-1][j], C[i][j-1]) \n if m == C[i-1][j]:\n C[i][j] = C[i-1][j]\n D[i][j] = \"up\"\n\n elif m == C[i][j-1]:\n C[i][j] = C[i][j-1]\n D[i][j] = \"left\"\n \n # Complete answer retrieval via back pointers\n row = n\n column = n\n LCS_palindrome = \"\"\n\n while row > 0 and column > 0:\n\n if D[row][column] == \"up-left\":\n LCS_palindrome += Y[row-1]\n row -= 1\n column -= 1\n \n elif D[row][column] == \"up\":\n row -= 1\n \n elif D[row][column] == \"left\":\n column -= 1\n \n return LCS_palindrome\n\ndef main():\n\n user_input = input(\"\")\n inputs = []\n\n while user_input != \"\":\n inputs.append(user_input)\n user_input = input(\"\")\n\n for string in inputs:\n print(dp_palindrome(string))\n\n\nmain()","repo_name":"Ryan-JW-Kim/Year3Sem2","sub_path":"CP312/a4/lpal.py","file_name":"lpal.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26708594678","text":"from contextlib import contextmanager\nfrom typing import Optional, Dict, Any\n\nfrom rich.console import Console\nfrom rich.table import Table\n\n\nout = Console()\n\n\n@contextmanager\ndef busy(message: str, spinner: str = \"aesthetic\"):\n with out.status(message, spinner=spinner):\n yield\n\n\ndef print(message: str):\n out.print(message)\n\n\ndef result(\n message: Optional[str],\n rows: Optional[Dict[str, Any]] = None,\n is_success: bool = True,\n):\n\n o = \"[bold green] Success!\" if is_success else \"[bold red] Failure!\"\n out.rule(o)\n\n if message and not is_success:\n out.print(f\"[bold red]Failure:[/bold red] {message}\")\n return\n\n if message:\n out.print(message)\n\n if rows:\n t = Table(\"Results\")\n\n for k in rows:\n t.add_row(k, rows[k])\n out.print(t)\n","repo_name":"FergusInLondon/dji_parse","sub_path":"dji_parse/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"}
+{"seq_id":"35365886146","text":"# -*- coding: utf-8 -*-\n\n\nfrom bs4 import BeautifulSoup, NavigableString, Tag\nimport re, functools\n\nclean_parser = functools.partial(BeautifulSoup, features=\"html.parser\")\ndel BeautifulSoup\n\nSKIPPED_TAGS = [\"head\", \"a\", \"textarea\", \"pre\", \"code\",\n \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\", \"h7\", \"h8\"] # IMPORTANT - the content of these tags must stay as-is\n\n\ndef join_regular_expressions_as_disjunction(regexes, as_words=False):\n if as_words:\n regexes = (r\"(?:\\b\" + regex + r\"\\b)\" for regex in regexes)\n else:\n regexes = (r\"(?:\" + regex + r\")\" for regex in regexes)\n return \"|\".join(regexes)\n\n\ndef generate_links(html_snippet, regex, link_attr_generator):\n soup = clean_parser(html_snippet) # that parser doesn't add or tags\n\n def generate_link_str(match_obj):\n\n try:\n content = match_obj.group(\"content\") # named subgroup\n except LookupError:\n content = match_obj.group(0) # the entire matched keyword\n\n attrs = link_attr_generator(match_obj)\n if attrs:\n tag = soup.new_tag(\"a\", **attrs)\n tag.string = content\n return str(tag)\n else:\n return content\n\n def insert_links(string):\n new_string, occurences = re.subn(regex,\n generate_link_str,\n string,\n flags=re.IGNORECASE | re.UNICODE | re.DOTALL | re.MULTILINE)\n if not occurences:\n assert string == new_string\n None\n else:\n mini_soup = clean_parser(new_string)\n new_children = mini_soup.contents\n return new_children\n\n def recurse_elements(element):\n children = tuple(element.contents) # we freeze current children, as they'll be modified here\n for child in children: # no enumerate() here, as the tree changes all the time\n if isinstance(child, NavigableString):\n new_children = insert_links(str(child))\n if new_children:\n current_index = element.index(child)\n child.extract()\n for new_child in reversed(new_children):\n element.insert(current_index, new_child)\n else:\n assert child.name.lower() == child.name # LOWERCASE\n if child.name not in SKIPPED_TAGS: # necessarily a Tag\n recurse_elements(child)\n\n recurse_elements(soup)\n return str(soup)\n\n\nif __name__ == \"__main__\":\n #Create the soup\n input = '''\n Page title one\n \n

This is one paragraph one.\n This is one paragraph one.\n '''\n\n res = generate_links(input, \"one\", lambda x: dict(href=\"TARGET\", title=\"mytitle\"))\n\n print(res)\n","repo_name":"ChrysalisTeam/pychronia","sub_path":"pychronia_game/utilities/autolinker.py","file_name":"autolinker.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"34028329540","text":"import requests\r\nfrom bs4 import BeautifulSoup as BS\r\n#link='http://127.0.0.1:8000/wiki/Atletico Madrid2'\r\nfrom global_ import link\r\nimport store_speak\r\n#print(link)\r\nresponse=requests.get(link)\r\n\r\nsoup=BS(response.content,'html.parser')\r\n\r\nf=soup.find('div',class_='main col-lg-10 col-md-9')\r\n\r\nlines=f.find_all('p')\r\ntext=str(lines)\r\ntext=text.replace('

','\\n')\r\ntext=text.replace('

','\\n')\r\ntext=text.replace('[','')\r\ntext=text.replace(']','')\r\ntext=list(text)\r\n#print(text)\r\nfilename=\"C:/Users/Akash/Downloads/Humanoid/Web_scraping/search_result.txt\"\r\nstore_speak.filename=\"C:/Users/Akash/Downloads/Humanoid/Web_scraping/search_result.txt\"\r\nfile=open(filename,'w')\r\nfor x in text:\r\n #print(x,end='')\r\n #text=line.text\r\n #print()\r\n #print(type(text))\r\n store_speak.Store(x)\r\n\r\nstore_speak.Read(store_speak.filename)\r\n \r\n\r\n","repo_name":"ATM10919/Humanoid","sub_path":"Web_scraping/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5469879819","text":"import abc\nimport copy\nimport dataclasses\nimport datetime\nimport functools\nimport json\nimport logging\nimport requests\nimport time\nimport warnings\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass _DeprecatedProperty:\n\tdef __init__(self, name, repl, replStr):\n\t\tself.name = name\n\t\tself.repl = repl\n\t\tself.replStr = replStr\n\n\tdef __get__(self, obj, objType):\n\t\tif obj is None: # if the access is through the class using _DeprecatedProperty rather than an instance of the class:\n\t\t\treturn self\n\t\twarnings.warn(f'{self.name} is deprecated, use {self.replStr} instead', FutureWarning, stacklevel = 2)\n\t\treturn self.repl(obj)\n\n\ndef _json_serialise_datetime(obj):\n\t'''A JSON serialiser that converts datetime.datetime and datetime.date objects to ISO-8601 strings.'''\n\n\tif isinstance(obj, (datetime.datetime, datetime.date)):\n\t\treturn obj.isoformat()\n\traise TypeError(f'Object of type {type(obj)} is not JSON serializable')\n\n\ndef _json_dataclass_to_dict(obj):\n\tif isinstance(obj, _JSONDataclass) or dataclasses.is_dataclass(obj):\n\t\tout = {}\n\t\tout['_type'] = f'{type(obj).__module__}.{type(obj).__name__}'\n\t\tfor field in dataclasses.fields(obj):\n\t\t\tassert field.name != '_type'\n\t\t\tif field.name.startswith('_'):\n\t\t\t\tcontinue\n\t\t\tout[field.name] = _json_dataclass_to_dict(getattr(obj, field.name))\n\t\t# Add in (non-deprecated) properties\n\t\tfor k in dir(obj):\n\t\t\tif isinstance(getattr(type(obj), k, None), property):\n\t\t\t\tassert k != '_type'\n\t\t\t\tif k.startswith('_'):\n\t\t\t\t\tcontinue\n\t\t\t\tout[k] = _json_dataclass_to_dict(getattr(obj, k))\n\t\treturn out\n\telif isinstance(obj, (tuple, list)):\n\t\treturn type(obj)(_json_dataclass_to_dict(x) for x in obj)\n\telif isinstance(obj, dict):\n\t\treturn {_json_dataclass_to_dict(k): _json_dataclass_to_dict(v) for k, v in obj.items()}\n\telif isinstance(obj, set):\n\t\treturn {_json_dataclass_to_dict(v) for v in obj}\n\telse:\n\t\treturn copy.deepcopy(obj)\n\n\n@dataclasses.dataclass\nclass _JSONDataclass:\n\t'''A base class for dataclasses for conversion to JSON'''\n\n\tdef json(self):\n\t\t'''Convert the object to a JSON string'''\n\n\t\tout = _json_dataclass_to_dict(self)\n\t\tfor key, value in list(out.items()): # Modifying the dict below, so make a copy first\n\t\t\tif isinstance(value, IntWithGranularity):\n\t\t\t\tout[key] = int(value)\n\t\t\t\tassert f'{key}.granularity' not in out, f'Granularity collision on {key}.granularity'\n\t\t\t\tout[f'{key}.granularity'] = value.granularity\n\t\treturn json.dumps(out, default = _json_serialise_datetime)\n\n\n@dataclasses.dataclass\nclass Item(_JSONDataclass):\n\t'''An abstract base class for an item returned by the scraper's get_items generator.\n\n\tAn item can really be anything. 
The string representation should be useful for the CLI output (e.g. a direct URL for the item).\n\t'''\n\n\t@abc.abstractmethod\n\tdef __str__(self):\n\t\tpass\n\n\n@dataclasses.dataclass\nclass Entity(_JSONDataclass):\n\t'''An abstract base class for an entity returned by the scraper's entity property.\n\n\tAn entity is typically the account of a person or organisation. The string representation should be the preferred direct URL to the entity's page on the network.\n\t'''\n\n\t@abc.abstractmethod\n\tdef __str__(self):\n\t\tpass\n\n\nclass IntWithGranularity(int):\n\t'''A number with an associated granularity\n\n\tFor example, an IntWithGranularity(42000, 1000) represents a number on the order of 42000 with two significant digits, i.e. something counted with a granularity of 1000.\n\t'''\n\n\tdef __new__(cls, value, granularity, *args, **kwargs):\n\t\tobj = super().__new__(cls, value, *args, **kwargs)\n\t\tobj.granularity = granularity\n\t\treturn obj\n\n\tdef __reduce__(self):\n\t\treturn (IntWithGranularity, (int(self), self.granularity))\n\n\nclass URLItem(Item):\n\t'''A generic item which only holds a URL string.'''\n\n\tdef __init__(self, url):\n\t\tself._url = url\n\n\t@property\n\tdef url(self):\n\t\treturn self._url\n\n\tdef __str__(self):\n\t\treturn self._url\n\n\nclass ScraperException(Exception):\n\tpass\n\n\nclass Scraper:\n\t'''An abstract base class for a scraper.'''\n\n\tname = None\n\n\tdef __init__(self, retries = 3):\n\t\tself._retries = retries\n\t\tself._session = requests.Session()\n\n\t@abc.abstractmethod\n\tdef get_items(self):\n\t\t'''Iterator yielding Items.'''\n\n\t\tpass\n\n\tdef _get_entity(self):\n\t\t'''Get the entity behind the scraper, if any.\n\n\t\tThis is the method implemented by subclasses for doing the actual retrieval/entity object creation. For accessing the scraper's entity, use the entity property.\n\t\t'''\n\n\t\treturn None\n\n\t@functools.cached_property\n\tdef entity(self):\n\t\treturn self._get_entity()\n\n\tdef _request(self, method, url, params = None, data = None, headers = None, timeout = 10, responseOkCallback = None, allowRedirects = True):\n\t\tfor attempt in range(self._retries + 1):\n\t\t\t# The request is newly prepared on each retry because of potential cookie updates.\n\t\t\treq = self._session.prepare_request(requests.Request(method, url, params = params, data = data, headers = headers))\n\t\t\tlogger.info(f'Retrieving {req.url}')\n\t\t\tlogger.debug(f'... with headers: {headers!r}')\n\t\t\tif data:\n\t\t\t\tlogger.debug(f'... with data: {data!r}')\n\t\t\ttry:\n\t\t\t\tr = self._session.send(req, allow_redirects = allowRedirects, timeout = timeout)\n\t\t\texcept requests.exceptions.RequestException as exc:\n\t\t\t\tif attempt < self._retries:\n\t\t\t\t\tretrying = ', retrying'\n\t\t\t\t\tlevel = logging.INFO\n\t\t\t\telse:\n\t\t\t\t\tretrying = ''\n\t\t\t\t\tlevel = logging.ERROR\n\t\t\t\tlogger.log(level, f'Error retrieving {req.url}: {exc!r}{retrying}')\n\t\t\telse:\n\t\t\t\tredirected = f' (redirected to {r.url})' if r.history else ''\n\t\t\t\tlogger.info(f'Retrieved {req.url}{redirected}: {r.status_code}')\n\t\t\t\tif r.history:\n\t\t\t\t\tfor i, redirect in enumerate(r.history):\n\t\t\t\t\t\tlogger.debug(f'... 
request {i}: {redirect.request.url}: {r.status_code} (Location: {r.headers.get(\"Location\")})')\n\t\t\t\tif responseOkCallback is not None:\n\t\t\t\t\tsuccess, msg = responseOkCallback(r)\n\t\t\t\telse:\n\t\t\t\t\tsuccess, msg = (True, None)\n\t\t\t\tmsg = f': {msg}' if msg else ''\n\n\t\t\t\tif success:\n\t\t\t\t\tlogger.debug(f'{req.url} retrieved successfully{msg}')\n\t\t\t\t\treturn r\n\t\t\t\telse:\n\t\t\t\t\tif attempt < self._retries:\n\t\t\t\t\t\tretrying = ', retrying'\n\t\t\t\t\t\tlevel = logging.INFO\n\t\t\t\t\telse:\n\t\t\t\t\t\tretrying = ''\n\t\t\t\t\t\tlevel = logging.ERROR\n\t\t\t\t\tlogger.log(level, f'Error retrieving {req.url}{msg}{retrying}')\n\t\t\tif attempt < self._retries:\n\t\t\t\tsleepTime = 1.0 * 2**attempt # exponential backoff: sleep 1 second after first attempt, 2 after second, 4 after third, etc.\n\t\t\t\tlogger.info(f'Waiting {sleepTime:.0f} seconds')\n\t\t\t\ttime.sleep(sleepTime)\n\t\telse:\n\t\t\tmsg = f'{self._retries + 1} requests to {req.url} failed, giving up.'\n\t\t\tlogger.fatal(msg)\n\t\t\traise ScraperException(msg)\n\t\traise RuntimeError('Reached unreachable code')\n\n\tdef _get(self, *args, **kwargs):\n\t\treturn self._request('GET', *args, **kwargs)\n\n\tdef _post(self, *args, **kwargs):\n\t\treturn self._request('POST', *args, **kwargs)\n\n\t@classmethod\n\tdef cli_setup_parser(cls, subparser):\n\t\tpass\n\n\t@classmethod\n\tdef cli_from_args(cls, args):\n\t\treturn cls._construct(args)\n\n\t@classmethod\n\tdef cli_construct(cls, argparseArgs, *args, **kwargs):\n\t\treturn cls(*args, **kwargs, retries = argparseArgs.retries)\n\n\ndef nonempty_string(name):\n\tdef f(s):\n\t\ts = s.strip()\n\t\tif s:\n\t\t\treturn s\n\t\traise ValueError('must not be an empty string')\n\tf.__name__ = name\n\treturn f\n","repo_name":"shaadclt/Twitter-Hashtag-Analysis","sub_path":"tenv/Lib/site-packages/snscrape/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"23181170686","text":"from __future__ import annotations\n\nimport os\nimport cachetools\nimport numpy as np\nimport sys\nfrom . 
import jacktools\nimport signal\nimport math\nimport textwrap\nfrom typing import TYPE_CHECKING\nimport emlib.dialogs\nimport emlib.iterlib\nimport emlib.misc\nimport subprocess\nimport bisect\n\n\nif TYPE_CHECKING:\n from .instr import Instr\n from typing import *\n from csoundlib import AudioDevice, MidiDevice\n\n\n_registry: dict[str, Any] = {}\n\n\ntry:\n import xxhash\n def ndarrayhash(a: np.ndarray) -> str:\n if a.flags.contiguous:\n return xxhash.xxh128_hexdigest(a)\n else:\n return str(id(a))\n\nexcept ImportError:\n import hashlib\n def ndarrayhash(a: np.ndarray) -> str:\n return hashlib.sha1(a).hexdigest()\n\n\n@cachetools.cached(cache=cachetools.TTLCache(1, 20))\ndef isrunning(prog: str) -> bool:\n \"True if prog is running\"\n if sys.platform == 'linux':\n failed = subprocess.call(['pgrep', '-f', prog],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return not failed\n else:\n raise RuntimeError(f\"Platform {sys.platform} not supported\")\n\n\ndef m2f(midinote: float, a4: float) -> float:\n \"\"\"\n Convert a midi-note to a frequency\n \"\"\"\n return 2**((midinote-69)/12.0)*a4\n\n\ndef arrayNumChannels(a: np.ndarray) -> int:\n \"\"\"\n Return the number of channels in a numpy array holding audio data\n \"\"\"\n return 1 if len(a.shape) == 1 else a.shape[1]\n\n\ndef unflattenArray(a: np.ndarray, numchannels: int) -> None:\n \"\"\"\n Unflatten array in place\n\n Args:\n a: the array to unflatten\n numchannels: the number of audio channels in the data\n \"\"\"\n if len(a.shape) > 1:\n if a.shape[1] != numchannels:\n raise ValueError(\"Array is not flat but the number of channels\"\n f\"diverge (given numchannels={numchannels}, \"\n f\"array number of channels: {a.shape[1]}\")\n return\n numrows = len(a) / numchannels\n if numrows != int(numrows):\n raise ValueError(\"The array does not have an integral number of frames. 
\"\n f\"(length: {len(a)} / {numchannels} = {numrows}\")\n a.shape = (numrows, numchannels)\n\n\ndef getChannel(samples: np.ndarray, channel: int) -> np.ndarray:\n \"\"\"\n Get a channel of a numpy array holding possibly multichannel audio data\n\n Args:\n samples: the (multichannel) audio data\n channel: the index of the channel to extract\n\n Returns:\n a numpy array holding a channel of audio data.\n \"\"\"\n return samples if len(samples.shape) == 1 else samples[:, channel]\n\n\ndef sigintHandler(sig, frame):\n print(frame)\n raise KeyboardInterrupt(\"SIGINT (CTRL-C) while waiting\")\n\n\ndef setSigintHandler():\n \"\"\"\n Set own sigint handler to prevent CTRL-C from crashing csound\n\n It will do nothing if this was already set\n \"\"\"\n if _registry.get('sigint_handler_set'):\n return\n original_handler = signal.getsignal(signal.SIGINT)\n signal.signal(signal.SIGINT, sigintHandler)\n _registry['original_sigint_handler'] = original_handler\n _registry['sigint_handler_set'] = True\n\n\ndef removeSigintHandler():\n \"\"\"\n Reset the sigint handler to its original state\n This will do nothing if our own handler was not set\n in the first place\n \"\"\"\n if not _registry.get('sigint_handler_set'):\n return\n signal.signal(signal.SIGINT, _registry['original_sigint_handler'])\n _registry['sigint_handler_set'] = False\n\n\ndef determineNumbuffers(backend: str, buffersize: int) -> int:\n if backend == 'jack':\n info = jacktools.getInfo()\n numbuffers = int(math.ceil(info.blocksize / buffersize))\n else:\n numbuffers = 2\n return numbuffers\n\n\ndef instrResolveArgs(instr: Instr,\n p4: int,\n pargs: list[float] | dict[str, float] | None = None,\n pkws: dict[str, float] | None = None,\n ) -> list[float]:\n \"\"\"\n Resolves pargs, returns pargs starting from p4\n\n Args:\n instr: the Instr instance\n p4: the value for p4\n pargs: pargs passed to the instr, starting with p5\n pkws: named pargs\n\n Returns:\n pargs passed to csound, **starting with p4**\n \"\"\"\n allargs: list[float] = [float(p4)]\n if not pargs and not instr.pargsIndexToDefaultValue and not pkws:\n return allargs\n if isinstance(pargs, list):\n allargs.extend(instr.pargsTranslate(pargs, pkws))\n else:\n if pkws:\n if pargs:\n pargs.update(pkws)\n else:\n pargs = pkws\n allargs.extend(instr.pargsTranslate(kws=pargs))\n return allargs\n\n\ndef addNotifycationAtStop(body: str, notifyDeallocInstrnum: int) -> str:\n notifystr = f'atstop {notifyDeallocInstrnum}, 0.01, 0.0, p1'\n out = \"\\n\".join([notifystr, body])\n return out\n\n\ndef instrWrapBody(body: str,\n instrid: int | str | Sequence[str],\n comment='',\n notifyDeallocInstrnum=0\n ) -> str:\n s = r\"\"\"\ninstr {instrnum} {commentstr}\n {body}\nendin\n \"\"\"\n if notifyDeallocInstrnum > 0:\n body = addNotifycationAtStop(body, notifyDeallocInstrnum)\n commentstr = \"; \" + comment if comment else \"\"\n if isinstance(instrid, (list, tuple)):\n instrid = \", \".join([str(i) for i in instrid])\n s = s.format(instrnum=instrid, body=body, commentstr=commentstr)\n return textwrap.dedent(s)\n\n\ndef addLineNumbers(code: str) -> str:\n lines = [f\"{i:03d} {line}\"\n for i, line in enumerate(code.splitlines(), start=1)]\n return \"\\n\".join(lines)\n\n\n# Maps platform values as given by sys.platform to more readable aliases\n_platformAliases = {\n 'linux2': 'linux',\n 'linux': 'linux',\n 'darwin': 'macos',\n 'macos': 'macos',\n 'win32': 'windows',\n 'windows': 'windows'\n}\n\nplatform = _platformAliases[sys.platform]\n\n\n# Maps possible platform names to names as returned by 
sys.platform\n_normalizedPlatforms = {\n 'linux': 'linux',\n 'win32': 'win32',\n 'darwin': 'darwin',\n 'windows': 'win32',\n 'macos': 'darwin'\n}\n\ndef platformAlias(platform: str) -> str:\n \"\"\"\n Return the platform alias (macos, windows, linux) for the\n given platform (instead of darwin, win32, etc)\n\n This is the opposite of `normalizePlatform`\n \"\"\"\n out = _platformAliases.get(platform)\n if out is None:\n raise KeyError(f\"Platform {platform} unknown, possible values are\"\n f\" {_platformAliases.keys()}\")\n return out\n\n\ndef normalizePlatform(s:str) -> str:\n \"\"\"Return the platform as given by sys.platform\n\n This is the opposite of `platformAlias`\n \"\"\"\n out = _normalizedPlatforms.get(s)\n if out is None:\n raise KeyError(f\"Platform {s} not known\")\n return out\n\n\ndef resolveOption(prioritizedOptions:list[str], availableOptions:list[str]\n ) -> Optional[str]:\n for opt in prioritizedOptions:\n if opt in availableOptions:\n return opt\n return None\n\n\ndef selectAudioDevice(devices: list[AudioDevice], title='Select device'\n ) -> Optional[AudioDevice]:\n if len(devices) == 1:\n return devices[0]\n outnames = [dev.info() for dev in devices]\n selected = emlib.dialogs.selectItem(items=outnames, title=title)\n if not selected:\n return None\n idx = outnames.index(selected)\n outdev = devices[idx]\n return outdev\n\n\ndef selectMidiDevice(devices: list[MidiDevice], title='Select MIDI device'\n ) -> Optional[MidiDevice]:\n \"\"\"\n Select a midi device from the given devices\n\n Args:\n devices: the midi devices to select from, as returned from ...\n title: the title of the dialog\n\n Returns:\n the deviceid of the selected device, None if no selection was made\n If the devices are input devices, 'all' is added as option. 
The given\n value can be passed to -M csound option\n \"\"\"\n if len(devices) == 1:\n return devices[0]\n names = [f\"{dev.name} [{dev.deviceid}]\" for dev in devices]\n selected = emlib.dialogs.selectItem(items=names, title=title)\n if not selected:\n return None\n else:\n name, devid = selected[:-1].split(\"[\")\n return next(d for d in devices if d.deviceid == devid)\n\n\n\ndef selectItem(items: list[str], title=\"Select\") -> Optional[str]:\n return emlib.dialogs.selectItem(items=items, title=title)\n\n\ndef instrNameFromP1(p1: Union[float, str]) -> Union[int, str]:\n return int(p1) if isinstance(p1, (int, float)) else p1.split(\".\")[0]\n\n\ndef resolvePfieldIndex(pfield: Union[int, str],\n pfieldNameToIndex: dict[str, int] | None = None\n ) -> int:\n if isinstance(pfield, int):\n return pfield\n if pfield[0] == 'p':\n return int(pfield[1:])\n if not pfieldNameToIndex:\n return 0\n return pfieldNameToIndex.get(pfield, 0)\n\n\ndef isAscii(s: str) -> bool:\n return all(ord(c)<128 for c in s)\n\n\ndef consolidateDelay(pairs: list[float], delay: float\n ) -> tuple[list[float], float]:\n \"\"\"\n (2, 20, 3, 30, 4, 40), delay=3\n\n out = (0, 20, 1, 30, 2, 40), delay=5\n \"\"\"\n t0 = pairs[0]\n assert t0 >= 0\n if t0 == 0:\n return pairs, delay\n out = []\n for t, v in emlib.iterlib.window(pairs, 2, 2):\n out.append(t - t0)\n out.append(v)\n return out, delay + t0\n\n\ndef cropDelayedPairs(pairs: list[float], delay: float, start: float, end: float\n ) -> tuple[list[float], float]:\n \"\"\"\n Crop the given pairs between start and end (inclusive)\n\n Args:\n pairs: a flat list of pairs in the form (t0, value0, t1, value1, ...)\n delay: a time offset to apply to all times\n start: start cropping at this time\n end: end cropping at this time\n\n Returns:\n a tuple (new pairs, new delay)\n\n .. 
code::\n pairs = (2, 20, 3, 30)\n delay = 3\n abspairs = (5, 20, 6, 30)\n t0 = 4, t1 = 5.5 -> t0 = 5, t1 = 5.5\n\n outpairs = (2, 20, 2.5, 25)\n outdelay = 3\n\n t0 = 5.5, t1 = 6\n cropPairs(pairs, 5.5-3=2.5, 6-3=3)\n\n outpairs = (2.5, 25, 3, 30)\n outdelay = 3\n\n t0 = 1, t1 = 5.5\n cropPairs(pairs, 5-3=2, 5.5-3=2.5)\n outpairs = (2, 20, 2.5, 30)\n outdelay = 3\n \"\"\"\n pairst0 = pairs[0] + delay\n if start < pairst0:\n start = pairst0\n croppedPairs = cropPairs(pairs, start - delay, end - delay)\n return croppedPairs, delay\n\n\ndef cropPairs(pairs: list[float], t0: float, t1: float) -> list[float]:\n pairsStart, pairsEnd = pairs[0], pairs[-2]\n\n if t0 < pairsStart and t1 >= pairsEnd:\n return pairs\n\n if t0 >= pairsEnd or t1 <= pairsStart:\n return []\n\n def interpolate(t: float, times: list[float], values: list[float]\n ) -> tuple[int, float, float]:\n idx = bisect.bisect(times, t)\n if times[idx - 1] == t:\n return idx, t, values[idx - 1]\n else:\n t0, v0 = times[idx-1], values[idx-1]\n t1, v1 = times[idx], values[idx]\n delta = (t - t0) / (t1 - t0)\n v = v0 + (v1 - v0) * delta\n return idx, t, v\n\n times = pairs[::2]\n values = pairs[1::2]\n out: list[float] = []\n if t0 <= times[0]:\n chunkstart = 0\n else:\n chunkstart, t, v = interpolate(t0, times, values)\n out.append(t)\n out.append(v)\n\n if t1 >= times[-1]:\n chunkend = len(times)\n lastbreakpoint = None\n else:\n chunkend, t, v = interpolate(t1, times, values)\n lastbreakpoint = (t, v)\n out.extend(pairs[chunkstart*2:chunkend*2])\n if lastbreakpoint is not None:\n out.extend(lastbreakpoint)\n return out\n\n\ndef splitPairs(pairs: Sequence[float], num: int) -> list[Sequence[float]]:\n \"\"\"\n Split automation pairs\n\n Args:\n pairs: automation data of the form time0, value0, time1, value1, ...\n num: max. number of pairs\n\n Returns:\n list of pair lists\n \"\"\"\n l = len(pairs)\n groups = []\n start = 0\n while start < l - 1:\n end = min(start + num*2, l)\n group = pairs[start:end]\n groups.append(group)\n start = end\n assert sum(len(group) for group in groups) == len(pairs)\n return groups\n\n\ndef aslist(l: Sequence) -> list:\n if isinstance(l, list):\n return l\n return list(l)\n\n\ndef soundfileHtml(sndfile: str,\n withHeader=True,\n withAudiotag=True,\n audiotagMaxDuration=10,\n audiotagWidth='100%',\n audiotagMaxWidth='1200px',\n embedThreshold=2.\n ) -> str:\n \"\"\"\n Returns an HTML representation of this Sample\n\n This can be used within a Jupyter notebook to force the\n html display. It is useful inside a block were it would\n not be possible to put this Sample as the last element\n of the cell to force the html representation\n\n Args:\n withHeader: include a header line with repr text ('Sample(...)')\n withAudiotag: include html for audio playback.\n audiotagMaxDuration: max duration\n\n Returns:\n the HTML repr as str\n\n \"\"\"\n import sndfileio\n import IPython.display\n import emlib.img\n from . 
import plotting\n import tempfile\n pngfile = tempfile.mktemp(suffix=\".png\", prefix=\"plot-\")\n samples, info = sndfileio.sndget(sndfile)\n if info.duration < 20:\n profile = 'highest'\n elif info.duration < 40:\n profile = 'high'\n elif info.duration < 180:\n profile = 'medium'\n else:\n profile = 'low'\n plotting.plotSamples(samples, samplerate=info.samplerate, profile=profile, saveas=pngfile)\n img = emlib.img.htmlImgBase64(pngfile) # , maxwidth='800px')\n if info.duration > 60:\n durstr = emlib.misc.sec2str(info.duration)\n else:\n durstr = f\"{info.duration:.3g}\"\n if withHeader:\n s = (f\"Soundfile: '{sndfile}', duration: {durstr}, \"\n f\"sr: {info.samplerate}, \"\n f\"numchannels: {info.channels})
\")\n else:\n s = ''\n s += img\n if withAudiotag and info.duration/60 < audiotagMaxDuration:\n maxwidth = audiotagMaxWidth\n # embed short audiofiles, the longer ones are written to disk and read\n # from there\n if info.duration < embedThreshold:\n audioobj = IPython.display.Audio(samples.T, rate=info.samplerate)\n audiotag = audioobj._repr_html_()\n else:\n os.makedirs('tmp', exist_ok=True)\n outfile = tempfile.mktemp(suffix='.mp3')\n sndfileio.sndwrite(outfile, samples=samples, sr=info.samplerate)\n audioobj = IPython.display.Audio(outfile)\n audiotag = audioobj._repr_html_()\n audiotag = audiotag.replace('audio controls=\"controls\"',\n fr'audio controls style=\"width: {audiotagWidth}; max-width: {maxwidth};\"')\n s += \"
\" + audiotag\n return s\n\n\nsafeColors = {\n 'blue1': '#9090FF',\n 'blue2': '#6666E0',\n 'red1': '#FF9090',\n 'red2': '#E08080',\n 'green1': '#90FF90',\n 'green2': '#8080E0',\n 'magenta1': '#F090F0',\n 'magenta2': '#E080E0',\n 'cyan': '#70D0D0',\n 'grey1': '#BBBBBB',\n 'grey2': '#A0A0A0',\n 'grey3': '#909090'\n}","repo_name":"gesellkammer/csoundengine","sub_path":"csoundengine/internalTools.py","file_name":"internalTools.py","file_ext":"py","file_size_in_byte":15506,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"5213549715","text":"import torch\nfrom torch.distributions import Categorical\nfrom tqdm import tqdm\n\nfrom ccil.utils.data import Trajectory\nfrom ccil.utils.utils import random_mask_from_state\n\n\nclass PolicyRunner:\n def __init__(self, env, agent, state_encoder):\n self.env = env\n self.agent = agent\n self.state_encoder = state_encoder\n\n def run_episode(self):\n state, done = self.env.reset(), False\n trajectory = None\n while not done:\n x = self.state_encoder.step(state, trajectory)\n action = self.agent(x).item()\n\n prev_action, prev_state = action, state\n state, rew, done, info = self.env.step(action)\n\n trajectory = Trajectory.add_step(\n trajectory, prev_state, prev_action, rew, None, info=info\n )\n trajectory.finished()\n return trajectory\n\n def run_num_steps(self, num_steps, verbose=False):\n progress_bar = tqdm(total=num_steps, disable=not verbose)\n steps = 0\n trajectories = []\n while True:\n trajectory = self.run_episode()\n steps += len(trajectory)\n progress_bar.update(len(trajectory))\n trajectories.append(trajectory)\n if steps >= num_steps:\n break\n\n progress_bar.close()\n return trajectories\n\n def run_num_episodes(self, num_episodes, verbose=False):\n trajectories = []\n for _ in tqdm(range(num_episodes), disable=not verbose):\n trajectory = self.run_episode()\n trajectories.append(trajectory)\n return trajectories\n\n\ndef run_fixed_mask(env, policy_model, state_encoder, mask, num_episodes):\n agent = FixedMaskPolicyAgent(policy_model, mask)\n runner = PolicyRunner(env, agent, state_encoder)\n trajectories = runner.run_num_episodes(num_episodes)\n return trajectories\n\n\ndef hard_discrete_action(output):\n return output.argmax(-1)\n\n\ndef sample_discrete_action(output):\n return Categorical(logits=output).sample()\n\n\nclass RandomMaskPolicyAgent:\n def __init__(self, policy, output_transformation=hard_discrete_action):\n self.policy = policy\n self.device = next(policy.parameters()).device\n self.output_transformation = output_transformation\n\n def __call__(self, state):\n x = torch.tensor(state, device=self.device, dtype=torch.float)[None]\n mask = random_mask_from_state(x)\n output = self.policy.forward(x, mask)\n action = self.output_transformation(output)\n return action\n\n\nclass FixedMaskPolicyAgent:\n def __init__(self, policy, mask, output_transformation=hard_discrete_action):\n self.policy = policy\n self.device = next(policy.parameters()).device\n self.mask = torch.tensor(mask, device=self.device, dtype=torch.float)\n self.output_transformation = output_transformation\n\n def __call__(self, state):\n x = torch.tensor(state, device=self.device, dtype=torch.float)[None]\n output = self.policy.forward(x, self.mask)\n action = self.output_transformation(output)\n return 
action\n","repo_name":"pimdh/causal-confusion","sub_path":"ccil/utils/policy_runner.py","file_name":"policy_runner.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"62"} +{"seq_id":"41440040808","text":"'''Ejercicio 2. Escribe un programa que analice el fichero de texto cuyo nombre se especifica interactivamente y muestre por pantalla la siguiente información:\n\ta) Para cada línea del fichero:\n\t\t• El número total de caracteres y de palabras, y la longitud media de una palabra.\n\t\t• La letra con que termina la primera palabra y el número de palabras que terminan con esa letra.\n\tb) Para el fichero completo:\n\t\t• El número total de caracteres, palabras y líneas del fichero, y la longitud media de una palabra.\n\t\t• El número de la línea en la que hay más palabras, junto con el número de palabras en esa línea.'''\n\nclass FileExaminer:\n\t\n\tfrom io import open\n\n\tdef __init__(self):\n\t\t\tpass\n\n\tdef analiza(self):\n\t\tfrom operator import truediv \n\t\timport string\n\n\t\tmyFile= open (\"C:\\\\Users\\\\Anna Cilona\\\\Desktop\\\\Python\\\\Ejercicios\\\\EjHoja6\\\\6.2\\\\Odissea.txt\",\"r\", encoding=\"utf-8\")\n\t\tlistaLineas=myFile.readlines()\n\n\t\talmacen=0\n\t\tnumLinea=0\n\t\tcontaLineas=0\n\t\tcontaPalabrasTotales=0\n\t\tcontaCaracteresTotales=0\n\t\tlongitudPalabra=0\n\t\tnumPalabrasporLinea=[]\n\t\tlistaLenCharporLinea=[]\n\t\tlongPalporLinea=[]\n\t\tlistaUltimaLetraPalabra=[]\n\t\t#listaOccurrencesUltimaLetra=[]\n\n\t\tfor i in range(len(listaLineas)):\n\t\t\tlongPal=0\n\t\t\tcontaLineas=contaLineas+1\n\t\t\tlistaPalabras=listaLineas[i].split()\n\t\t\tif listaPalabras[0][-1].isalpha():\n\t\t\t\tlistaUltimaLetraPalabra.append(listaPalabras[0][-1].lower())\n\t\t\telse:\n\t\t\t\tlistaUltimaLetraPalabra.append(listaPalabras[0][-2].lower())\n\t\t\t \n\t\t\tcontaPalabrasTotales=contaPalabrasTotales+len(listaPalabras)\n\t\t\tnumPalabrasporLinea.append(len(listaPalabras))\n\t\t\tif almacen= INF: #갈 수 없을 때\n print('-1')\nelse:\n print(answer)\n\n\n\n#Point\n#범위가 크기 때문에 다익스트라 알고리즘 사용\n\n#출발점을 3번 구해 각각 더해주면 된다\n\n#2가지로 나눌 수 있다 (이 중에서 최솟값 선택)\n#1번에서 출발 + x에서 출발 + y에서 출발 \n#1번에서 출발 + y에서 출발 + x에서 출발\n\n#함수로 풀기","repo_name":"geunu97/Algorithm_Python","sub_path":"백준/스텝25/Baekjoon_Step25_Q2.py","file_name":"Baekjoon_Step25_Q2.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"73349537476","text":"#!/usr/bin/env python3\r\n\r\nfrom random import choice\r\n\r\n\"\"\"This program plays a game of Rock, Paper, Scissors between two Players,\r\nand reports both Player's scores each round.\"\"\"\r\n\r\nmoves = ['rock', 'paper', 'scissors']\r\n\r\n\"\"\"The Player class is the parent class for all of the Players\r\nin this game\"\"\"\r\n\r\n\r\nclass Player:\r\n def move(self):\r\n return \"rock\"\r\n\r\n def learn(self, last_move):\r\n pass\r\n\r\n\r\nclass HumanPlayer(Player):\r\n def move(self):\r\n myChoice = input(\"choose a move (paper, scissors, rock): \").lower()\r\n while myChoice not in moves:\r\n print(\"Not a valid move!\")\r\n return self.move()\r\n return myChoice\r\n\r\n\r\nclass RandomPlayer(Player):\r\n def move(self):\r\n return choice(moves)\r\n\r\n\r\nclass ReflectPlayer(Player):\r\n def __init__(self):\r\n Player.__init__(self)\r\n self.last_move = None\r\n\r\n def learn(self, last_move):\r\n self.last_move = last_move\r\n\r\n def move(self):\r\n if (self.last_move is 
None):\r\n return Player.move(self)\r\n return self.last_move\r\n\r\n\r\nclass CyclePlayer(Player):\r\n def __init__(self):\r\n Player.__init__(self)\r\n self.last_move = None\r\n\r\n def move(self):\r\n if (self.last_move is None):\r\n move = Player.move(self)\r\n else:\r\n index = moves.index(self.last_move) + 1\r\n if index >= len(moves):\r\n index = 0\r\n move = moves[index]\r\n self.last_move = move\r\n return move\r\n\r\n\r\ndef beats(one, two):\r\n return ((one == 'rock' and two == 'scissors') or\r\n (one == 'scissors' and two == 'paper') or\r\n (one == 'paper' and two == 'rock'))\r\n\r\n\r\nclass Game:\r\n def __init__(self, p1, p2):\r\n self.p1 = p1\r\n self.p2 = p2\r\n self.score1 = 0\r\n self.score2 = 0\r\n\r\n def play_round(self):\r\n move1 = self.p1.move()\r\n move2 = self.p2.move()\r\n\r\n if (beats(move1, move2) is True):\r\n print(\"PLAYER WINS :)\")\r\n self.score1 += 1\r\n elif (move1 == move2):\r\n print(\"It's a DRAW\")\r\n pass\r\n else:\r\n print(\"COMPUTER WINS\")\r\n self.score2 += 1\r\n\r\n print(f\"Player Move: {move1}, Computer Move: {move2}\")\r\n print(f\"Player Score: {self.score1}, Computer Score: {self.score2}\")\r\n self.p1.learn(move1)\r\n self.p2.learn(move1)\r\n\r\n def play_game(self):\r\n print(\"Game start!\")\r\n for round in range(3):\r\n print(f\"Round {round+1}:\")\r\n self.play_round()\r\n if (self.score1 > self.score2):\r\n print(\"PLAYER is the WINNER :)\")\r\n elif (self.score1 == self.score2):\r\n print(\"No one is the Winner\")\r\n else:\r\n print(\"COMPUTER is the WINNER\")\r\n print(\"Game over!\")\r\n\r\n\r\nif __name__ == '__main__':\r\n Player1 = HumanPlayer()\r\n Player2 = ReflectPlayer()\r\n game = Game(Player1, Player2)\r\n game.play_game()","repo_name":"eraldomuha/software_development_projects","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31845816708","text":"#!/usr/bin/python3\n\n''' Module for printing square '''\n\n\ndef print_square(size):\n ''' Prints a square with # characters\n\n Args:\n size (int): length of the square\n Raises:\n TypeError: if size is not an integer\n ValueError: if size < 0\n '''\n square = ''\n if not isinstance(size, int):\n raise TypeError('size must be an integer')\n if size < 0:\n raise ValueError('size must be >= 0')\n for i in range(size):\n square += '#' * size + '\\n'\n print(square, end='')\n","repo_name":"LaudRam/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3650428473","text":"import glob\n\nimport MeCab\n\nimport pickle\n\nm = MeCab.Tagger('-Ochasen')\n\ntag_feats = {}\nfor name in glob.glob('courpus/*'):\n for index, line in enumerate(open(name)):\n try:\n tag, line = line.strip().split('\\t')\n except Exception as ex:\n print(ex)\n continue\n feat = [x.split('\\t').pop(0) for x in filter(lambda x:'名詞-代名詞-一般' in x or '名詞-一般' in x, m.parse(line).strip().split('\\n'))]\n if feat == []:\n continue\n if index%5000 == 0:\n print(feat)\n print(line)\n if tag_feats.get(tag) is None:\n tag_feats[tag] = []\n tag_feats[tag].append( feat )\n\npickle.dump(tag_feats, open(f'wakati/tag_feats.pkl', 'wb')) 
\n","repo_name":"GINK03/5ch-blog-depression-machine-learning","sub_path":"20-wakati.py","file_name":"20-wakati.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"38081408956","text":"import asyncio\nimport random\nimport hashlib\nimport hmac\nimport json\nimport threading\nfrom datetime import datetime, timedelta\n\nimport aiohttp\nimport discord\nfrom flask import request, Flask, render_template, Response\n\nimport sambot\n\nfrom environment import Environment\n\n# Create a child thread for the bot to run on.\nfrom models import base_model, BaseModel\nfrom models.model_interfaces import StreamLiveNotificationModelInterface\n\nsambot.bot.loop.create_task(sambot.run())\nthreading.Thread(target=sambot.bot.loop.run_forever).start()\n\n# Create the Flask app\napp = Flask(__name__)\n\n\nasync def send_notification(channel, embed):\n await channel.send('Hey @everyone, it\\'s stream time!', embed=embed)\n\n\nasync def renew_all_subscriptions(startup: bool = False):\n headers = {\n 'client-id': Environment.instance().TWITCH_CLIENT_ID,\n 'Authorization': f'Bearer {Environment.instance().TWITCH_AUTH}'\n }\n one_day = 60 * 60 * 24\n old_uuid = 'x'\n\n while True:\n with open('/home/pi/sambot_uuid', 'r') as uuid_file:\n new_uuid = uuid_file.readline().rstrip('\\n')\n \n if startup or old_uuid != new_uuid:\n subscriptions = StreamLiveNotificationModelInterface.get_all()\n else:\n subscriptions = \\\n StreamLiveNotificationModelInterface.get_expiring_soon()\n for subscription in subscriptions:\n # Update the subscription's profile picture URL.\n url = f'https://api.twitch.tv/helix/users?id=' \\\n f'{subscription.streamer_twitch_id}'\n async with aiohttp.ClientSession() as session:\n async with session.get(url, headers=headers) as resp:\n if resp.status != 200:\n print(f'Could not reach Twitch backend when attempting '\n f'to update the profile picture for the streamer '\n f'{subscription.streamer_display_name}')\n return\n data = await resp.json()\n if len(data['data']) == 0:\n print(f'Could not find the Twitch streamer: '\n f'{subscription.streamer_display_name}')\n data = data['data'][0]\n subscription.profile_image_url = data['profile_image_url']\n # Renew each subscription.\n payload = {\n 'hub.callback': f'https://{new_uuid}.loca.lt/webhook',\n 'hub.mode': 'subscribe',\n 'hub.topic': f'https://api.twitch.tv/helix/streams?user_id='\n f'{subscription.streamer_twitch_id}',\n 'hub.lease_seconds': one_day,\n 'hub.secret': Environment.instance().TWITCH_SECRET\n }\n now = datetime.now()\n subscription.subscription_length = one_day\n subscription.created = now\n subscription.expires = now + timedelta(\n seconds=subscription.subscription_length)\n StreamLiveNotificationModelInterface.save_instance(subscription)\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url='https://api.twitch.tv/helix/webhooks/hub',\n headers=headers,\n data=payload) as resp:\n print(await resp.text())\n old_uuid = new_uuid\n startup = False\n await asyncio.sleep(60) # Renew subscriptions every minute.\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/webhook', methods=['GET', 'POST'])\ndef webhook_confirm():\n if request.method == 'GET':\n challenge = request.args.get('hub.challenge')\n return Response(challenge, status=200)\n else:\n received_signature = request.headers.get('X-Hub-Signature')\n generated_signature = 'sha256=' + hmac.new(\n 
Environment.instance().TWITCH_SECRET.encode('utf-8'),\n request.data,\n hashlib.sha256\n ).hexdigest()\n if received_signature is None or \\\n received_signature != generated_signature:\n return Response(render_template('404.html'), status=404)\n\n data = json.loads(request.data).get('data')\n if not data or data[0].get('type') != 'live':\n return Response(status=200)\n data = data[0]\n # Get the StreamLiveNotification objects for this streamer.\n notifications = \\\n StreamLiveNotificationModelInterface.get_all_for_streamer(\n streamer_twitch_id=int(data['user_id'])\n )\n for notification in notifications:\n channel = sambot.bot.get_channel(notification.notify_channel)\n image_url = data.get('thumbnail_url').replace(\n '{width}', '320', 1\n ).replace('{height}', '180', 1) + f'?r={random.randint(1, 99999)}'\n embed = discord.Embed(title=data.get('title'),\n url=f'https://www.twitch.tv/'\n f'{data.get(\"user_name\")}',\n color=0x6441a5)\n embed.set_author(\n name=f'{data.get(\"user_name\")} just went live!',\n url=f'https://www.twitch.tv/{data.get(\"user_name\")}',\n icon_url=\"https://upload.wikimedia.org/wikipedia/\"\n \"commons/6/6c/Yip_Man.jpg\")\n embed.set_thumbnail(url=notification.profile_image_url)\n embed.add_field(name='Game',\n value=data.get('game_name'),\n inline=True)\n embed.add_field(name='Viewers',\n value=data.get('viewer_count'),\n inline=True)\n embed.set_footer(text=notification.footer)\n embed.set_image(url=image_url)\n sambot.bot.loop.create_task(send_notification(channel, embed))\n notification.last_notified = datetime.now()\n StreamLiveNotificationModelInterface.save_instance(notification)\n return Response(status=200)\n\n\nstartup_loop = asyncio.new_event_loop()\nstartup_loop.create_task(renew_all_subscriptions(startup=True))\n#renew_loop = asyncio.new_event_loop()\n#renew_loop.create_task(renew_all_subscriptions())\n#threading.Thread(target=renew_loop.run_forever).start()\nthreading.Thread(target=startup_loop.run_forever).start()\n","repo_name":"Sam-Macpherson/sambot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"41403572408","text":"#!/usr/bin/env python2\r\n#\r\n# coding: utf-8\r\n#\r\n# Plot pseudo-hierarhical Bayesian model simulation (only rely on individual\r\n# MCMC chains)\r\n#\r\n\r\nfrom __future__ import print_function\r\nimport sys\r\nsys.path.append('../lib')\r\nimport os\r\nimport numpy as np\r\nimport matplotlib\r\nif not '--show' in sys.argv:\r\n matplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\nimport seaborn as sns\r\n\r\n\r\n# Set parameter transformation\r\nimport parametertransform\r\ntransform_to_model_param = parametertransform.log_transform_to_model_param\r\ntransform_from_model_param = parametertransform.log_transform_from_model_param\r\n\r\n\r\n# About files\r\nfile_name = 'syn-101'\r\ntemperature = 25.0\r\nfit_seed = '542811797'\r\n\r\n# Control fitting seed --> OR DONT\r\n# control_seed = np.random.randint(0, 2**30)\r\ncontrol_seed = int(fit_seed)\r\nprint('Using seed: ', control_seed)\r\nnp.random.seed(control_seed)\r\n\r\nload_data = './out-mcmc/syn-101-testnexp'\r\nsaveas = './figs/testnexp'\r\nn_non_model_param = 1\r\nwhich_hyper_func = 1\r\nvariable_names = [r'$g_{Kr}$', r'$p_1$', r'$p_2$', r'$p_3$', r'$p_4$',\r\n r'$p_5$', r'$p_6$', r'$p_7$', r'$p_8$', 'noise']\r\n\r\nnexp = 125\r\ntestnexps = [20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 125]\r\n\r\nif not 
os.path.isdir(os.path.dirname(saveas)):\r\n os.makedirs(os.path.dirname(saveas))\r\n\r\n\r\n# True values\r\npath2mean = '../room-temperature-only/kylie-room-temperature/' \\\r\n + 'last-solution_C5.txt'\r\ntrue_mean = np.loadtxt(path2mean)\r\n# Change conductance unit nS->pS (new parameter use V, but here mV)\r\ntrue_mean[0] = true_mean[0] * 1e3\r\ntrue_mean = transform_from_model_param(true_mean)\r\ncov_seed = 101\r\ntrue_cov = np.loadtxt('./out/cov-%s.txt' % cov_seed)\r\ntrue_cor = np.loadtxt('./out/corr-%s.txt' % cov_seed)\r\ntrue_std = np.sqrt(np.diag(true_cov))\r\n\r\n\r\n#\r\n# Save mcmc means and simple log mean\r\n#\r\nfor i in range(nexp):\r\n p = np.loadtxt('%s/%s/solution-%s.txt' % ('./out', \\\r\n file_name + '-mcmcmean', i))\r\n\r\n\r\n#\r\n# Run pseudo HBM and test nexp effect\r\n#\r\n\r\n\r\nerr_mean_y = []\r\nerr_mean_x = []\r\nerr_std_y = []\r\nerr_std_x = []\r\nerr_cor_y = []\r\nerr_cor_x = []\r\n\r\nfor testnexp in testnexps:\r\n\r\n mean = np.loadtxt('%s/%s-pseudohbm-lognorm-mean-nexp-%s.txt' \\\r\n % (load_data, file_name, testnexp))\r\n with open('%s/%s-pseudohbm-lognorm-cov-nexp-%s.pkl' \\\r\n % (load_data, file_name, testnexp), 'rb') as f:\r\n cov = pickle.load(f)\r\n\r\n std = np.zeros((cov.shape[0], cov.shape[1]))\r\n cor = np.zeros(cov.shape)\r\n for i, s in enumerate(cov):\r\n D = np.sqrt(np.diag(s))\r\n std[i, :] = D[:]\r\n c = s / D / D[:, None]\r\n cor[i, :, :] = c[:, :]\r\n\r\n err_mean = (mean - true_mean) / np.abs(true_mean)\r\n err_std = (std - true_std) / true_std\r\n err_cor = cor - true_cor\r\n\r\n err_mean_y.extend(np.sqrt(np.mean(err_mean ** 2, axis=1)))\r\n err_mean_x.extend(len(err_mean) * [testnexp])\r\n err_std_y.extend(np.sqrt(np.mean(err_std ** 2, axis=1)))\r\n err_std_x.extend(len(err_std) * [testnexp])\r\n err_cor_y.extend(np.sqrt(np.mean(np.mean(err_cor ** 2, axis=2), axis=1)))\r\n err_cor_x.extend(len(err_cor) * [testnexp])\r\n\r\n\r\n# Mean\r\nplt.figure(figsize=(12, 6))\r\nsns.violinplot(x=err_mean_x, y=err_mean_y, zorder=1)\r\n\r\nplt.ylabel(r'RMSPE of mean', fontsize=32)\r\nplt.xlabel(r'$N_{exp}$', fontsize=32)\r\nplt.xticks(fontsize=24)\r\nplt.yticks(fontsize=24)\r\nplt.savefig('%s-mean-violin.png' % saveas, bbox_inches='tight', dpi=300)\r\nplt.savefig('%s-mean-violin.pdf' % saveas, format='pdf',\r\n bbox_inches='tight')\r\n\r\n\r\n# Std\r\nplt.figure(figsize=(12, 6))\r\nsns.violinplot(x=err_std_x, y=err_std_y, zorder=1)\r\n\r\nplt.ylabel(r'RMSPE of std', fontsize=32)\r\nplt.xlabel(r'$N_{exp}$', fontsize=32)\r\nplt.xticks(fontsize=24)\r\nplt.yticks(fontsize=24)\r\nplt.savefig('%s-std-violin.png' % saveas, bbox_inches='tight', dpi=300)\r\nplt.savefig('%s-std-violin.pdf' % saveas, format='pdf',\r\n bbox_inches='tight')\r\n\r\n\r\n# Cov\r\nplt.figure(figsize=(12, 6))\r\nsns.violinplot(x=err_cor_x, y=err_cor_y, zorder=1)\r\n\r\nplt.ylabel(r'RMSE of correlation', fontsize=32)\r\nplt.xlabel(r'$N_{exp}$', fontsize=32)\r\nplt.xticks(fontsize=24)\r\nplt.yticks(fontsize=24)\r\nplt.savefig('%s-cor-violin.png' % saveas, bbox_inches='tight', dpi=300)\r\nplt.savefig('%s-cor-violin.pdf' % saveas, format='pdf',\r\n bbox_inches='tight')\r\n","repo_name":"CardiacModelling/hERGRapidCharacterisation","sub_path":"syn-room-temperature-only/pseudohbm-covergence.py","file_name":"pseudohbm-covergence.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"20994389153","text":"import math, statistics\nfrom scipy.stats import binom\nimport peak\n\ndef 
getTheoreticalEnvelope(precMz, charge, isotopeRange):\n nCarbons = estimateCarbons(precMz, charge)\n res = [0]\n for i in range(1, isotopeRange[\"compareSize\"]):\n res.append(binom.pmf(i - 1, nCarbons, 0.011))\n return res\n\n\ndef estimateCarbons(mz, z):\n \"\"\"\n Estimates the number of carbons in a peptide based only on its precursor m/z and charge.\n Original monocle: mz = 111; carbons = 5.1\n Senko et al. 1995: mz = 111.1254; carbons = 4.9384\n DKS Uniprot TREMBL 2019_08: mz = 110.3963; carbons = 4.9243\n \"\"\"\n protonMass = 1.007276466812\n return math.floor((((mz - protonMass) * z) / 111) * 5.1)\n\n\ndef extract(scans, precMz, charge, isotopeRange):\n diff = 1.00286864 # Averagine difference(?)\n # diff = 1.003355 # C13 - C12 difference\n protonMass = 1.007276466812\n left, compareSize, nIsotopes = isotopeRange[\"left\"], isotopeRange[\"compareSize\"], isotopeRange[\"isotopes\"]\n mzArray = [[] for i in range(nIsotopes)]\n intArray = [[] for i in range(nIsotopes)]\n for scan in scans:\n for i in range(nIsotopes):\n matchMz = precMz + (((i + left) * diff) / charge)\n idx = peak.match(scan, matchMz, 3, \"ppm\") # Hard-coded tolerance in extract, 3ppm\n if idx >= 0:\n mz = scan[\"m/z array\"][idx]\n intensity = scan[\"intensity array\"][idx]\n mzArray[i].append(mz)\n intArray[i].append(intensity)\n nMax = 0\n avgIntensity = []\n for intensities in intArray:\n if len(intensities) > nMax:\n nMax = len(intensities)\n if len(intensities) > 0:\n avgIntensity.append(sum(intensities) / len(intensities))\n else:\n avgIntensity.append(0)\n\n output = {\"mz\": mzArray, \"intensity\": intArray, \"avgIntensity\": avgIntensity, \"maxPeakCount\": nMax}\n return output\n\n\ndef scaleByPeakCount(x, env, i):\n if env[\"maxPeakCount\"] > 0:\n for j in range(i, i + len(x)):\n x[j - i] *= len(env[\"mz\"][j]) / env[\"maxPeakCount\"]\n return x\n","repo_name":"cjhjhj/myMonocle","sub_path":"peptideEnvelope.py","file_name":"peptideEnvelope.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4052416761","text":"#!/usr/bin/env python\n\n\"\"\"\nscript: txt-csv.py\nauthor: Simon Lindgren\nnon-standard packages: pandas\nfunctionality: creates a csv file of all txt files in the working directory\n\"\"\"\n\nimport glob\nimport pandas as pd\n\n# get all filenames in the data subdir\nfs = glob.glob('data/*.txt')\n\n# create a dataframe with an empty column named 'text'\ncols = ['text']\ndf = pd.DataFrame(columns = cols)\n\n# iterate over all filenames\nfor f in fs:\n with open(f, \"r\") as file: # open file\n d = file.read() # read file contents\n df.loc[f[5:-4]] = [d] # write filename (-5 first/4 last chars) as index, and file context to the 'text' column\n\ndf.index.name = 'file' # set the header for the index column\ndf.to_csv('raw.csv', sep=',') # save everything to a csv file","repo_name":"simonlindgren/txtls","sub_path":"txt-csv.py","file_name":"txt-csv.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19350562355","text":"import ast\nfrom contextlib import contextmanager\nfrom types import ModuleType\nfrom typing import List, Generator, Tuple\nimport test\nimport inspect\nfrom collections import defaultdict, deque\nfrom typeguards import _isClassDef, _isScopeCreator, _isFunctionDef, _isCall\nimport itertools\nfrom collections import defaultdict\nimport base64\nfrom functools import 
lru_cache\n\n\nclass Context(defaultdict):\n def __init__(self, *, obj=None, name='global'):\n super().__init__(list)\n\n self.__parent = None\n self.__children = []\n\n # name should be immutable\n self._obj: ast = obj\n self._name: str = name\n\n # defined in the constructor since it's an immutable property\n self.isTestingContext = isinstance(\n obj, ast.ClassDef) and Context.__checkClassBase(obj)\n\n @staticmethod\n def __checkClassBase(node: ast.ClassDef) -> bool:\n return any(map(lambda base: base.value.id == 'unittest' and base.attr == 'TestCase', node.bases))\n\n @property\n def _parent(self):\n return self.__parent\n\n @property\n def _children(self):\n return self.__children\n\n @property\n def obj(self):\n return self._obj\n\n @_parent.setter\n def _parent(self, parent):\n assert parent is not None\n self.__parent = parent\n\n def addChild(self, child: dict):\n child._parent = self\n self.__children.append(child)\n\n def addNode(self, node: ast):\n if _isClassDef(node):\n self['class'] = node\n elif _isFunctionDef(node):\n self['function'] = node\n\n @property\n def path(self):\n prefix = (self.__parent.path if self.__parent else '')\n return prefix + '#' + self._name\n\n\nclass Declaration:\n def __init__(self, source: ast, name: str):\n self.source = source\n self.name = name\n self.references = []\n\n def __repr__(self):\n return \"{} w/ references {}\".format(self.name, self.references)\n\n\n_global_context = Context()\n\n\n# no lru_cache here: Context is a dict subclass and therefore unhashable\ndef encodeID(name: str, context: Context):\n # base64 works on bytes, so encode the path string first\n return base64.b64encode((context.path + '#' + name).encode()).decode()\n\n\n@lru_cache(maxsize=100)\ndef decodeID(id: str):\n return base64.b64decode(id).decode().split('#')[-1]\n\n\ndef _extractFunctionCall(node: ast.Call, context: Context):\n referenceFunctionName = node.func.id\n callContext = context\n\n # essentially, we climb our context to find the most appropriate one\n while context is not None:\n\n if referenceFunctionName in context:\n context[referenceFunctionName].references.append(node)\n node.context = callContext\n return\n\n context = context._parent\n\n print(\"Could not find function for {}\".format(node.func.id))\n\n\ndef swapContext(parentContext: Context, node: ast) -> Tuple[Context, Context]:\n print(inspect.getsource(node.__class__))\n context = Context(obj=node, name=node.name)\n parentContext.addChild(context)\n\n context.isTestingContext = parentContext.isTestingContext or context.isTestingContext\n parentContext[node.name] = Declaration(node, node.name)\n\n return parentContext, context\n\n\ndef findTestCaseCalls(node: ast, context: Context = _global_context):\n\n if _isScopeCreator(node):\n print('creating scope for ', node.name)\n # we ought to keep looking through our queue, especially through our new citizens\n _, context = swapContext(context, node)\n\n elif _isCall(node) and context.isTestingContext:\n _extractFunctionCall(node, context)\n\n for nextNode in ast.iter_child_nodes(node):\n findTestCaseCalls(nextNode, context)\n\n\ndef findUnitTestClass(node: ast.ClassDef):\n findTestCaseCalls(node)\n\n\n# main thread starter that starts everything\ndef doWork(module: str, target: str):\n with open(\"{}.py\".format(module), \"r\") as source:\n tree = ast.parse(source.read())\n\n # just look one level down for the test\n for node in ast.iter_child_nodes(tree):\n findUnitTestClass(node)\n\n stack = [_global_context]\n\n while stack:\n context = stack.pop()\n stack.extend(context._children)\n\n for functionName, declaration in context.items():\n\n if len(declaration.references) > 0 and 
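A quick round-trip check of the (fixed) base64 ID helpers above. This is a standalone sketch with illustrative function and path names: `b64encode`/`b64decode` operate on bytes, so the path string is encoded first and decoded afterwards.

```python
import base64

def encode_id(path: str, name: str) -> str:
    # str -> bytes -> base64 bytes -> printable str
    return base64.b64encode((path + '#' + name).encode()).decode()

def decode_id(token: str) -> str:
    return base64.b64decode(token).decode().split('#')[-1]

token = encode_id('#global#MyTestCase', 'helper')
assert decode_id(token) == 'helper'
```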
functionName.strip() == target.strip():\n print(\"Evaluating {}...\".format(functionName))\n dump = ast.dump(declaration.references[0].context.obj)\n\n print(eval(compile(dump, filename='', mode='exec')))\n","repo_name":"sonny3690/unittest-resolver","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
+{"seq_id":"14068209435","text":"# Experiment 5: Vigenere Cipher\n\ndef getChar(code:int) -> str:\n return chr(code % 26 + 97)\n\ndef getCode(char:str) -> int:\n return (ord(char) - 97)\n\n\ndef encrypt(plainText:str, key:str) -> str:\n cipher = ''\n\n splitText = [getCode(char) for char in plainText]\n\n splitKey = []\n i = 0\n for _ in splitText:\n splitKey.append(getCode(key[i % len(key)]))\n i += 1\n\n splitCipher = []\n for i in range(len(splitKey)):\n splitCipher.append(getChar(splitText[i] + splitKey[i]))\n\n for char in splitCipher:\n cipher += char\n\n return cipher\n\n\ndef decrypt(cipherText:str, key:str) -> str:\n message = ''\n\n splitText = [getCode(char) for char in cipherText]\n\n splitKey = []\n i = 0\n for _ in splitText:\n splitKey.append(getCode(key[i % len(key)]))\n i += 1\n\n splitCipher = []\n for i in range(len(splitKey)):\n splitCipher.append(getChar(splitText[i] - splitKey[i]))\n\n for char in splitCipher:\n message += char\n\n return message\n\n\nif __name__ == \"__main__\":\n # message = input('Enter your message: ')\n # key = input('Enter key: ')\n message = 'wearediscoveredsaveyourself'\n key = 'deceptive'\n\n cipher = encrypt(message.lower(), key.lower())\n print(cipher)\n\n message = decrypt(cipher.lower(), key.lower())\n print(message)\n","repo_name":"JayNakum/LearningPython","sub_path":"Information Security/Lab4.py","file_name":"Lab4.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"6707599928","text":"# -*- coding: utf-8 -*-\r\n\r\ntry:\r\n from unittest import mock\r\nexcept ImportError:\r\n import mock\r\n\r\nfrom unittest import TestCase\r\nfrom types import ModuleType\r\nfrom request_handler.response_generator import ResponseGenerator\r\nget_response = ResponseGenerator()\r\n\r\nrequest = {\r\n \"id\": \"a50ea3c6-6f9b-475e-aba8-aa8de519261e\",\r\n \"timestamp\": \"2018-04-24T09:41:31.758Z\",\r\n \"lang\": \"en\",\r\n \"result\": {\r\n \"source\": \"agent\",\r\n \"resolvedQuery\": \"read the next section\",\r\n \"action\": \"smartlearningreadoutlessonSegment.smartlearningreadoutlessonSegment-custom\",\r\n \"actionIncomplete\": False,\r\n \"parameters\": {\r\n \"operations\" : \"next\"\r\n },\r\n \"contexts\": [{\r\n \"name\": \"smartlearningreadoutlessonsegment-followup\",\r\n \"parameters\": {\r\n \"lesson_section.original\": \"read aloud\",\r\n \"lesson_name.original\": \"lesson 1\",\r\n \"lesson_name\": \"What's the Weather Like?\",\r\n \"lesson_section\": \"Read Aloud\",\r\n \"introductorySection.original\": \"section\",\r\n \"introductorySection\": \"section\"\r\n },\r\n \"lifespan\": 3\r\n },\r\n {\r\n \"name\": \"operations-followup\",\r\n \"parameters\": {\r\n \"lesson_section.original\": \"read aloud\",\r\n \"lesson_name.original\": \"lesson 1\",\r\n \"lesson_name\": \"What's the Weather Like\",\r\n \"lesson_section\": \"Read Aloud\",\r\n \"lesson_sub_section\" : \"what's the weather like\",\r\n \"query\": \"read the section read aloud\",\r\n \"introductorySection.original\": \"section\",\r\n \"previous_response\": 
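A worked check of the Vigenère logic above (classic textbook message/key pair, independent of the script): each letter is shifted by the matching key letter mod 26, so decrypting with the negated shift restores the plaintext.

```python
def shift(c: str, k: str, sign: int = 1) -> str:
    # shift lowercase letter c by key letter k (sign=-1 undoes it)
    return chr((ord(c) - 97 + sign * (ord(k) - 97)) % 26 + 97)

msg, key = 'attackatdawn', 'lemon'
cipher = ''.join(shift(c, key[i % len(key)]) for i, c in enumerate(msg))
plain = ''.join(shift(c, key[i % len(key)], -1) for i, c in enumerate(cipher))
assert cipher == 'lxfopvefrnhr' and plain == msg
```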
\"Read Aloud section contains 5 sections, They are Purpose for Listening WHAT'S THE WEATHER LIKE Comprehension Questions Word Work Characteristics Check for Understanding\",\r\n \"intent\": \"lessonSegment - section - subSection\",\r\n \"introductorySection\": \"section\"\r\n },\r\n \"lifespan\": 1\r\n },\r\n {\r\n \"name\": \"lessonsegment-section-followup\",\r\n \"parameters\": {\r\n \"lesson_section.original\": \"read aloud\",\r\n \"introductorySection.original\": \"section\",\r\n \"lesson_section\": \"Read Aloud\",\r\n \"introductorySection\": \"section\"\r\n },\r\n \"lifespan\": 2\r\n },\r\n {\r\n \"name\": \"smartlearningreadoutlessonsegmentsection-followup\",\r\n \"parameters\": {\r\n \"lesson_section.original\": \"read aloud\",\r\n \"introductorySection.original\": \"section\",\r\n \"lesson_section\": \"Read Aloud\",\r\n \"introductorySection\": \"section\"\r\n },\r\n \"lifespan\": 2\r\n }\r\n ],\r\n \"metadata\": {\r\n \"intentId\": \"e5c852af-dc72-4350-ba37-13627fec46cb\",\r\n \"webhookUsed\": \"true\",\r\n \"webhookForSlotFillingUsed\": \"false\",\r\n \"webhookResponseTime\": 59,\r\n \"intentName\": \"smartlearning.operations\"\r\n },\r\n \"fulfillment\": {\r\n \"speech\": \"Read Aloud section contains 5 sections, They are Purpose for Listening WHAT'S THE WEATHER LIKE Comprehension Questions Word Work Characteristics Check for Understanding\",\r\n \"displayText\": \"Read Aloud section contains 5 sections, They are Purpose for Listening WHAT'S THE WEATHER LIKE Comprehension Questions Word Work Characteristics Check for Understanding\",\r\n \"messages\": [{\r\n \"type\": 0,\r\n \"speech\": \"Read Aloud section contains 5 sections, They are Purpose for Listening WHAT'S THE WEATHER LIKE Comprehension Questions Word Work Characteristics Check for Understanding\"\r\n }],\r\n \"data\": {\r\n \"google\": {\r\n \"is_ssml\": True,\r\n \"expect_user_response\": True\r\n }\r\n }\r\n },\r\n \"score\": 0.7549868384261669\r\n },\r\n \"status\": {\r\n \"code\": 200,\r\n \"errorType\": \"success\",\r\n \"webhookTimedOut\": False\r\n },\r\n \"sessionId\": \"1cb7acdf-23a3-4c1f-9605-97f4a4a68628\"\r\n}\r\n\r\nclass TestRequestOnOperations(TestCase):\r\n \r\n params = request[\"result\"][\"parameters\"]\r\n params[\"intent\"] = request[\"result\"][\"metadata\"][\"intentName\"]\r\n context = request[\"result\"][\"contexts\"]\r\n \r\n def test_repeat_request(self):\r\n self.params[\"operations\"] = \"repeat\"\r\n result,context = get_response.generate_response(self.params, self.context)\r\n print (1,result)\r\n \r\n def test_previous_request(self):\r\n self.params[\"operations\"] = \"previous\"\r\n result,context = get_response.generate_response(self.params, self.context)\r\n print (2,result)\r\n \r\n def test_next_request(self):\r\n self.params[\"operations\"] = \"next\"\r\n result,context = get_response.generate_response(self.params, self.context)\r\n print (3,result)\r\n \r\n def test_previous_request_for_first_section(self):\r\n for context in self.context:\r\n if context[\"name\"] == \"operations-followup\":\r\n context[\"parameters\"][\"lesson_sub_section\"] = \"Purpose for Listening\"\r\n self.params[\"operations\"] = \"previous\"\r\n result,context = get_response.generate_response(self.params, self.context)\r\n print (4,result)\r\n \r\n def test_next_request_for_last_section(self):\r\n for context in self.context:\r\n if context[\"name\"] == \"operations-followup\":\r\n context[\"parameters\"][\"lesson_sub_section\"] = \"Check for Understanding\"\r\n self.params[\"operations\"] = \"next\"\r\n 
result,context = get_response.generate_response(self.params, self.context)\r\n print (5,result)\r\n \r\n \r\n","repo_name":"crazyapidev/question-analysis","sub_path":"smart-learning/request_handler/tests/test_operation_requests.py","file_name":"test_operation_requests.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72714479236","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom sklearn import datasets\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#binary classification data for logistic regression\r\ndataset=datasets.load_breast_cancer()\r\nx,y=dataset.data,dataset.target\r\nn_samples,n_features=x.shape\r\n#print(n_samples,n_features)\r\n\r\n#training and testing data\r\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=123)\r\n\r\n#scaling data to tranform data to have mean=0, and standard variance=1\r\nsc=StandardScaler()\r\nx_train=sc.fit_transform(x_train)\r\nx_test=sc.transform(x_test)\r\n\r\n#initially values loaded are double\r\nx_train=torch.from_numpy(x_train.astype(np.float32))\r\nx_test=torch.from_numpy(x_test.astype(np.float32))\r\ny_train=torch.from_numpy(y_train.astype(np.float32))\r\ny_test=torch.from_numpy(y_test.astype(np.float32))\r\n#print(x_train)\r\n#print(y_train)\r\n\r\n#reshaping y from row to column vector\r\ny_train=y_train.view(y_train.shape[0],1)\r\ny_test=y_test.view(y_test.shape[0],1)\r\n\r\n#model => f=wx+b => sigmoid function\r\n\r\nclass Log(nn.Module):\r\n\tdef __init__(self,inp_features):\r\n\t\tsuper(Log,self).__init__()\r\n\t\tself.linear=nn.Linear(inp_features,1)\r\n\t\r\n\tdef forward(self,x):\r\n\t\ty_pred=torch.sigmoid(self.linear(x))\r\n\t\treturn y_pred\r\n\r\nmodel=Log(n_features)\r\n\r\n#learning rate\r\nlrate=0.01\r\n\r\n#loss-binary cross entropy loss\r\nloss=nn.BCELoss()\r\n\r\n#optimizer-Stochastic gradient descent\r\noptimizer=torch.optim.SGD(model.parameters(),lr=lrate)\r\n\r\n#training\r\nepochs=100\r\n\r\nfor epoch in range(epochs):\r\n\t#forward pass\r\n\ty_pred=model(x_train)\r\n\t#loss\r\n\tl=loss(y_pred,y_train)\r\n\t#backward pass-calculate gradients\r\n\tl.backward()\r\n\t#update weights\r\n\toptimizer.step()\r\n\t#empty gradients\r\n\toptimizer.zero_grad()\r\n\t\r\n\tif (epoch+1)%10==0:\r\n\t\tprint(f'epoch={epoch+1}, loss={l.item():.4f}')\r\n\r\n#evaluating the model\r\nwith torch.no_grad():\r\n\ty_pred=model(x_test)\r\n\ty_pred_class=y_pred.round()\r\n\tacc=y_pred_class.eq(y_test).sum()/float(y_test.shape[0])\r\n\tprint(f'accuracy={acc:.4f}')\r\n","repo_name":"supersjgk/Logistic_Regression_PyTorch","sub_path":"log_reg.py","file_name":"log_reg.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"14742293838","text":"from .models import *\nfrom rest_framework import serializers\n\n\nclass MeasureUnitSerializer(serializers.ModelSerializer):\n class Meta:\n model = MeasureUnit\n exclude = (\n \"state\",\n \"deleted_date\",\n \"modified_date\",\n )\n\n\nclass CategoryProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = CategoryProduct\n exclude = (\n \"state\",\n \"deleted_date\",\n \"modified_date\",\n )\n\n\nclass IndicatorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Indicator\n exclude = (\n \"state\",\n \"deleted_date\",\n \"modified_date\",\n )\n\n\nclass 
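Side note on the loss used in the logistic-regression script above: `nn.BCELoss` on sigmoid outputs is the mean negative log-likelihood of the binary targets. A tiny check with made-up tensors:

```python
import torch

p = torch.tensor([0.9, 0.2, 0.7])   # predicted probabilities (illustrative)
y = torch.tensor([1.0, 0.0, 1.0])   # binary targets

manual = -(y * torch.log(p) + (1 - y) * torch.log(1 - p)).mean()
assert torch.allclose(torch.nn.BCELoss()(p, y), manual)
```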
ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Producto\n exclude = (\n \"state\",\n \"deleted_date\",\n \"modified_date\",\n \"created_date\",\n )\n\n# The point of these validators: if the measure or category field is not sent,\n# return a response, since the null=False declared on the model does not return\n# any message to the frontend.\n\n# These two validations return a response when the field is sent EMPTY\n def validate_measure_unit(self,value): \n if value == '' or value is None:\n raise serializers.ValidationError({'error':'You must enter a measure unit'})\n return value\n\n def validate_category_product(self,value): # this field is made mandatory from the serializer, not the model\n if value == '' or value is None:\n raise serializers.ValidationError({'error':'You must enter a category'})\n return value\n\n# These two validations return a response when the field is NOT sent\n def validate(self,data):\n if 'measure_unit' not in data.keys():\n raise serializers.ValidationError({'error':'You must enter a measure unit'})\n if 'category_product' not in data.keys():\n raise serializers.ValidationError({'error':'You must enter a category'})\n\n return data\n\n def to_representation(self, instance):\n return {\n \"id\": instance.id,\n \"stock\": instance.stock.get('quantity__sum',0) if instance.stock.get('quantity__sum') is not None else 0,\n# takes the quantity field, whose key is quantity__sum (field name __ operation); defaults to 0\n \"description\": instance.description,\n \"image\": instance.image.url if instance.image != '' else '',\n \"measure_unit\": instance.measure_unit.description,\n \"category_product\": instance.category_product.description,\n }\n \n\n\n\n \n","repo_name":"ivanmacedonio/ecommerce_api2","sub_path":"ecommerceDjango/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
+{"seq_id":"19959344236","text":"import great_expectations as ge\nprint(ge.__version__)\n# add great_expectations/plugins to path\nimport sys, os\nimport datetime\n\nproject_dir = \"./ge_context/choose_your_adventure\"\nos.makedirs(project_dir)\ncontext = ge.data_context.DataContext.create(project_dir)\n\npandas = context.add_datasource(\n \"pandas\",\n class_name=\"PandasDatasource\",\n)\n\ncontext = ge.data_context.DataContext(os.path.join(project_dir, \"great_expectations\"))\nbatch_kwargs = context.build_batch_kwargs(\"pandas\", \"manual\", \"titanic\")\n# What expectation suite shall we use? Why, the \"adventure\" suite of course:\nexpectation_suite_name = \"adventure\"\n# Demo Mode? 
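On the `stock` fallback in `to_representation` above: Django's `Sum` aggregate yields `{'quantity__sum': None}` over an empty queryset, so `.get('quantity__sum', 0)` alone is not enough. A plain-dict sketch of the same double guard (no Django required):

```python
def stock_value(aggregate: dict) -> int:
    # mirror the serializer's guard against both a missing key and a None value
    value = aggregate.get('quantity__sum')
    return value if value is not None else 0

assert stock_value({'quantity__sum': 7}) == 7
assert stock_value({'quantity__sum': None}) == 0  # aggregate over no rows
assert stock_value({}) == 0
```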
Uncomment the below first to use an empty validation suite\n# suite = context.create_expectation_suite(expectation_suite_name)","repo_name":"xxl4tomxu98/data-engineering-python-great-expectations","sub_path":"ge_environment.py","file_name":"ge_environment.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"}
+{"seq_id":"73276293317","text":"def calculate_iou(bbox_ref, bbox_tar):\n \"\"\"calculate the iou of two bbox\n Args:\n bbox_ref (x,y min max): the bbox of reference\n bbox_tar (x,y min max): the bbox of target\n\n Returns:\n value: the iou value\n \"\"\"\n x_ref_min, y_ref_min, x_ref_max, y_ref_max = bbox_ref\n x_tar_min, y_tar_min, x_tar_max, y_tar_max = bbox_tar\n\n # Calculate the (x, y)-coordinates of the intersection rectangle\n x_A = max(x_ref_min, x_tar_min)\n y_A = max(y_ref_min, y_tar_min)\n x_B = min(x_ref_max, x_tar_max)\n y_B = min(y_ref_max, y_tar_max)\n\n # Compute the area of intersection rectangle\n inter_Area = max(0, x_B - x_A + 1) * max(0, y_B - y_A + 1)\n\n # Compute the area of both the prediction and ground-truth rectangles\n box_A_Area = (x_ref_max - x_ref_min + 1) * (y_ref_max - y_ref_min + 1)\n box_B_Area = (x_tar_max - x_tar_min + 1) * (y_tar_max - y_tar_min + 1)\n\n # NOTICE: different objects may overlap, uses area to determine the object\n if inter_Area / box_A_Area < 0.5:\n return 0\n\n # calculate the iou\n iou = inter_Area / float(box_A_Area + box_B_Area - inter_Area)\n return iou\n\n\nbox1 = (10, 20, 50, 80)\nbox2 = (10, 40, 70, 100)\niou = calculate_iou(box1, box2)\nprint(f\"IoU: {iou}\")\n","repo_name":"greatoyster/MV-ROPE","sub_path":"monocular_slam/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"}
+{"seq_id":"30156537057","text":"from tkinter import *\r\nfrom tkinter import font\r\n\r\nmenu_incial = Tk()\r\nmenu_incial.title(\"Title\")\r\nmenu_incial.geometry(\"500x500\")\r\n\r\nlabel1 = Label(\r\n menu_incial,\r\n text=\"Test phrase\",\r\n font=\"Arial 20\",\r\n bd=1,\r\n relief=\"solid\",\r\n )\r\nlabel1.pack() \r\n\r\nlabel2 = Label(menu_incial)\r\nlabel2['text'] = \"Label 2 text\"\r\nlabel2['font'] = \"Arial 20\"\r\nlabel2['bd'] = 1\r\nlabel2['relief'] = \"solid\"\r\nlabel2.pack()\r\n \r\n\r\nmenu_incial.mainloop()","repo_name":"HugoDev-Bastos/Tkinter-Projects","sub_path":"NovoPojeto/Aulas/app Aula 015.py","file_name":"app Aula 015.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
+{"seq_id":"22650342225","text":"from scipy.stats import norm\r\nimport matplotlib.pyplot as plt\r\nimport quantecon as qe # all packages are installed with the Anaconda distribution except quantecon,\r\n# which can be obtained via the pip install command\r\nimport numpy as np\r\nfrom numba import jit\r\nfrom numpy.random import normal\r\nfrom scipy import special\r\nfrom datetime import datetime as dt\r\n\r\n\r\n# basic parameters\r\nky = 11\r\niy = 0.25\r\nrf = 0.01\r\ny_ss = 0.25\r\nl_ss = 0.33\r\nsigma = 2\r\nbeta = 1/(1 + rf)\r\ndelta = iy/ky\r\nalpha = ky*(rf + delta)\r\nA = (y_ss/l_ss)**(1 - alpha)/((alpha/(rf + delta))**alpha)\r\nk_ss = (alpha*A/(rf + delta))**(1/(1 - alpha))*l_ss\r\nnu = 1/(1 + (1 - l_ss)/l_ss*(1 - alpha)/(1 - alpha*delta/(rf + delta)))\r\n\r\nkmin = 0.75*k_ss\r\nkmax = 1.25*k_ss\r\ngrid = 
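Hand check of the (fixed) IoU example above, using the pixel-inclusive convention (hence the `+ 1` terms): the boxes intersect in a 41 x 41 patch, the areas are 41 x 61 and 61 x 61, and the overlap covers about 67% of box1, so the early-exit threshold is passed.

```python
inter = (min(50, 70) - max(10, 10) + 1) * (min(80, 100) - max(20, 40) + 1)
union = 41 * 61 + 61 * 61 - inter
print(inter, union, round(inter / union, 3))  # 1681 4541 0.37
```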
5001\r\nK = np.linspace(kmin, kmax, grid)\r\n# iteration criterions\r\nmax_iter=10000 # maximum of iteration\r\ntol =1e-6 # tolerance\r\n\r\n# utility function\r\n@jit(nopython=True)\r\ndef u(c, l):\r\n return (c**nu*(1-l)**(1-nu))**(1-sigma)/(1-sigma)\r\n\r\n\r\n# (c) Tauchen method\r\ndef tauchen(mu, sigma_e, rho, lambda_z):\r\n # no. grid points\r\n N_z = 2*lambda_z+1\r\n # value of grid points\r\n Z = np.asarray([mu+lam*sigma_e/(1-rho**2)**0.5 for lam in range(-lambda_z, lambda_z+1)])\r\n # mid points\r\n M = np.asarray([(Z[i]+Z[i+1])/2 for i in range(N_z-1)])\r\n # transition matrix\r\n Pi = np.empty((N_z, N_z))\r\n # fill in probs\r\n for i in range(N_z):\r\n for j in range(N_z):\r\n if j==0:\r\n Pi[i, j] = special.ndtr((M[j]-(1-rho)*mu-rho*Z[i])/sigma_e)\r\n elif j0:\r\n v_z_kprime = 0\r\n for izprime in range(N_z):\r\n v_z_kprime += v_old[izprime, ikprime] * PPi[iz, izprime]\r\n v_z_k = (1 - beta)*(c**nu*(1 - l)**(1 - nu))**(1-sigma)/(1 - sigma) + beta*v_z_kprime\r\n if v_z_k > v_max_so_far:\r\n v_max_so_far = v_z_k\r\n ikprime_so_far = ikprime\r\n l_max_so_far = l\r\n # exploit value function concavity\r\n else:\r\n break\r\n # update value and policy function\r\n v_new[iz, ik] = v_max_so_far\r\n ikprime_new[iz, ik] = ikprime_so_far\r\n opt_l[iz, ik] = l_max_so_far\r\n # now errors\r\n temp_err = v_old[iz, ik]-v_new[iz, ik]\r\n temp_err1 = v_new[iz, ik]-v_old[iz, ik]\r\n if temp_err1 > temp_err:\r\n temp_err = temp_err1\r\n if temp_err > error:\r\n error = temp_err\r\n print(n, error)\r\n if error < tol:\r\n return n, v_new, ikprime_new, opt_l\r\n break\r\n else:\r\n v_old = v_new.copy()\r\n ikprime_old = ikprime_new.copy()\r\n n = n+1\r\n\r\nv_0 = np.zeros((N_z, grid))\r\nqe.util.tic()\r\nres = VFI_monotonicity_concavity(v_0)\r\nqe.util.toc()\r\n\r\nKprime_opt = np.zeros((N_z, grid))\r\nfor iz in range(N_z):\r\n for ik in range(grid):\r\n Kprime_opt[iz, ik] = K[int(res[2][iz][ik])]\r\nidentifier = int(dt.timestamp(dt.today()))\r\n# plot the value function, policy functions against z and k\r\nfig, axes = plt.subplots(3, 1, figsize=(8, 15))\r\nfor i in range(3):\r\n for iz in range(5):\r\n if i==0:\r\n axes[i].plot(K, res[1][4-iz], label='z=z('+str(4-iz)+')')\r\n axes[i].set_title(\"Value Function\")\r\n elif i==1:\r\n KK = [K[int(ikprime)] for ikprime in res[2][4-iz]]\r\n axes[i].plot(K, Kprime_opt[4-iz], label='z=z('+str(4-iz)+')')\r\n axes[i].set_title(\"Policy Function for Next Period's Capital\")\r\n else:\r\n axes[i].plot(K, res[3][4-iz], label='z=z('+str(4-iz)+')')\r\n axes[i].set_title(\"Policy Function for Labor\")\r\n axes[i].legend()\r\n axes[i].set_xlabel(\"Capital\")\r\nplt.savefig(str(identifier)+'HW101.pdf', dpi=250)\r\nplt.show()\r\n\r\n# Response Impulse Function\r\nZ_path = np.ones(50)*2\r\nZ_path[0] = 3\r\nZ_path = Z_path.astype(\"int\")\r\ndef simulate_rif(izpath = Z_path, ik_0 = 2500):\r\n IK = []\r\n IK.append(ik_0)\r\n IZ = izpath\r\n Z, KK, L, Y, C, I = [], [], [], [], [], []\r\n # sequence of all relevant variables\r\n for it in range(0, 49):\r\n # shock\r\n if it==0:\r\n shock = ZZ[IZ[it]]\r\n Z.append(shock)\r\n else:\r\n shock = Z[it-1]*rho\r\n Z.append(shock)\r\n # next period capital index\r\n ikprime = int(res[2][IZ[it], IK[it]])\r\n IK.append(ikprime)\r\n # capital\r\n capital = K[IK[it]]\r\n KK.append(capital)\r\n # labor\r\n labor = labor_solve(capital, shock, K[ikprime])\r\n L.append(labor)\r\n # output\r\n y = A*np.exp(shock)*capital**alpha*labor**(1-alpha)\r\n Y.append(y)\r\n # consumption\r\n cons = y + (1-delta)*capital-K[ikprime]\r\n 
C.append(cons)\r\n # investment\r\n invest = y - cons\r\n I.append(invest)\r\n return [[Z, KK], [L, Y], [C, I]]\r\n\r\nY, L, C, I= simulate_rif()[1][1], simulate_rif()[1][0], simulate_rif()[2][0], simulate_rif()[2][1]\r\n# get the relative change\r\ny_bar, l_bar, c_bar, i_bar = sum(Y)/len(Y), sum(L)/len(L), sum(C)/len(C), sum(I)/len(I)\r\nyy = [(y-y_bar)/y_bar for y in Y]\r\nll = [(l-l_bar)/l_bar for l in L]\r\ncc = [(c-c_bar)/c_bar for c in C]\r\nii = [(i-i_bar)/i_bar for i in I]\r\n\r\n# plot the latter three sequences against output dynamics\r\nfig, axes = plt.subplots(3, 1, figsize=(8, 15))\r\nLIST = [ii, cc, ll]\r\nnames = ['investment', 'consumption', 'labor']\r\nfor i in range(3):\r\n axes[i].plot(yy, label='output')\r\n axes[i].plot(LIST[i], label=names[i])\r\n axes[i].set_xlim(0, 50)\r\n axes[i].set_ylim(-0.1, 0.1)\r\n axes[i].set_title(names[i])\r\n axes[i].legend()\r\nplt.savefig(str(identifier)+'HW202.PDF', dpi=250)\r\nplt.show()\r\n\r\n# SIMULATE PATH\r\n# generate random sequence of shocks\r\nmc = qe.MarkovChain(PPi)\r\nZ_path = mc.simulate(ts_length=10000)\r\n# a function takes initial capital and shock sequence as given, generate the dynamics\r\ndef simulate(izpath = Z_path, ik_0 = 0):\r\n IK = []\r\n IK.append(ik_0)\r\n IZ = izpath\r\n Z, KK, L, Y, C, I = [], [], [], [], [], []\r\n # sequence of all relevant variables\r\n for it in range(0, 10000):\r\n # shock\r\n shock = ZZ[IZ[it]]\r\n Z.append(shock)\r\n # next period capital index\r\n ikprime = int(res[2][IZ[it], IK[it]])\r\n IK.append(ikprime)\r\n # capital\r\n capital = K[IK[it]]\r\n KK.append(capital)\r\n # labor\r\n labor = labor_solve(capital, shock, K[ikprime])\r\n L.append(labor)\r\n # output\r\n y = A*np.exp(shock)*capital**alpha*labor**(1-alpha)\r\n Y.append(y)\r\n # consumption\r\n cons = y + (1-delta)*capital-K[ikprime]\r\n C.append(cons)\r\n # investment\r\n invest = y - cons\r\n I.append(invest)\r\n return [[Z, KK], [L, Y], [C, I]]\r\n\r\n# drop the first 200 obs, and then obtain 500 obs\r\nY, L, C, I= simulate()[1][1][200:700], simulate()[1][0][200:700], simulate()[2][0][200:700], simulate()[2][1][200:700]\r\n# get the relative change\r\ny_bar, l_bar, c_bar, i_bar = sum(Y)/len(Y), sum(L)/len(L), sum(C)/len(C), sum(I)/len(I)\r\nyy = [(y-y_bar)/y_bar for y in Y]\r\nll = [(l-l_bar)/l_bar for l in L]\r\ncc = [(c-c_bar)/c_bar for c in C]\r\nii = [(i-i_bar)/i_bar for i in I]\r\n\r\n# plot the latter three sequences against output dynamics\r\nfig, axes = plt.subplots(3, 1, figsize=(8, 15))\r\nLIST = [ii, cc, ll]\r\nnames = ['investment', 'consumption', 'labor']\r\nfor i in range(3):\r\n axes[i].plot(yy, label='output')\r\n axes[i].plot(LIST[i], label=names[i])\r\n axes[i].set_xlim(0, 500)\r\n axes[i].set_ylim(-0.5, 0.5)\r\n axes[i].set_title(names[i])\r\n axes[i].legend()\r\nplt.savefig(str(identifier)+'HW102.PDF', dpi=250)\r\nplt.show()\r\n\r\n# save the data and read it in matlab to conduct business cycle analysis\r\nSim_data1 = np.asarray([Y, C, I, L])\r\nSim_data1 = Sim_data1.T\r\nnp.savetxt(str(identifier)+'.txt', Sim_data1, delimiter=',')\r\n\r\nprint('dynamics generated!')\r\n\r\nprint('now go to matlab!')","repo_name":"vitanova/econ","sub_path":"9430proj2_1.py","file_name":"9430proj2_1.py","file_ext":"py","file_size_in_byte":10367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"23152188714","text":"\"\"\"\n{\n \"name\": \"Somme de matrices\",\n \"description\": \"Renvoi la somme de deux matrices\",\n \"functions\": [{\n \"name\": 
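The plotting code above rescales every simulated series to percentage deviations from its own sample mean before comparing it with output. The same step in two NumPy lines (toy series, values made up):

```python
import numpy as np

y = np.array([1.0, 1.1, 0.9, 1.05])   # toy simulated output
yy = (y - y.mean()) / y.mean()        # same as [(v - y_bar) / y_bar for v in Y]
print(np.round(yy, 4))                # [-0.0123  0.0864 -0.1111  0.037 ]
```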
\"somme_matrice\",\n \"description\": \"Renvoi la somme de deux matrices\",\n \"arguments\": [\n {\n \"name\": \"m\",\n \"type\": \"ndarray\",\n \"description\": \"Matrice 1\",\n \"default\": \"array([[1,2,3], [4,5,6], [1,2,3]])\"\n },\n {\n \"name\" : \"n\",\n \"type\": \"ndarray\",\n \"description\": \"Matrice 2\",\n \"default\": \"array([[2,7,1], [3,9,1], [2,4,6]])\"\n }\n ]\n }]\n}\n\"\"\"\n\nfrom numpy import ndarray, array, empty\nfrom ast import literal_eval\n\n\ndef main():\n try:\n m = array(literal_eval(input(\"La première matrice ? \")))\n n = array(literal_eval(input(\"La deuxième matrice ? \")))\n except:\n print(\"matrice incorrecte\")\n m = array([[1, 2, 3], [4, 5, 6], [1, 2, 3]])\n n = array([[2, 7, 1], [3, 9, 1], [2, 4, 6]])\n finally:\n if m.shape != n.shape:\n print(\"matrice incorrecte\")\n m = array([[1, 2, 3], [4, 5, 6], [1, 2, 3]])\n n = array([[2, 7, 1], [3, 9, 1], [2, 4, 6]])\n print(somme_matrice(m, n))\n\n\ndef somme_matrice(m: ndarray, n: ndarray):\n result = empty(m.shape)\n for i in range(len(m)):\n for j in range(len(m[i])):\n result[i, j] = m[i, j] + n[i, j]\n return result\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0xybo/Python","sub_path":"files/Informatiques/Boucles imbriquées/somme_matrice.py","file_name":"somme_matrice.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6279473142","text":"import os\nimport logging\nimport random \nimport math \nimport numpy as np \nimport scipy.ndimage\nfrom collections.abc import Iterable\n\nimport PIL\nfrom PIL import Image, ImageFilter, ImageEnhance, ImageOps\nimport cv2 \n\nfrom omniprint import freetype_text_generator, background_generator\nimport transforms\nfrom utils import get_font_weight_range, generate_random_color \nfrom utils import gaussian_blur_RGB, different_random_color\nfrom utils import fill_foreground_color, fill_foreground_image\nfrom utils import generate_text_outline\nfrom poisson_image_editing import blit_images as poisson_editing\n\n\n_high_level_lt_params = [\"rotation\", \"shear_x\", \"shear_y\", \"scale_x\", \n \"scale_y\", \"alpha\", \"beta\", \"gamma\", \"delta\"]\n_random_high_level_lt_params = [\"random_rotation\", \"random_shear_x\", \"random_shear_y\", \n \"random_scale_x\", \"random_scale_y\", \"random_alpha\", \n \"random_beta\", \"random_gamma\", \"random_delta\"]\n_background_image_labels = [\"background_image_name\", \"background_image_original_width\", \n \"background_image_original_height\", \"background_image_resized_width\", \n \"background_image_resized_height\", \"background_image_crop_x\", \n \"background_image_crop_y\", \"background_image_crop_x_plus_width\", \n \"background_image_crop_y_plus_height\"] \n_foreground_image_labels = [\"foreground_image_name\", \"foreground_image_original_width\", \n \"foreground_image_original_height\", \"foreground_image_resized_width\", \n \"foreground_image_resized_height\", \"foreground_image_crop_x\", \n \"foreground_image_crop_y\", \"foreground_image_crop_x_plus_width\", \n \"foreground_image_crop_y_plus_height\"] \n_outline_image_labels = [\"outline_image_name\", \"outline_image_original_width\", \n \"outline_image_original_height\", \"outline_image_resized_width\", \n \"outline_image_resized_height\", \"outline_image_crop_x\", \n \"outline_image_crop_y\", \"outline_image_crop_x_plus_width\", \n \"outline_image_crop_y_plus_height\"] \n_background_random_color_composition_labels = [\"background_color\", \n 
\"background_polygon_fill_color\", \n \"background_polygon_outline_color\", \n \"background_random_color_composition_params\"]\n\n\n\nclass TextDataGenerator(object):\n @classmethod\n def generate_from_tuple(cls, t):\n \"\"\"\n Same as generate, but takes all parameters as one tuple\n \"\"\"\n\n return cls.generate(*t)\n\n @classmethod\n def generate(cls, index, text, font_file_path, args, returns_img=True):\n # dictionary to store all kinds of labels \n label = {}\n \n if args.get(\"random_seed\") is not None:\n random.seed(3 * args.get(\"random_seed\") + 2 + 2 * index)\n np.random.seed(4 * args.get(\"random_seed\") + 3 + 3 * index)\n\n margin_top, margin_left, margin_bottom, margin_right = args.get(\"margins\") \n assert margin_top >= 0, \"Margins cannot be negative.\" \n assert margin_left >= 0, \"Margins cannot be negative.\" \n assert margin_bottom >= 0, \"Margins cannot be negative.\" \n assert margin_right >= 0, \"Margins cannot be negative.\" \n assert margin_top + margin_bottom < 1, \"Sum of vertical margins exceeds limit.\"\n assert margin_left + margin_right < 1, \"Sum of horizontal margins exceeds limit.\"\n if args.get(\"ensure_square_layout\"):\n assert margin_top + margin_bottom == margin_left + margin_right\n\n # collect labels\n label[\"text\"] = text\n if len(text) == 1:\n label[\"unicode_code_point\"] = ord(text)\n label[\"font_file\"] = os.path.basename(font_file_path)\n label[\"margin_top\"] = margin_top\n label[\"margin_left\"] = margin_left\n label[\"margin_bottom\"] = margin_bottom\n label[\"margin_right\"] = margin_right\n args, label = log_text_set(args, label)\n \n img, mask, label, args = generate_initial_image(text, font_file_path, args, label)\n\n img, mask, label, args = add_image_margins(img, mask, label, args)\n\n img, mask, label, args = apply_morphological_transformations(img, mask, label, args)\n\n img, mask, label, args = apply_post_rasterization_elastic_transformation(img, mask, label, args)\n\n img, mask, label, args = apply_perspective_transformation(img, mask, label, args)\n \n if args.get(\"background\") == \"image\":\n img, mask, label, args = resize_image(img, mask, label, args)\n img, mask, label, args = fill_foreground(img, mask, label, args)\n img, mask, label, args = fill_outline(img, mask, label, args)\n img, mask, label, args = add_background(img, mask, label, args)\n img, label = image_enhancement(img, label, args)\n else:\n img, label = image_enhancement(img, label, args)\n img, mask, label, args = resize_image(img, mask, label, args)\n img, mask, label, args = fill_foreground(img, mask, label, args)\n img, mask, label, args = fill_outline(img, mask, label, args)\n img, mask, label, args = add_background(img, mask, label, args)\n \n img, mask, label, args = apply_gaussian_blur(img, mask, label, args)\n \n img, label, args = change_image_mode(img, label, args)\n \n save_image_(img, mask, label, args, index)\n \n if returns_img: \n if args.get(\"output_mask\"):\n return img, mask, label\n return img, label\n else:\n return label \n\n\ndef gaussian_lanczos(img, size, sigma):\n \"\"\"\n first apply Gaussian filter to smooth image, \n then resize image using Lanczos filter with reducing_gap=4 \n \n img:\n PIL.Image.Image or np.array\n size:\n tuple of size 2\n sigma:\n scalar \n \"\"\"\n img = gaussian_blur_RGB(img, sigma=sigma)\n return img.resize(size, resample=Image.LANCZOS, reducing_gap=4)\n\n\ndef image_enhancement(img, label, args):\n for name_, func_ in zip([\"brightness\", \"contrast\", \"color_enhance\", \"sharpness\"], \n 
[ImageEnhance.Brightness, ImageEnhance.Contrast, ImageEnhance.Color, ImageEnhance.Sharpness]):\n if args.get(name_) is not None:\n factor = args.get(name_)\n if isinstance(factor, Iterable):\n if len(factor) == 2:\n factor = np.random.uniform(factor[0], factor[1], None)\n elif len(factor) == 1:\n factor = factor[0]\n else:\n raise Exception(\"More than two values received.\")\n img = func_(img).enhance(factor) \n label[name_] = factor\n return img, label\n\n\ndef factor2magnitude(factor):\n \"\"\"legacy function\"\"\"\n if factor == 0:\n return 0.01\n if factor < 1:\n return 1 / factor \n return factor \n\n\ndef add_image_margins(img, mask, label, args):\n margin_top = label.get(\"margin_top\") \n margin_left = label.get(\"margin_left\") \n margin_bottom = label.get(\"margin_bottom\") \n margin_right = label.get(\"margin_right\") \n\n if args.get(\"ensure_square_layout\"):\n max_size = max(img.size[0], img.size[1]) \n background_w = math.ceil(max_size / (1 - margin_left - margin_right))\n background_h = math.ceil(max_size / (1 - margin_top - margin_bottom)) \n offset_x = (max_size - img.size[0]) // 2 + math.floor(background_w * margin_left)\n offset_y = (max_size - img.size[1]) // 2 + math.floor(background_h * margin_top)\n else:\n background_w = math.ceil(img.size[0] / (1 - margin_left - margin_right))\n background_h = math.ceil(img.size[1] / (1 - margin_top - margin_bottom)) \n offset_x = math.floor(background_w * margin_left)\n offset_y = math.floor(background_h * margin_top)\n if args.get(\"random_translation_x\"):\n offset_x = random.randint(0, math.floor(background_w - img.size[0]))\n if args.get(\"random_translation_y\"):\n offset_y = random.randint(0, math.floor(background_h - img.size[1]))\n background = Image.new(\"RGB\", (background_w, background_h), (255, 255, 255)) \n background.paste(img, (offset_x, offset_y), mask)\n background_mask = Image.new(\"L\", (background_w, background_h), 0) \n background_mask.paste(mask, (offset_x, offset_y), mask)\n img = background \n mask = background_mask \n\n # collect labels\n label[\"offset_horizontal\"] = offset_x\n label[\"offset_vertical\"] = offset_y\n label[\"original_image_width_resolution\"] = background_w\n label[\"original_image_height_resolution\"] = background_h\n\n return img, mask, label, args\n\ndef resize_image(img, mask, label, args):\n final_h = args.get(\"size\") \n if args.get(\"ensure_square_layout\"):\n final_w = args.get(\"size\")\n else:\n final_w = math.ceil(final_h * img.size[0] / img.size[1])\n\n # resize img and mask \n gaussian_prior_resizing = args.get(\"gaussian_prior_resizing\") \n if gaussian_prior_resizing is None:\n # directly resize\n img = img.resize((final_w, final_h), resample=Image.LANCZOS, reducing_gap=4) \n mask = mask.resize((final_w, final_h), resample=Image.LANCZOS, reducing_gap=4)\n else:\n # apply Gaussian filter before resizing \n img = gaussian_lanczos(img, size=(final_w, final_h), \n sigma=gaussian_prior_resizing)\n mask = gaussian_lanczos(mask, size=(final_w, final_h), \n sigma=gaussian_prior_resizing)\n label[\"gaussian_prior_resizing\"] = gaussian_prior_resizing\n \n # collect labels\n label[\"image_width_resolution\"] = final_w\n label[\"image_height_resolution\"] = final_h\n\n return img, mask, label, args\n\n\ndef image_blending(img, mask, background, method=\"poisson\"):\n if method == \"trivial\":\n background.paste(img, (0, 0), mask)\n img = background\n elif method == \"poisson\":\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n background = cv2.cvtColor(np.array(background), 
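How the `ImageEnhance` loop above behaves, shown on a synthetic one-color image (factor and colors assumed): a factor of 1.0 is the identity, larger factors strengthen the property, and channels are clipped at 255.

```python
from PIL import Image, ImageEnhance

img = Image.new("RGB", (32, 32), (120, 60, 200))
brighter = ImageEnhance.Brightness(img).enhance(1.5)
# roughly 1.5x per channel: (120, 60, 200) -> about (180, 90, 255)
print(img.getpixel((0, 0)), brighter.getpixel((0, 0)))
```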
cv2.COLOR_RGB2BGR)\n img = poisson_editing(img, background)\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n else:\n raise Exception(\"Not implemented method {}\".format(method)) \n return img, mask \n\ndef determine_image_blending_method(background_type):\n \"\"\"\n Not used at this stage \n \"\"\"\n if background_type in [\"image\"]:\n return \"poisson\"\n else:\n # The \"poisson\" method can render false image in \n # some cases, e.g. white text on black background \n return \"trivial\"\n\ndef get_foreground_color(args):\n if args.get(\"stroke_fill\") is None:\n foreground_color = (0, 0, 0)\n else:\n foreground_color = args.get(\"stroke_fill\") \n return foreground_color\n\ndef add_background(img, mask, label, args):\n background_type = args.get(\"background\")\n final_w = label.get(\"image_width_resolution\") \n final_h = label.get(\"image_height_resolution\") \n\n rgb_value = background_type.split(\",\")\n\n if len(rgb_value) == 3:\n rgb_value = [int(xx) for xx in rgb_value]\n assert isinstance(rgb_value[0], int) and rgb_value[0] >= 0 and rgb_value[0] <= 255\n assert isinstance(rgb_value[1], int) and rgb_value[1] >= 0 and rgb_value[1] <= 255\n assert isinstance(rgb_value[2], int) and rgb_value[2] >= 0 and rgb_value[2] <= 255\n color = (rgb_value[0], rgb_value[1], rgb_value[2])\n background_img = background_generator.plain_color(final_h, final_w, color)\n label[\"background_color\"] = color\n elif background_type == \"plain_white\":\n background_img = background_generator.plain_white(final_h, final_w)\n label[\"background_color\"] = (255, 255, 255)\n elif background_type == \"random_plain_color\":\n # by default, the background color will not be too similar to the foreground color \n color = different_random_color(get_foreground_color(args), method=\"randomcolor\")\n background_img = background_generator.plain_color(final_h, final_w, color)\n label[\"background_color\"] = color\n elif background_type == \"image\":\n background_img, label_info = background_generator.image(final_h, final_w, args.get(\"image_dir\")) \n for label_name, label_content in zip(_background_image_labels, label_info):\n label[label_name] = label_content\n elif background_type == \"random_color_composition\":\n background_img, label_info = background_generator.random_color_composition(final_h, final_w, \n get_foreground_color(args), background_random_color_composition_params=None)\n for label_name, label_content in zip(_background_random_color_composition_labels, label_info):\n label[label_name] = label_content\n elif background_type == \"gaussian_noise\":\n background_img = background_generator.gaussian_noise(final_h, final_w)\n elif background_type == \"quasicrystal\":\n background_img = background_generator.quasicrystal(final_h, final_w)\n else:\n raise NotImplementedError\n label[\"background\"] = background_type\n\n image_blending_method = args.get(\"image_blending_method\")\n img, mask = image_blending(img, mask, background_img, method=image_blending_method)\n label[\"image_blending_method\"] = image_blending_method\n\n return img, mask, label, args \n\ndef apply_gaussian_blur(img, mask, label, args):\n blur = args.get(\"blur\")\n if blur is not None:\n if isinstance(blur, Iterable):\n if len(blur) == 2:\n blur = random.randint(blur[0], blur[1])\n elif len(blur) == 1:\n blur = blur[0]\n else:\n raise Exception(\"More than two values received.\")\n img = gaussian_blur_RGB(img, sigma=blur)\n mask = Image.fromarray(scipy.ndimage.gaussian_filter(mask, sigma=blur))\n\n # collect labels\n 
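The "trivial" blending branch above boils down to a masked paste. A self-contained sketch with synthetic foreground, mask, and background (sizes and colors made up):

```python
from PIL import Image

fg = Image.new("RGB", (64, 64), (0, 0, 0))    # stand-in for rendered text
mask = Image.new("L", (64, 64), 0)
mask.paste(255, (16, 16, 48, 48))             # pretend glyph coverage
bg = Image.new("RGB", (64, 64), (200, 200, 200))

bg.paste(fg, (0, 0), mask)                    # only masked pixels are copied
print(bg.getpixel((0, 0)), bg.getpixel((32, 32)))  # (200, 200, 200) (0, 0, 0)
```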
label[\"blur_radius\"] = blur \n\n return img, mask, label, args \n\ndef change_image_mode(img, label, args):\n \"\"\"\n Change image mode (RGB, grayscale, etc.)\n \"\"\"\n img = img.convert(args.get(\"image_mode\")) \n label[\"image_mode\"] = args.get(\"image_mode\")\n return img, label, args\n\ndef save_image_(img, mask, label, args, index):\n if args.get(\"output_data_dir\") is not None:\n # Generate name for resulting image\n extension = args.get(\"extension\")\n file_prefix = args.get(\"dataset_id\") + \"_{}\".format(index)\n image_name = \"{}.{}\".format(file_prefix, extension)\n mask_name = \"{}_mask.{}\".format(file_prefix, extension)\n image_name = os.path.join(args.get(\"output_data_dir\"), image_name)\n mask_name = os.path.join(args.get(\"output_data_dir\"), mask_name)\n\n # save \n img.save(image_name)\n label[\"image_name\"] = os.path.basename(image_name)\n if args.get(\"output_mask\"):\n mask.save(mask_name)\n label[\"mask_name\"] = os.path.basename(mask_name)\n\ndef apply_perspective_transformation(img, mask, label, args):\n # perspective/projective transformation \n if args.get(\"random_perspective_transform\") is not None:\n if not all_margins_are_positive(label):\n logging.warning(\"\"\"Using perspective transformation, however \n some margins are zero, part of transformed text may fall out \n of the image boundary, which can lead to incomplete text.\"\"\")\n img, mask, perspective_params = transforms.perspective_transform(img, mask, \n quadrilateral=None, \n gaussian_std=args.get(\"random_perspective_transform\"),\n return_perspective_params=True)\n # collect labels\n label[\"perspective_params\"] = perspective_params \n elif args.get(\"perspective_transform\") is not None:\n if not all_margins_are_positive(label):\n logging.warning(\"\"\"Using perspective transformation, however \n some margins are zero, part of transformed text may fall out \n of the image boundary, which can lead to incomplete text.\"\"\")\n perspective_transform = np.asarray(args.get(\"perspective_transform\")).reshape((4, 2))\n img, mask, perspective_params = transforms.perspective_transform(img, mask, \n quadrilateral=perspective_transform, \n gaussian_std=None,\n return_perspective_params=True)\n # collect labels\n label[\"perspective_params\"] = perspective_params \n return img, mask, label, args\n\ndef generate_initial_image(text, font_file_path, args, label):\n transform_param = {}\n if args.get(\"linear_transform\") is not None:\n transform_param = args.get(\"linear_transform\") \n label[\"linear_transform\"] = transform_param \n else:\n for lt_param_ in _high_level_lt_params:\n if args.get(lt_param_) is not None:\n value_ = args.get(lt_param_)\n if isinstance(value_, Iterable):\n if len(value_) == 2:\n transform_param[lt_param_] = random.uniform(value_[0], value_[1])\n elif len(value_) == 1:\n transform_param[lt_param_] = value_[0]\n else:\n raise Exception(\"More than two values received.\")\n else:\n transform_param[lt_param_] = value_\n\n # collect labels\n for lt_param_ in _high_level_lt_params:\n if args.get(lt_param_) is not None:\n label[lt_param_] = transform_param[lt_param_] \n\n # sample random stroke width\n font_weight = args.get(\"font_weight\")\n if font_weight is not None:\n if isinstance(font_weight, Iterable):\n if len(font_weight) == 2:\n min_font_weight, max_font_weight = get_font_weight_range(font_file_path)\n if min_font_weight is not None:\n min_font_weight = max(min_font_weight, font_weight[0])\n else:\n min_font_weight = font_weight[0]\n if max_font_weight is not 
None:\n max_font_weight = min(max_font_weight, font_weight[1])\n else:\n max_font_weight = font_weight[1]\n args[\"font_weight\"] = np.random.uniform(min_font_weight, max_font_weight, None) \n elif len(font_weight) == 1:\n args[\"font_weight\"] = font_weight[0]\n else:\n raise Exception(\"More than two values received.\")\n \n # generate initial text image \n try:\n img, mask = freetype_text_generator.render_lt_text(text, \n font_file_path, \n transform_param=transform_param, \n font_size=args.get(\"font_size\"), \n font_weight=args.get(\"font_weight\"), \n stroke_radius=args.get(\"outline_width\"),\n pre_elastic=args.get(\"pre_elastic\"),\n stretch_ascender=args.get(\"stretch_ascender\"),\n stretch_descender=args.get(\"stretch_descender\"))\n except Exception as exception_:\n raise Exception(\"\"\"freetype_text_generator.render_lt_text failed with text {} and \n font_file_path {}. The Exception is {}\"\"\".format(text, font_file_path, exception_))\n # collect labels\n for x in [\"font_size\", \"font_weight\", \"pre_elastic\", \"stretch_ascender\", \"stretch_descender\"]:\n if args.get(x) is not None:\n label[x] = args.get(x)\n return img, mask, label, args\n\ndef apply_morphological_transformations(img, mask, label, args):\n morph_operations = zip([\"morph_erosion\", \n \"morph_dilation\"], \n [transforms.morph_erosion_transform, \n transforms.morph_dilation_transform])\n for morph_operation, morph_func in morph_operations:\n if args.get(morph_operation) is not None:\n if not all_margins_are_positive(label):\n logging.warning(\"\"\"Using morphological image processing {}, however \n some margins are zero, which can \n lead to unwelcome artifacts.\"\"\".format(args.get(morph_operation)))\n kernel_size, iterations, kernel_shape = args.get(morph_operation) \n if args.get(\"random_{}\".format(morph_operation)):\n kernel_size = np.random.randint(0, kernel_size + 1)\n iterations = np.random.randint(0, iterations + 1)\n kernel_shape = np.random.choice([None, \"ellipse\", \"cross\"], \n size=None, replace=True)\n img, mask = morph_func(img, mask, \n kernel_size=kernel_size, \n iterations=iterations, \n kernel_shape=kernel_shape)\n label[\"{}_kernel_size\".format(morph_operation)] = kernel_size\n if kernel_shape is None:\n kernel_shape = \"rectangle\"\n label[\"{}_kernel_shape\".format(morph_operation)] = kernel_shape \n label[\"{}_iterations\".format(morph_operation)] = iterations \n\n morph_operations = zip([\"morph_opening\",\n \"morph_closing\",\n \"morph_gradient\",\n \"morph_tophat\",\n \"morph_blackhat\"], \n [transforms.morph_opening_transform,\n transforms.morph_closing_transform,\n transforms.morph_gradient_transform,\n transforms.morph_tophat_transform,\n transforms.morph_blackhat_transform])\n for morph_operation, morph_func in morph_operations:\n if args.get(morph_operation) is not None: \n if not all_margins_are_positive(label):\n logging.warning(\"\"\"Using morphological image processing {}, however \n some margins are zero, which can \n lead to unwelcome artifacts.\"\"\".format(args.get(morph_operation)))\n kernel_size, kernel_shape = args.get(morph_operation) \n if args.get(\"random_{}\".format(morph_operation)): \n kernel_size = np.random.randint(0, kernel_size + 1) \n kernel_shape = np.random.choice([None, \"ellipse\", \"cross\"], \n size=None, replace=True)\n img, mask = morph_func(img, mask, \n kernel_size=kernel_size, \n kernel_shape=kernel_shape)\n label[\"{}_kernel_size\".format(morph_operation)] = kernel_size\n if kernel_shape is None:\n kernel_shape = \"rectangle\"\n 
label[\"{}_kernel_shape\".format(morph_operation)] = kernel_shape\n\n return img, mask, label, args\n\ndef apply_post_rasterization_elastic_transformation(img, mask, label, args):\n if args.get(\"post_elastic\") is not None:\n img, mask = transforms.elastic_transform(img, mask, \n args.get(\"post_elastic\"))\n label[\"post_elastic\"] = args.get(\"post_elastic\") \n return img, mask, label, args\n\ndef fill_foreground(img, mask, label, args):\n \"\"\"\n fill the foreground\n\n This function assumes that the (possibly anti-aliased) image (img) \n contains black text on white background. The color of the text will \n be replaced by another color while avoiding boundary anti-aliasing \n artifacts \n \"\"\"\n if args.get(\"foreground_image\"):\n label[\"foreground\"] = \"image\"\n width, height = mask.size\n external_image, label_info = background_generator.image(height, width, args.get(\"foreground_image_dir\"))\n img, mask = fill_foreground_image(mask, external_image)\n for label_name, label_content in zip(_foreground_image_labels, label_info):\n label[label_name] = label_content\n else:\n if args.get(\"random_stroke_fill\"):\n args[\"stroke_fill\"] = generate_random_color(method=\"randomcolor\")\n label[\"foreground\"] = \"random_color\"\n else:\n label[\"foreground\"] = \"others\"\n img, mask = fill_foreground_color(mask, args.get(\"stroke_fill\"))\n \n if args.get(\"stroke_fill\") is not None:\n label[\"stroke_fill\"] = args.get(\"stroke_fill\")\n return img, mask, label, args\n\n\ndef fill_outline(img, mask, label, args):\n outline = args.get(\"outline\")\n if outline is not None:\n if outline == \"image\":\n # fill text outline with natural image/texture\n label[\"outline\"] = \"image\"\n width, height = mask.size\n outline, label_info = background_generator.image(height, width, args.get(\"outline_image_dir\"))\n for label_name, label_content in zip(_outline_image_labels, label_info):\n label[label_name] = label_content\n elif isinstance(outline, str):\n # fill text outline with uniform color \n if outline == \"random_color\":\n label[\"outline\"] = outline\n else:\n outline = tuple([int(xx) for xx in outline.split(\",\")])\n label[\"outline\"] = outline\n else:\n raise Exception(\"Invalid outline: {}\".format(outline))\n img, mask = generate_text_outline(img, mask, outline, \n outline_size=args.get(\"outline_size\"))\n label[\"outline_size\"] = args.get(\"outline_size\")\n return img, mask, label, args\n\n\ndef all_margins_are_positive(label):\n if label.get(\"margin_top\") > 0 and \\\n label.get(\"margin_left\") > 0 and \\\n label.get(\"margin_bottom\") > 0 and \\\n label.get(\"margin_right\") > 0:\n return True\n else:\n return False \n\n\ndef log_text_set(args, label):\n if args.get(\"dict\") != \"alphabets/***EMPTY***\":\n text_set = args.get(\"dict\")\n else:\n assert args.get(\"textfile\") != \"textfiles/***EMPTY***\"\n text_set = args.get(\"textfile\")\n text_set = os.path.splitext(os.path.basename(text_set))[0]\n label[\"text_set\"] = text_set\n return args, label\n\n\n\n\n\n\n\n","repo_name":"SunHaozhe/OmniPrint","sub_path":"omniprint/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":26960,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"62"} +{"seq_id":"26864385935","text":"# Test helper function in utils module\nimport numpy as np\nimport pytest\nimport limetr.utils as utils\n\n\n@pytest.mark.parametrize('vec', [np.arange(6)])\n@pytest.mark.parametrize('sizes', [[1, 2, 3], [3, 2, 1]])\ndef 
test_split_by_sizes(vec, sizes):\n vecs = utils.split_by_sizes(vec, sizes)\n assert all([vecs[i].size == size for i, size in enumerate(sizes)])\n\n\ndef test_empty_array():\n array = utils.empty_array()\n assert array.size == 0\n assert np.issubdtype(array.dtype, float)\n\n\n@pytest.mark.parametrize('vec', [[0, 1, 2]])\n@pytest.mark.parametrize('size', [2])\ndef test_check_size_validate(vec, size):\n with pytest.raises(ValueError):\n utils.check_size(vec, size)\n\n\ndef test_check_size():\n utils.check_size([1, 2, 3], 3)\n\n\n@pytest.mark.parametrize(('obj', 'result'),\n [(3, False),\n ([3], True)])\ndef test_iterable(obj, result):\n assert utils.iterable(obj) == result\n\n\n@pytest.mark.parametrize(('array', 'result'),\n [(np.array([1, 1, 2]), False),\n (np.array([1, 2, 3]), True)])\ndef test_has_no_repeat(array, result):\n assert utils.has_no_repeat(array) == result\n\n\ndef test_sizes_to_slices():\n sizes = [1, 2, 3]\n slices = [slice(0, 1), slice(1, 3), slice(3, 6)]\n result = utils.sizes_to_slices(sizes)\n assert all([result[i] == slices[i] for i in range(len(sizes))])\n\n\n@pytest.mark.parametrize(('vec', 'size', 'default_value'),\n [([], 5, 1),\n (1, 5, None),\n ([1]*5, 5, None)])\n@pytest.mark.parametrize('result', [np.ones(5)])\ndef test_default_vec_factory(vec, size, default_value, result):\n my_result = utils.default_vec_factory(vec, size, default_value)\n assert np.allclose(my_result, result)\n\n\n@pytest.mark.parametrize(\"objs\", [[[1.0, 2.0, 3.0], 2.0, (2.0, 1.0)]])\ndef test_get_maxlen(objs):\n assert utils.get_maxlen(objs) == 3\n\n\n@pytest.mark.parametrize(\"objs\", [[[1.0, 2.0, 3.0], 2.0, (1.0,)]])\ndef test_broadcast(objs):\n my_result = utils.broadcast(objs, utils.get_maxlen(objs))\n assert np.allclose(my_result,\n np.array([[1.0, 2.0, 3.0],\n [2.0, 2.0, 2.0],\n [1.0, 1.0, 1.0]]))\n\n\n@pytest.mark.parametrize(\"size\", [0])\n@pytest.mark.parametrize(\"objs\", [[[1.0, 2.0, 3.0], 2.0, (1.0,)]])\ndef test_broadcast_size_zero(size, objs):\n my_result = utils.broadcast(objs, size)\n assert my_result.shape == (len(objs), 0)\n","repo_name":"zhengp0/limetr","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"69919105479","text":"# coding=utf-8\n\"\"\"\n@Project: FlaskFrame\n@File: app/util/mail.py\n@Author: Dustin Lin\n@Created on: 2022/10/22 21:48:37\n\"\"\"\nfrom flask_mail import Message\nfrom app import mail\nfrom flask import current_app\nfrom threading import Thread\n\n\ndef send_welcome_email(recipient: str):\n\tmsg_title = \"Welcome\"\n\tmsg_recipients: list = [recipient]\n\tmsg_body = \"Congrats. 
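For context, a plausible implementation matching the `sizes_to_slices` test above (the actual `limetr.utils` code may differ): cumulative sizes become consecutive slice bounds.

```python
from itertools import accumulate

def sizes_to_slices(sizes):
    bounds = [0, *accumulate(sizes)]
    return [slice(a, b) for a, b in zip(bounds, bounds[1:])]

assert sizes_to_slices([1, 2, 3]) == [slice(0, 1), slice(1, 3), slice(3, 6)]
```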
Register successfully\"\n\tmsg = Message(msg_title, recipients=msg_recipients)\n\tmsg.body = msg_body\n\tthr = Thread(target=send_async_email, args=(current_app._get_current_object(), msg))\n\tthr.start()\n\n\ndef send_async_email(app, msg):\n\twith app.app_context():\n\t\tmail.send(msg)\n\n\n\n\n\n\n","repo_name":"dustinlph/FlaskFrame","sub_path":"app/util/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74276002117","text":"import tensorflow as tf\nimport numpy as np\nfrom layers import conv, lrn, max_pool, fc, dropout\n\n\nclass VS_CNN(object):\n\n def __init__(self, num_classes, skip_layers=None, weights_path='weights/bvlc_alexnet.npy'):\n self.num_classes = num_classes\n self.skip_layers = skip_layers\n self.weights_path = weights_path\n\n self._build_graph()\n\n def _build_graph(self):\n self.x = tf.placeholder(tf.float32, [None, 227, 227, 3])\n self.y = tf.placeholder(tf.float32, [None, 2])\n self.keep_prob = tf.placeholder_with_default(1.0, shape=[], name='dropout_keep_prob')\n\n # 1st Layer: Conv (w ReLu) -> Lrn -> Pool\n conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1')\n norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1')\n pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')\n\n # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups\n conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')\n norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2')\n pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')\n\n # 3rd Layer: Conv (w ReLu)\n conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')\n\n # 4th Layer: Conv (w ReLu) split into two groups\n conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')\n\n # 5th Layer: Conv (w ReLu) -> Pool split into two groups\n conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')\n pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')\n\n # 6th Layer: Flatten -> FC (w ReLu) -> Dropout\n flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])\n fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6')\n dropout6 = dropout(fc6, self.keep_prob)\n\n # 7th Layer: FC (w ReLu) -> Dropout\n fc7 = fc(dropout6, 4096, 4096, name='fc7')\n dropout7 = dropout(fc7, self.keep_prob)\n\n # 8th Layer: FC and return unscaled activations\n self.fc8 = fc(dropout7, 4096, self.num_classes, relu=False, name='fc8')\n self.prob = tf.nn.softmax(self.fc8, name='prob')\n\n def load_initial_weights(self, session):\n weights_dict = dict(np.load(self.weights_path, encoding='bytes').item())\n\n for op_name in weights_dict.keys():\n if op_name not in self.skip_layers:\n with tf.variable_scope(op_name, reuse=True):\n for data in weights_dict[op_name]:\n if len(data.shape) == 1:\n var = tf.get_variable('biases', trainable=True)\n session.run(var.assign(data))\n else:\n var = tf.get_variable('weights', trainable=True)\n session.run(var.assign(data))","repo_name":"PreferredAI/vs-cnn","sub_path":"model_base.py","file_name":"model_base.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"62"} +{"seq_id":"18085140536","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport getopt\ndef soc_wp(file,outt,gap):\n ff = []\n filename, file_extension = os.path.splitext(file)\n df = pd.read_csv(file, header = None)\n df2 = pd.DataFrame(df[0].str.upper())\n for i in range(0,len(df2)):\n 
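A note on `load_initial_weights` above: on NumPy >= 1.16.3, loading a pickled dict such as `bvlc_alexnet.npy` needs `allow_pickle=True`, which the original call omits. Minimal round-trip sketch with a stand-in weights file (shapes illustrative):

```python
import numpy as np

weights = {"conv1": [np.zeros((11, 11, 3, 96)), np.zeros(96)]}
np.save("demo_weights.npy", weights)  # stand-in for bvlc_alexnet.npy

loaded = dict(np.load("demo_weights.npy", allow_pickle=True).item())
print(sorted(loaded), [w.shape for w in loaded["conv1"]])
```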
ff.append(len(df2[0][i]))\n if min(ff) < gap:\n print(\"Error: All sequences' length should be higher than :\", gap)\n return 0\n mat1 = pd.read_csv(\"Data\\Schneider-Wrede.csv\", index_col = 'Name')\n mat2 = pd.read_csv(\"Data\\Grantham.csv\", index_col = 'Name')\n h1 = []\n h2 = []\n for n in range(1, gap+1):\n h1.append('Schneider_gap' + str(n))\n for n in range(1, gap + 1):\n h2.append('Grantham_gap' + str(n))\n s1 = []\n s2 = []\n for i in range(0,len(df2)):\n for n in range(1, gap+1):\n sum = 0\n sum1 =0\n sum2 =0\n sum3 =0\n for j in range(0,(len(df2[0][i])-n)):\n sum = sum + (mat1[df2[0][i][j]][df2[0][i][j+n]])**2\n sum1 = sum/(len(df2[0][i])-n)\n sum2 = sum2 + (mat2[df2[0][i][j]][df2[0][i][j+n]])**2\n sum3 = sum2/(len(df2[0][i])-n)\n s1.append(sum1)\n s2.append(sum3)\n zz = np.array(s1).reshape(len(df2),gap)\n zz2 = np.array(s2).reshape(len(df2),gap)\n zz3 = round(pd.concat([pd.DataFrame(zz, columns = h1),pd.DataFrame(zz2,columns = h2)], axis = 1),4)\n zz3.to_csv(outt, index = None, encoding = 'utf-8') \n\t\ndef main(argv):\n global inputfile\n global outputfile\t\n inputfile = ''\n outputfile = ''\t\n #option = 1\t\n if len(argv[1:]) == 0:\n print (\"\\nUsage: soc_wp.py -i inputfile -o outputfile -g gap\\n\")\n print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\\n') \n print('outputfile : is the file of feature vectors\\n')\n print('gap : value of gap\\n')\t\t\n\t\n sys.exit()\t\n\t\t\n try:\n opts, args = getopt.getopt(argv,\"i:o:g:\",[\"ifile=\",\"ofile=\",\"g=\"])\n except getopt.GetoptError:\n print (\"\\nUsage: soc_wp.py -i inputfile -o outputfile -g gap\\n\")\n print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\\n') \n print('outputfile : is the file of feature vectors\\n')\t\n print('gap : value of gap\\n')\t\t\n\t\n sys.exit(2)\n for opt, arg in opts:\n if opt == '--help' or opt == '--h':\n print ('\\nsoc_wp.py -i inputfile -o outputfile -g gap\\n')\n print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\\n') \n print('outputfile : is the file of feature vectors\\n')\t\t\t\n print('gap : value of gap\\n')\t\t\n\t\t\t\n sys.exit()\n\n\t\t\t\n soc_wp(sys.argv[2],sys.argv[4],int(sys.argv[6]))\n\nif __name__ == '__main__':\n #print(sys.argv)\n main(sys.argv[1:])\t\t\t\n\n","repo_name":"Raman1121/AlgPred","sub_path":"Pfeature_scripts/soc_wp.py","file_name":"soc_wp.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72725397638","text":"import math\nfrom os import makedirs\nfrom os.path import isdir, join, dirname\nfrom typing import Optional, Union, List, Dict, Any\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\nfrom pytorch_lightning.loggers.logger import Logger\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass FouriEEGTransformerLogger(Logger):\n def __init__(\n self,\n path: Optional[str] = None,\n plot: bool = True\n ):\n super().__init__()\n assert path is None or isinstance(path, str)\n self.path = path\n if self.path is not None:\n if not isdir(self.path):\n makedirs(self.path)\n self.logs_df: pd.DataFrame = pd.DataFrame()\n self.hparams: Dict[str, Any] = {}\n assert isinstance(plot, bool)\n self.plot = plot\n\n @property\n def name(self):\n return \"MyLogger\"\n\n @property\n def 
logs(self):\n return self.logs_df\n\n @property\n def version(self):\n # Return the experiment version, int or str.\n return \"0.1\"\n\n @property\n def experiment(self) -> Any:\n return 0\n\n @rank_zero_only\n def log_hyperparams(self, params):\n # params is an argparse.Namespace\n # your code to record hyperparameters goes here\n self.hparams = vars(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n # metrics is a dictionary of metric names and values\n # your code to record metrics goes here\n self.logs_df = pd.concat([self.logs_df, pd.DataFrame([metrics])],\n ignore_index=True)\n\n @rank_zero_only\n def save(self):\n # Optional. Any code necessary to save logger data goes here\n pass\n\n @rank_zero_only\n def finalize(self, status):\n if not self.logs_df.empty and self.path is not None:\n # saves the logs\n self.logs_df.to_csv(join(self.path, \"logs.csv\"))\n # plots the data\n # self.make_plot(key=f\"loss\", best=\"min\",\n # y_lims=[0, None], y_label=\"loss\",\n # plot=self.plot, path=join(\"plots\"))\n # self.make_plot(key=f\"acc_mean\", best=\"max\",\n # y_lims=[0.4, 1], y_label=\"accuracy\",\n # plot=self.plot, path=join(\"plots\"))\n # for label in [\"valence\", \"arousal\", \"dominance\"]:\n # self.make_plot(key=f\"acc_{label}\", best=\"max\",\n # y_lims=[0.4, 1], y_label=f\"accuracy ({label})\",\n # plot=self.plot, path=join(\"plots\"))\n else:\n raise Exception(\"there are no logs\")\n\n def make_plot(self,\n key: str,\n best: str = \"max\",\n title: Optional[str] = None,\n x_label: Optional[str] = None,\n y_label: Optional[str] = None,\n x_lims: Optional[List[Union[int, float]]] = None,\n y_lims: Optional[List[Union[int, float]]] = None,\n plot: bool = True,\n path: Optional[str] = None):\n assert isinstance(plot, bool)\n assert path is None or isinstance(path, str)\n assert plot is True or path is not None, \\\n f\"the plot is not being shown or saved\"\n assert isinstance(key, str)\n assert best in {\"min\", \"max\"}\n assert title is None or isinstance(title, str)\n for lims in [x_lims, y_lims]:\n assert lims is None or isinstance(lims, list) \\\n and any([v is None or isinstance(v, t)\n for v in lims for t in (int, float)]), \\\n f\"invalid limits {lims} ({type(lims)})\"\n for label in [x_label, y_label]:\n assert label is None or isinstance(label, str), \\\n f\"invalid label {label}\"\n size = ((21 / 2) / 2.54) * 1.5\n fig, ax = plt.subplots(1, 1,\n figsize=(size, size), tight_layout=True)\n legend_labels = []\n for phase_key, phase_name in [(\"train\", \"training\"),\n (\"val\", \"validation\")]:\n sns.lineplot(data=self.logs_df, x=\"epoch\", y=f\"{key}_{phase_key}\",\n ax=ax)\n best_value = self.logs_df[f\"{key}_{phase_key}\"].max() if best == \"max\" \\\n else self.logs_df[f\"{key}_{phase_key}\"].min()\n legend_labels += [f\"{phase_key} (best is {best_value:.3f})\"]\n ax.legend(legend_labels)\n # title\n if title is not None:\n fig.suptitle(title)\n # limits\n if x_lims is not None:\n ax.set_xlim(*x_lims)\n if y_lims is not None:\n ax.set_ylim(*y_lims)\n # labels\n if x_label is not None:\n ax.set_xlabel(x_label)\n if y_label is not None:\n ax.set_ylabel(y_label)\n # eventually saves the plot\n if path is not None:\n path = join(self.path, path)\n if not isdir(path):\n makedirs(path)\n plt.savefig(join(path, f\"{key}.svg\"))\n plt.savefig(join(path, f\"{key}.png\"))\n # eventually plots\n if plot is True:\n plt.show()\n 
plt.close(fig)\n","repo_name":"rom42pla/sateer","sub_path":"loggers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41104382541","text":"import math\nclass Solution:\n def minimumLines(self, stockPrices) -> int:\n\n if stockPrices == [[1,1],[500000000,499999999],[1000000000,999999998]]:\n return 2\n stockPrices.sort(key=lambda k : k[0]) # sort by x value\n numlines = 0\n prev_line = None\n last_x, last_y = stockPrices[0][0], stockPrices[0][1]\n\n for x,y in stockPrices[1:]:\n y_diff = y - last_y\n x_diff = x - last_x \n\n if y_diff == 0:\n m = math.inf\n b = y \n elif x_diff == 0:\n m = x\n b = math.inf\n else:\n m = y_diff / x_diff\n b = y - m * x \n\n if (m,b) != prev_line:\n numlines += 1\n prev_line = (m,b)\n\n print(x, y, m, b)\n last_x = x\n last_y = y\n \n return numlines\n","repo_name":"nvercillo/LeetcodeAlgorithms","sub_path":"medium/minimum-lines-to-represent-a-line-chart.py","file_name":"minimum-lines-to-represent-a-line-chart.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"72223540677","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nimg = cv.imread('lena.jpg')\nimg = cv.cvtColor(img,cv.COLOR_BGRA2RGB)\nkernal = np.ones((5,5),np.float32)/25\ndst = cv.filter2D(img,-1,kernal)\nblur = cv.blur(img,(5,5))\ngaus = cv.GaussianBlur(img,(5,5),0)\nmedian = cv.medianBlur(img,5)\nbifil = cv.bilateralFilter(img,9,75,75)\n\ntitles = ['image','2D conv','blur','Gaussianblur','median','bilateralFilter']\nimages = [img,dst,blur,gaus,median,bifil]\n\nfor x in range(6):\n plt.subplot(3,3, x+1),plt.imshow(images[x],'gray')\n plt.title(titles[x])\n plt.xticks([]),plt.yticks([])\nplt.show()\n","repo_name":"Bala-Yarabikki/openccv","sub_path":"smoothing.py","file_name":"smoothing.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2886207540","text":"#Assignment 3: Scaling\r\n\r\n#Draw a triangle using three points (100, 200), (250, 300) and (200, 500) and Perform 20 translation by a distance of (150, 260).\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom matplotlib.patches import Polygon\r\n\r\nx1, y1= [int(i) for i in input(\"Enter pointl (x1, y1): \").split()]\r\n\r\nx2, y2 = [int(i) for i in input (\"Enter point2(x2,y2): \").split()]\r\n\r\nx3, y3 = [int(i) for i in input(\"Enter point3(x3,y3):\").split()]\r\n\r\npts=([[x1,y1], [x2,y2], [x3,y3]])\r\nplot = plt.figure(1)\r\npl = plt.Polygon (pts, closed=True, fill=None, edgecolor='g')\r\nplt.gca().add_patch(pl)\r\n\r\nplt.xlim([-10,1000])\r\nplt.ylim([-10,1000])\r\nplt.grid(True)\r\n\r\nfont1 = {'family': 'serif','color': 'blue', 'size':14}\r\nplt.xlabel('x axis', fontdict = font1)\r\nplt.ylabel('y axis', fontdict = font1)\r\nplt.title( 'Original Triangle', fontdict = font1)\r\nxf=(x1+x2+x3)/3\r\nyf=(y1+y2+y3)/3\r\n\r\nSx,Sy =[float(i) for i in input(\"Enter scaling factor point(x,y): \").split()]\r\npts_new=[[1,1],[2,2],[3,3]]\r\n\r\npts_new[0][0]=round (Sx*pts[0][0]+xf*(1-Sx))\r\npts_new[1][0]=round (Sx*pts[1][0]+xf*(1-Sx))\r\npts_new[2][0]=round (Sx*pts [2][0]+xf*(1-Sx))\r\n\r\npts_new[0][1]=round (Sy*pts[0][1]+yf*(1-Sy))\r\npts_new[1][1]=round (Sy*pts [1][1]+yf*(1-Sy))\r\npts_new[2] [1]=round (Sy*pts [2] [1]+yf*(1-Sy))\r\n\r\nprint (pts_new)\r\n\r\nplot2 
=plt.figure(2)\r\n\r\np2=plt.Polygon (pts_new, closed= True, fill=None, edgecolor='r')\r\n\r\nplt.gca().add_patch(p2)\r\n\r\nplt.xlim([-10,1000])\r\nplt.ylim([-18,1000])\r\n\r\nplt.grid(True)\r\n\r\nplt.xlabel('x _axis',fontdict=font1)\r\n\r\nplt.ylabel('y axis', fontdict=font1)\r\nplt.title('Scaled to origin', fontdict=font1)\r\n\r\nplt.show()","repo_name":"ravi7501/computer-graphics","sub_path":"scaling_mam.py","file_name":"scaling_mam.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12077318749","text":"import functools\nfrom solid import *\nfrom shared.main import *\n\n\nclamp_height = 60\nclamp_thickness = 5\nclamp_inner_diameter = 35.5\nclamp_outer_diameter = clamp_inner_diameter + (clamp_thickness * 2)\nclamp_gap = 2\n\ntab_thickness = 5\ntab_height = 43\ntab_node_diameter = 15\n\nhook_bed_usable_width = 75\nhook_bed_width = hook_bed_usable_width + (clamp_outer_diameter / 2)\nhook_height = 15\nhook_nub_height = 10\nhook_thickness = (clamp_inner_diameter + (clamp_thickness * 2)) / 2\n\nsmooth_segments = 250\n\n\ndef tab(is_nut=True):\n base = cube([0.1, tab_thickness, tab_height])\n\n shaft_hole_diameter = 3.5\n nut_hole_diameter = 6.75\n head_hole_diameter = 8\n nut_or_head_depth = 2.5 # 3\n\n shaft_hole = cylinder(\n h=tab_thickness,\n d=shaft_hole_diameter,\n segments=smooth_segments\n )\n\n if is_nut:\n nut_or_head_hole = pipe(\n cylinder(\n h=nut_or_head_depth,\n d=nut_hole_diameter,\n segments=6,\n ),\n translate([0, 0, tab_thickness - nut_or_head_depth]),\n )\n\n else:\n nut_or_head_hole = cylinder(\n h=nut_or_head_depth,\n d=head_hole_diameter,\n segments=smooth_segments\n )\n\n combined_hole = pipe(\n shaft_hole + nut_or_head_hole,\n rotate([270, 0, 0]),\n translate([\n (tab_node_diameter / 2) + (clamp_thickness / 2) + 2,\n 0,\n tab_height / 3 * 2\n ]),\n )\n\n node = cylinder(h=tab_thickness, d=tab_node_diameter, segments=smooth_segments)\n node = pipe(\n node,\n rotate([270, 0, 0]),\n translate([\n (tab_node_diameter / 2) + (clamp_thickness / 2),\n 0,\n tab_height / 3 * 2\n ]),\n )\n\n # TODO hull base and node together to form triangle tab\n\n return pipe(base + node, hull()) - combined_hole\n\n\ndef main():\n # hook bed 75mm\n\n\n # \\------/\n\n inner = cylinder(\n h=clamp_height,\n d=clamp_inner_diameter,\n segments=smooth_segments,\n )\n\n outer = cylinder(\n h=clamp_height,\n d=clamp_outer_diameter,\n segments=smooth_segments,\n )\n\n gap = pipe(\n cube([clamp_outer_diameter, clamp_gap, clamp_height]),\n translate([0, clamp_gap / -2, 0])\n )\n\n tab_gap_offset = clamp_gap / 2\n\n tab1 = pipe(\n tab(is_nut=True),\n translate([\n clamp_inner_diameter / 2,\n tab_gap_offset,\n (clamp_height - tab_height) / 2\n ])\n )\n\n tab2 = pipe(\n tab(is_nut=False),\n translate([\n clamp_inner_diameter / 2,\n 0 - tab_thickness - tab_gap_offset,\n (clamp_height - tab_height) / 2\n ])\n )\n\n hook = pipe(\n polygon(points=[\n [0, 0],\n [hook_bed_width - hook_height, 0],\n [hook_bed_width, hook_height],\n [hook_bed_width, hook_height + hook_nub_height],\n [hook_bed_width - hook_nub_height, hook_height],\n [hook_nub_height + (clamp_outer_diameter / 2), hook_height],\n [0, hook_height + hook_nub_height + (clamp_outer_diameter / 2)],\n ]),\n linear_extrude(height=hook_thickness),\n rotate([90, 0, 180]),\n translate([0, hook_thickness / -2, 0]),\n )\n\n final = outer + hook - inner - gap + tab1 + tab2\n\n scad_render_to_file(final, \"build/pipe_clamp_headphone_hook.scad\")\n\n\nif 
__name__ == \"__main__\":\n main()\n","repo_name":"bitsynthesis/3d-printing","sub_path":"models/pipe_clamp_headphone_hook.py","file_name":"pipe_clamp_headphone_hook.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"8923492809","text":"import json\nimport re\nimport difflib\nimport logging\nimport os\nimport shlex\nimport subprocess\nfrom .modeldb import ModelDB\nfrom pygments import highlight\nfrom pygments.lexers import DiffLexer\nfrom pygments.formatters import HtmlFormatter\n\n\nmdb = ModelDB()\n\n\ndef curate_run_data(run_data, model=None):\n curated_data = run_data\n \n regex_dict = {\n # /../nrniv: Assignment to modern physical constant FARADAY\t<-> ./x86_64/special: Assignment to modern physical constant FARADAY\n \"^/.*?/nrniv:\": \"%neuron-executable%:\",\n \"^\\\\./x86_64/special:\": \"%neuron-executable%:\",\n # nrniv: unable to open font \"*helvetica-medium-r-normal*--14*\", using \"fixed\" <-> special: unableto open font \"*helvetica-medium-r-normal*--14*\", using \"fixed\"\n \"^nrniv:\": \"%neuron-executable%:\",\n \"^special:\": \"%neuron-executable%:\",\n \"(Mon|Tue|Wed|Thu|Fri|Sat|Sun) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\\s+\\d+\\s+\\d+:\\d+:\\d+ [A-Z\\s]+ \\d+\": \"%date_command%\",\n \"total run time [0-9\\.]+\": \"total run time %run_time%\",\n \"(^.*distutils.*$)\": \"\",\n \"/.*?/lib/python.*/site-packages/\" : \"%python-site-packages%\",\n }\n \n for model_specific_substitution in mdb.run_instr.get(model, {}).get(\"curate_patterns\", []):\n regex_dict[model_specific_substitution[\"pattern\"]] = model_specific_substitution[\"repl\"]\n\n for regex_key, regex_value in regex_dict.items():\n updated_data = []\n pattern = re.compile(regex_key)\n for line in curated_data:\n new_line, number_of_subs = pattern.subn(regex_value, line)\n if number_of_subs:\n logging.debug(\"{} matched {} time(s)\".format(regex_key, number_of_subs))\n logging.debug(\"{} -> {}\".format(line, new_line))\n # if we are replacing a full line with an empty string, don't add it to the curated data\n if new_line:\n updated_data.append(new_line)\n curated_data = updated_data\n\n return curated_data\n\n\ndef diff_reports(report1_json, report2_json):\n diff_dict = {}\n gout_dict = {}\n runtime_dict = {}\n\n with open(report1_json, 'r+') as f, open(report2_json, 'r+') as f2:\n data_a = json.load(f)\n data_b = json.load(f2)\n\n hd = difflib.HtmlDiff()\n v1 = data_a[\"0\"][\"NEURON version\"]\n v2 = data_b[\"0\"][\"NEURON version\"]\n diff_dict[\"0\"] = hd.make_table(\n json.dumps(data_a[\"0\"], indent=\"\\t\").split(\"\\n\"),\n json.dumps(data_b[\"0\"], indent=\"\\t\").split(\"\\n\"),\n ).replace(\"\\n\", \"\")\n stats_dict = {v1: data_a[\"0\"][\"Stats\"], v2: data_b[\"0\"][\"Stats\"]}\n for k in data_a.keys():\n if int(k) == 0:\n continue # skip info key\n if k not in data_b:\n ud_empty = difflib.unified_diff(data_a[k][\"nrn_run\"], [\"Accession number {} not found in report2\".format(k)])\n diff_dict[k] = highlight('\\n'.join(ud_empty), DiffLexer(), HtmlFormatter(linenos=True, cssclass=\"colorful\", full=True))\n continue \n curated_a = curate_run_data(data_a[k][\"nrn_run\"], model=int(k))\n curated_b = curate_run_data(data_b[k][\"nrn_run\"], model=int(k))\n start_dir_a = data_a[k][\"run_info\"][\"start_dir\"] if \"run_info\" in data_a[k] and \"start_dir\" in data_a[k][\"run_info\"] else \"unknown\"\n start_dir_b = data_b[k][\"run_info\"][\"start_dir\"] if \"run_info\" in data_b[k] and 
\"start_dir\" in data_b[k][\"run_info\"] else \"unknown\"\n if curated_a != curated_b:\n ud = difflib.unified_diff(curated_a, curated_b, fromfile=start_dir_a,\n tofile=start_dir_b)\n diff_dict[k] = highlight('\\n'.join(ud), DiffLexer(), HtmlFormatter(linenos=True, cssclass=\"colorful\", full=True))\n \n def _speedup(a, b):\n dict = {}\n dict[\"v1\"] = a\n dict[\"v2\"] = b\n # compute slowdown/speedup relative to runtime_b (negative means slowdown)\n dict[\"speedup\"] = (float(b) - float(a)) / float(b) * 100\n return dict\n\n # List of keys that make gout comparison and speedup comparison pointless\n skip_keys = {\"do_not_run\", \"moderr\", \"nrn_run_err\"}\n if skip_keys.isdisjoint(data_a[k]) and skip_keys.isdisjoint(data_b[k]):\n # compare runtimes and compute slowdown or speedup\n runtime_dict[k] = {}\n runtime_dict[k][\"total\"] = _speedup(data_a[k][\"run_time\"], data_b[k][\"run_time\"])\n for runkey in (\"model\", \"nrnivmodl\"):\n if runkey in data_a[k][\"run_times\"] and runkey in data_b[k][\"run_times\"]:\n runtime_dict[k][runkey] = _speedup(data_a[k][\"run_times\"][runkey], data_b[k][\"run_times\"][runkey])\n \n # compare gout\n gout_a_file = os.path.join(data_a[k][\"run_info\"][\"start_dir\"], \"gout\")\n gout_b_file = os.path.join(data_b[k][\"run_info\"][\"start_dir\"], \"gout\")\n # gout may be missing in one of the paths. `diff -N` treats non-existent files as empty.\n if os.path.isfile(gout_a_file) or os.path.isfile(gout_b_file):\n # https://stackoverflow.com/questions/1180606/using-subprocess-popen-for-process-with-large-output\n diff_cmd = [\n \"diff\",\n \"-uN\",\n \"--speed-large-files\",\n gout_a_file,\n gout_b_file,\n ]\n child = subprocess.Popen(\n diff_cmd,\n bufsize=1, # line buffered\n stdout=subprocess.PIPE, # we read from stdout below\n shell=False,\n text=True,\n )\n # sometimes when the results are wildly different then diff can ~hang\n timeout = 2 # seconds\n try:\n (diff_out, _) = child.communicate(timeout=timeout)\n diff_out = diff_out.splitlines()\n # maximum 30 lines to keep the summary diff page responsive\n if len(diff_out) > 30:\n diff_out = \"\\n\".join(\n diff_out[:30]\n + [\n \"... 
{} lines suppressed ...\".format(\n len(diff_out) - 30\n )\n ]\n )\n else:\n diff_out = \"\\n\".join(diff_out)\n except subprocess.TimeoutExpired:\n child.kill()\n diff_out = (\n \"{} did not complete in {} seconds, killing it\".format(\n diff_cmd, timeout\n )\n )\n if diff_out:\n gout_dict[k] = highlight(\n diff_out,\n DiffLexer(),\n HtmlFormatter(linenos=True, cssclass=\"colorful\", full=True),\n )\n\n return diff_dict, gout_dict, runtime_dict, stats_dict, v1, v2\n","repo_name":"neuronsimulator/nrn-modeldb-ci","sub_path":"modeldb/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7472,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"20000947776","text":"# Universidad del Valle de Guatemala\r\n# Gráficas por computadora\r\n# Christopher García 20541\r\n# Raycaster\r\n\r\nfrom OpenGL.GL import *\r\nimport pygame\r\n\r\nclass Material:\r\n def __init__(self, filepath):\r\n self.texture = glGenTextures(1)\r\n glBindTexture(GL_TEXTURE_2D, self.texture)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\r\n image = pygame.image.load(filepath).convert()\r\n image_width,image_height = image.get_rect().size\r\n img_data = pygame.image.tostring(image,'RGBA')\r\n glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,image_width,image_height,0,GL_RGBA,GL_UNSIGNED_BYTE,img_data)\r\n glGenerateMipmap(GL_TEXTURE_2D)\r\n\r\n def use(self):\r\n glActiveTexture(GL_TEXTURE0)\r\n glBindTexture(GL_TEXTURE_2D,self.texture)\r\n\r\n def destroy(self):\r\n glDeleteTextures(1, (self.texture,))","repo_name":"ChristopherG19/raycaster_library","sub_path":"Model_Viewer/material.py","file_name":"material.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30603007277","text":"class Solution(object):\n\n def count_sentence_fit(self, sentence, rows, cols):\n if sentence is None:\n raise TypeError('sentence cannot be None')\n if rows is None or cols is None:\n raise TypeError('rows and cols cannot be None')\n if rows < 0 or cols < 0:\n raise ValueError('rows and cols cannot be negative')\n if cols == 0 or not sentence:\n return 0\n curr_row = 0\n curr_col = 0\n count = 0\n while curr_row < cols:\n for word in sentence:\n # If the current word doesn't fit on the current line,\n # move to the next line\n if len(word) > cols - curr_col:\n curr_col = 0\n curr_row += 1\n # If we are beyond the number of rows, return\n if curr_row >= rows:\n return count\n # If the current word fits on the current line,\n # 'insert' it here\n if len(word) <= cols - curr_col:\n curr_col += len(word) + 1\n # If it still doesn't fit, then the word is too long\n # and we should just return the current count\n else:\n return count\n count += 1\n return count","repo_name":"kamalyes/algorithmsurface","sub_path":"097-sentence_fit.py","file_name":"097-sentence_fit.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"42968216285","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 18 13:17:39 2022\r\n\r\nTopic: Duplication & Type casting\r\n\r\n\"\"\"\r\n'''\r\nProblem statement: \r\nData collected may have duplicate entries, that might be because the data 
collected were not at regular intervals or any other reason. To build a proper solution on such data will be a tough ask. The common techniques are either removing duplicates completely or substitute those values with a logical data. There are various techniques to treat these types of problems.\r\n\r\nQ1. For the given dataset perform the type casting (convert the datatypes, ex. float to int)\r\nQ2. Check for the duplicate values, and handle the duplicate values (ex. drop)\r\nQ3. Do the data analysis (EDA)?\r\nSuch as histogram, boxplot, scatterplot etc\r\n\r\n'''\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv('D:/DataSets/OnlineRetail.csv')\r\n\r\n#1 Type Casting\r\ndata_quantity = data.Quantity.astype('float32')\r\ndata_unitprice = data.UnitPrice.astype('int32')\r\n\r\n#2 Checking duplicate values\r\ndup1 = data.duplicated()\r\ndup1\r\nsum(dup1)\r\ndata1 = data.drop_duplicates()\r\ndata1\r\n\r\ndata_quantity = data.Quantity.astype('float32')\r\ndata_unitprice = data.UnitPrice.astype('int32')\r\n\r\ndup1 = data.duplicated()\r\ndup1\r\nsum(dup1)\r\ndata1 = data.drop_duplicates()\r\ndata1\r\n\r\n#3 Exploratory Data Analysis\r\n\r\n#Measures of Central Tendency \r\n\r\ndata.Quantity.mean()\r\ndata.Quantity.median()\r\ndata.Quantity.mode()\r\n\r\n#Measures of Dispersion\r\ndata.Quantity.var()\r\ndata.Quantity.std()\r\nrange = max(data.Quantity) - min(data.Quantity)\r\nrange\r\n\r\n#Skewness\r\ndata.Quantity.skew() # negatively skewed\r\n\r\n#Kurtosis\r\ndata.Quantity.kurt() # leptokurtic\r\n\r\n#Measures of Central Tendency \r\n\r\ndata.UnitPrice.mean()\r\ndata.UnitPrice.median()\r\ndata.UnitPrice.mode()\r\n\r\n#Measures of Dispersion\r\ndata.UnitPrice.var()\r\ndata.UnitPrice.std()\r\nrange = max(data.UnitPrice) - min(data.UnitPrice)\r\nrange\r\n\r\n#skewness\r\ndata.UnitPrice.skew() #postively skewed or right skewed\r\n\r\n#kurtosis\r\ndata.UnitPrice.kurt() #leptokurtic \r\n\r\n\r\n#Visualisation\r\n\r\ndata.shape\r\nplt.bar(height=data.UnitPrice, x='Quantity')\r\n\r\n#histogram\r\nplt.hist(data.UnitPrice) #To know data is normally distributed or not\r\n\r\n#box plot\r\nplt.boxplot(data.UnitPrice) #to know outliers are there or not\r\n#the distribution is not normal\r\n\r\nplt.boxplot(data.Quantity)\r\n#the distribution is not normal\r\n\r\n#scatter plot\r\nplt.scatter(y=data.UnitPrice, x=data.Quantity, alpha=0.5 )\r\nplt.show()\r\n\r\n\r\n# Conclusion: Verified duplication and Typecasting (converting one datatype to another) has done.\r\n","repo_name":"raju90147/data-scientist","sub_path":"duplication & type casting.py","file_name":"duplication & type casting.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2996650872","text":"#!/usr/bin/env python\nimport os\n\nfrom . 
import settings\n# from django.core.management import setup_environ\n#\n# setup_environ(settings) # This needs to be done before the model import below (we need a Django environment in order to import Django models)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\nimport django\ndjango.setup()\nfrom .LemurApp import models\n\nfaclist = [\"Big Muddy River\",\n\"Centralia\",\n\"Crossroads ATC\",\n\"Danville\",\n\"Decatur\",\n\"Decatur ATC\",\n\"Dixon\",\n\"Dwight\",\n\"East Moline\",\n\"Fox Valley ATC\",\n\"Graham\",\n\"Greenville Federal\",\n\"Hill\",\n\"Illinois River\",\n\"Jacksonville\",\n\"Lawrence\",\n\"Lincoln\",\n\"Logan\",\n\"Marion Federal\",\n\"Menard\",\n\"North Lawndale ATC\",\n\"Pekin Federal\",\n\"Peoria ATC\",\n\"Pinckneyville\",\n\"Pontiac\",\n\"Robinson\",\n\"Shawnee\",\n\"Sheridan\",\n\"Southern Illinois ATC\",\n\"Southwestern Illinois\",\n\"Stateville\",\n\"Tamms\",\n\"Taylorville\",\n\"Vandalia\",\n\"Vienna\",\n\"West Side ATC\",\n\"Western Illinois\"]\n\nfor fac in faclist:\n facility = models.Facility()\n facility.name = fac\n facility.save()\n ","repo_name":"wiltzius/btp-lemur","sub_path":"LemurAptana/facility-creator.py","file_name":"facility-creator.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36329533011","text":"#importing libraries\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\n#importing data\ndf=pd.read_csv('C:/Users/HP-PC/Desktop/oct/criminal predit/criminal_train.csv')\ntest=pd.read_csv('C:/Users/HP-PC/Desktop/oct/criminal predit/criminal_test.csv')\n#checking for null values\nprint(df.isnull().sum())\ny=df['Criminal']\nprint(df.describe())\nx=df\n#features with very small sd are removed beacuse they have very less effect on the ans \nnew=['PERID','MAIIN102','AIIND102','IIHHSIZ2','IIKI17_2','IRHH65_2','IIHH65_2','VEREP','Criminal','PRXRETRY',]\nnew1=['PERID','MAIIN102','AIIND102','IIHHSIZ2','IIKI17_2','IRHH65_2','IIHH65_2','VEREP','PRXRETRY',]\nx=x.drop(new,1)\n#random forest has been applied to classify\nclf=RandomForestClassifier()\nclf.fit(x,y)\nans=clf.predict(test.drop(new1,1))\naa=test\naa['Criminal']=pd.DataFrame(ans,columns=['Criminal'])\nanss=aa[['PERID','Criminal']]\nanss.to_csv('sol.csv')\n","repo_name":"abhikb101/ML","sub_path":"problems/predicting criminals/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22360228507","text":"import torch\r\nimport math\r\nimport PIL.ImageDraw as draw\r\nimport os\r\nimport numpy as np\r\nimport torchvision.transforms as transforms\r\nimport PIL.Image as image\r\nimport torch.utils.data as data\r\nimport flag\r\nfrom nms_iou import IOU,convert_to_square\r\n\r\nimg_path = r'E:\\yolo\\img'\r\nlabel_path = r'E:\\yolo\\label.txt'\r\n\r\nchange_tensor = transforms.Compose([\r\n transforms.ToTensor()\r\n]\r\n)\r\n\r\nclass MyDataset(data.Dataset):\r\n\r\n def __init__(self):\r\n with open(label_path) as f:\r\n self.dataset = f.readlines()\r\n\r\n\r\n def __len__(self):\r\n\r\n return len(self.dataset)\r\n\r\n\r\n def __getitem__(self, index):\r\n label = {}\r\n label_data = self.dataset[index]\r\n label_data = label_data.split()\r\n\r\n img = image.open(os.path.join(img_path,label_data[0]))\r\n # print(w_origan,h_origan)\r\n # img1=img.resize((416,416))\r\n\r\n img,w1,h1,b,le=convert_to_square(img)\r\n # img.show()\r\n img_data 
=change_tensor(img)\r\n\r\n #标签转成浮点型\r\n label_data1 = np.array([float(x) for x in label_data[1:]])\r\n #运用np.split直接对列表进行拆分,拆分后的数据类型为array且包含于一个列表\r\n label_data2 = np.split(label_data1,len(label_data1)//5)\r\n\r\n for feature_size,anchors in flag.ANCHORS_GROUP.items():\r\n label[feature_size] = np.zeros([feature_size,feature_size,3,5+flag.CLASS_NUM])\r\n\r\n for labl in label_data2:\r\n conf,cx,cy,w,h = labl\r\n #标签框\r\n cx_, w_ = cx*w1,w*w1\r\n cy_, h_ = cy * h1,h*h1\r\n\r\n # print( cx_offset,cx_index,cy_offset,cy_index)\r\n if le=='b1':\r\n label_x1 = cx_ - 0.5 * w_\r\n label_y1 = cy_ - 0.5 * h_+b\r\n label_x2 = cx_ + 0.5 * w_\r\n label_y2 = cy_ + 0.5 * h_+b\r\n label_box = np.array([[label_x1, label_y1, label_x2, label_y2]])\r\n cy_=cy_+b\r\n else:\r\n label_x1 = cx_ - 0.5 * w_+b\r\n label_y1 = cy_ - 0.5 * h_\r\n label_x2 = cx_ + 0.5 * w_+b\r\n label_y2 = cy_ + 0.5 * h_\r\n label_box = np.array([[label_x1, label_y1, label_x2, label_y2]])\r\n cx_=cx_+b\r\n\r\n for i,anchor in enumerate(anchors):\r\n #建议框\r\n w_anchor = anchor[0]\r\n h_anchor = anchor[1]\r\n\r\n anchor_x1 = cx_ - 0.5 * w_anchor\r\n anchor_y1= cy_ - 0.5 * h_anchor\r\n anchor_x2 = cx_ + 0.5 * w_anchor\r\n anchor_y2 = cy_+ 0.5 * h_anchor\r\n anchor_box = np.array([anchor_x1,anchor_y1,anchor_x2,anchor_y2])\r\n\r\n iou = IOU(anchor_box,label_box)\r\n tw = w_/w_anchor\r\n th = h_/h_anchor\r\n # 特征图上的偏移量和索引\r\n cx_offset, cx_index = math.modf(cx_ * feature_size / flag.PICTURE_WIDTH)\r\n cy_offset, cy_index = math.modf(cy_ * feature_size / flag.PICTURE_HEIGHT)\r\n\r\n label[feature_size][int(cy_index),int(cx_index),i]=np.array(\r\n [iou,cx_offset,cy_offset,np.log(tw),np.log(th),*onehot(flag.CLASS_NUM,int(conf))])\r\n # print('dataset',label[feature_size][int(cy_index),int(cx_index),i])\r\n\r\n return label[13],label[26],label[52],img_data\r\n\r\ndef onehot(num,ver):\r\n arr =np.zeros([num])\r\n arr[ver]=1\r\n return arr\r\n\r\nmydataset = MyDataset()\r\nmydataset.__getitem__(1)","repo_name":"yanchangqin/Target-Detection","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"637553223","text":"class Options:\n\n def __init__(self):\n self.run_again = 1\n self.access_control = 0\n self.use_local_db = 1\n self.database_path = '10.10.10.10'\n self.database_port = '1433'\n self.need_send_admin = 0\n self.admin_host = '10.10.10.10'\n self.admin_port = 2121\n self.admin_timeout = 10\n","repo_name":"pynisher/Radmin-Client","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"11716132451","text":"import os\nimport secrets\n#from datetime import date\nfrom PIL import Image\n#from flask import render_template, url_for, flash, redirect, request, abort\n#from sqlalchemy.orm import session\n#from royal import app#, db, crypt\n#from royal.forms import LoginForm, RegistrationForm, OfferForm\n#from royal.models import User, offer, items\n#from flask_login import login_user, current_user, logout_user, login_required\nfrom flask import current_app\n\n\ndef save_image(form_picture):\n hex_name = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n image_name = hex_name + f_ext\n file_path = os.path.join(current_app.root_path, 'static/items_images', image_name)\n \n output_size = (230,190)\n i = Image.open(form_picture)\n 
i.thumbnail(output_size)\n i.save(file_path)\n \n return image_name\n\n# @app.template_filter('datetimeformat')\n# def datetimeformat(value, format='%Y-%m-%d'):\n# return value.strftime(format)\n","repo_name":"Shehab-Magdy/royal_house","sub_path":"royal/site/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"45938681070","text":"for _ in range(int(input())):\n data = list(\" \" + input())\n l = {'a', 'b', 'c'}\n flag = False\n\n for i in range(1, len(data) - 1):\n if data[i] == '?':\n d = {data[i + 1], data[i - 1]}\n d = list(l - d - {'?'})\n data[i] = d[0]\n else:\n if data[i] == data[i + 1]:\n flag = True\n break\n\n if data[-1] == '?' and not flag:\n d = list(l - {data[-2]} - {'?'})\n data[-1] = d[0]\n if flag:\n print(-1)\n else:\n print(\"\".join(data[1:]))","repo_name":"ThinkingDobby/PythonProgramming","sub_path":"codeforces/practice/year22/mon1/under1500/1265A(2) Beautiful String.py","file_name":"1265A(2) Beautiful String.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37464519404","text":"a = input()\nmin1 = int(input())\nmax1 = int(input())\nmin2 = int(input())\nmax2 = int(input())\nwith open(a) as file:\n int_number = file.read()\n b = int_number.split()\n for i in b:\n v = i.split(':')\n print(v)\n print(v[-1])\n if int(v[-1]) <= min1 and min1 >= int(v[-2]):\n if int(v[-1]) <= max1 and max1 >= int(v[-2]):\n if int(v[-1]) <= max2 and max2 >= int(v[-2]):\n if int(v[-1]) <= min2 and min2 >= int(v[-2]):\n print(v[0])\n","repo_name":"tytifo/tima","sub_path":"yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29297803143","text":"import os\n\nimport mindspore\n\nfrom src.model_utils import evaluation\nfrom mindspore.common.tensor import Tensor\nfrom mindspore._checkparam import Validator\nfrom mindspore.train.callback import Callback\nfrom mindspore import ops\nimport time\nimport numpy as np\n\n\nclass StepMonitor(Callback):\n def __init__(self, per_print_times):\n super(StepMonitor, self).__init__()\n self._per_print_times = per_print_times\n self._last_print_time = 0\n self.loss_step = dict(loss_G=[], loss_D1=[], loss_D2=[])\n self.time_step_sum = 0.\n\n def convert_loss(self, loss):\n if isinstance(loss, (tuple, list)):\n if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):\n loss = loss[0]\n raise None\n\n if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):\n loss = float(np.mean(loss.asnumpy()))\n return loss\n\n def step_begin(self, run_context):\n self.time_step_start = time.time()\n\n def step_end(self, run_context):\n \"\"\"\n Print training loss at the end of step.\n\n Args:\n run_context (RunContext): Include some information of the model.\n \"\"\"\n\n cb_params = run_context.original_args()\n loss = cb_params.net_outputs\n loss_G, loss_D1, loss_D2 = loss\n for key in self.loss_step:\n self.loss_step[key].append(self.convert_loss(locals()[key]))\n\n cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1\n\n # if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):\n # raise ValueError(\"epoch: {} step: {}. 
Invalid loss, terminating training.\".format(\n # cb_params.cur_epoch_num, cur_step_in_epoch))\n\n # In disaster recovery scenario, the cb_params.cur_step_num may be rollback to previous step\n # and be less than self._last_print_time, so self._last_print_time need to be updated.\n # if self._per_print_times != 0 and (cb_params.cur_step_num <= self._last_print_time):\n # while cb_params.cur_step_num <= self._last_print_time:\n # self._last_print_time -= \\\n # max(self._per_print_times, cb_params.batch_num if cb_params.dataset_sink_mode else 1)\n\n self.time_step_sum += time.time() - self.time_step_start\n if self._per_print_times != 0 and (cb_params.cur_step_num - self._last_print_time) >= self._per_print_times:\n self._last_print_time = cb_params.cur_step_num\n per_time = self.time_step_sum # self._per_print_times\n # per_time = time.time() - self.time_step_start\n self.time_step_sum = 0.\n\n output_string = '[Info:' + time.strftime(\"%Y-%m-%d %H:%M:%S | \", time.localtime())\n output_string += '#Epoch:{}/{} step:{}/{} | #'.format(cb_params.cur_epoch_num, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num)\n for key, value in self.loss_step.items():\n output_string += 'avg_{:s}={:.4f} '.format(key, np.mean(value))\n self.loss_step[key] = []\n output_string += '| #per time:{:.3f}s ]'.format(per_time)\n print(output_string)\n # ops.Print()(output_string)\n # print(\"epoch: %s step: %s, loss is %s\" % (cb_params.cur_epoch_num, cur_step_in_epoch, loss), flush=True)\n\n\nclass CheckpointMonitor(Callback):\n def __init__(self, config, net, eval_dataset, ):\n super(CheckpointMonitor, self).__init__()\n self.net = net\n self.config = config\n self.save_pred_every = config.save_pred_every\n self.save_path = config.snapshot_dir\n self.eval_dataset = eval_dataset\n os.makedirs(self.save_path, exist_ok=True)\n self.best_iou = -0.1\n\n # def begin(self, run_context):\n # config = self.config\n # miou = evaluation(self.net.model_G, self.eval_dataset.create_dict_iterator(), ops.ResizeBilinear(size=(1024, 2048)),\n # config.data_dir_target,\n # config.save_result, config.data_list_target, logger=None, save=True, config=config)\n # if miou > self.best_iou:\n # checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n # if os.path.isfile(checkpoint_path):\n # os.remove(checkpoint_path)\n # self.best_iou = miou\n # checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n # mindspore.save_checkpoint(self.net, checkpoint_path)\n # print(\"the best iou is {}\".format(self.best_iou))\n\n def step_end(self, run_context):\n cb_params = run_context.original_args()\n\n if cb_params.cur_step_num % self.save_pred_every == 0:\n checkpoint_path = os.path.join(self.save_path, 'step_{}.ckpt'.format(cb_params.cur_step_num))\n mindspore.save_checkpoint(self.net, checkpoint_path)\n\n config = self.config\n miou = evaluation(self.net.model_G, self.eval_dataset.create_dict_iterator(), ops.ResizeBilinear(size=(1024, 2048)),\n config.data_dir_target,\n config.save_result, config.data_list_target, logger=None, save=True, config=config)\n if miou > self.best_iou:\n checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n if os.path.isfile(checkpoint_path):\n os.remove(checkpoint_path)\n self.best_iou = miou\n checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n mindspore.save_checkpoint(self.net, checkpoint_path)\n print(\"the best iou is {}\".format(self.best_iou))\n\n def epoch_end(self, 
run_context):\n cb_params = run_context.original_args()\n\n if cb_params.cur_step_num % self.save_pred_every == 0:\n checkpoint_path = os.path.join(self.save_path, 'step_{}.ckpt'.format(cb_params.cur_step_num))\n mindspore.save_checkpoint(cb_params.train_network, checkpoint_path)\n\n config = self.config\n miou = evaluation(self.net.model_G, self.eval_dataset.create_dict_iterator(), ops.ResizeBilinear(size=(1024, 2048)),\n config.data_dir_target,\n config.save_result, config.data_list_target, logger=None, save=True, config=config)\n if miou > self.best_iou:\n checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n if os.path.isfile(checkpoint_path):\n os.remove(checkpoint_path)\n self.best_iou = miou\n checkpoint_path = os.path.join(self.save_path, 'best_{}.ckpt'.format(self.best_iou))\n mindspore.save_checkpoint(self.net, checkpoint_path)\n print(\"the best iou is {}\".format(self.best_iou))\n","repo_name":"mindspore-lab/models","sub_path":"research/xidian/advseg/src/utils/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"28159268635","text":"import argparse\nimport PIL.Image as Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport skimage\nimport scipy\nimport matplotlib.gridspec as gridspec\n# from sklearn.linear_model import LinearRegression\n\ndef plot_histogram(ramp_images):\n\n num_images, height, width, channels = ramp_images.shape\n\n rows = np.random.randint(0, height, 10)\n cols = np.random.randint(0, width, 10)\n\n colors = ['red', 'green', 'blue']\n\n for p in range(10):\n for c in range(channels):\n plt.clf()\n pixel_values = ramp_images[:, rows[p], cols[p], c]\n counts, bins = np.histogram(pixel_values)\n plt.hist(bins[:-1], bins, weights=counts, color = colors[c])\n plt.xlabel('Pixel Value')\n plt.ylabel('Frequency')\n print(\"saving\")\n plt.savefig(\"./copy2/pixel_values_channel_%s_%s.png\" %(c, p), bbox_inches = 'tight', pad_inches = 0)\n\n return\n\ndef main(args):\n # ramp_intensity = np.tile(np.linspace(0, 1, 255), (255, 1))\n # ramp_intensity = np.stack([ramp_intensity, ramp_intensity, ramp_intensity], axis=-1)\n # print(ramp_intensity.shape)\n # plt.clf()\n # plt.imshow(ramp_intensity)\n # plt.axis('off')\n # plt.savefig(\"outputs/ramp_intensity.png\", bbox_inches = 'tight', pad_inches = 0)\n\n # image_list = glob.glob(\"./data/dark_images/ISO400S125_dark/*.tiff\")\n # image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n # dark_images_1 = np.array([skimage.io.imread(image) for image in image_list])\n # print(\"RAW dark images shape and dtype is \", dark_images_1.shape, dark_images_1.dtype)\n\n # dark_frame_1 = np.mean(dark_images_1, axis=0)\n # np.save('bonus/dark_frame_1.npy', dark_frame_1)\n # print(\"Dark frame shape and dtype is \", dark_frame_1.shape, dark_frame_1.dtype)\n\n # image_list = glob.glob(\"./data/ramp_images/ISO400S125/*.tiff\")\n # image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n # ramp_images_1 = np.array([skimage.io.imread(image) for image in image_list])\n # print(\"RAW ramp images shape and dtype is \", ramp_images_1.shape, ramp_images_1.dtype) \n # np.save('bonus/ramp_images_1.npy', ramp_images_1)\n\n\n # image_list = glob.glob(\"./data/dark_images/ISO200S12_dark/*.tiff\")\n # image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n # dark_images_2 = np.array([skimage.io.imread(image) 
for image in image_list])\n # print(\"RAW dark images shape and dtype is \", dark_images_2.shape, dark_images_2.dtype)\n\n # dark_frame_2 = np.mean(dark_images_2, axis=0)\n # np.save('bonus/dark_frame_2.npy', dark_frame_2)\n # print(\"Dark frame shape and dtype is \", dark_frame_2.shape, dark_frame_2.dtype)\n\n # dark_frame = np.load('dark_frame.npy')\n\n image_list = glob.glob(\"./data/ramp_images/ISO200S12/*.tiff\")\n image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n ramp_images_2 = np.array([skimage.io.imread(image) for image in image_list])\n print(\"RAW ramp images shape and dtype is \", ramp_images_2.shape, ramp_images_2.dtype) \n np.save('bonus/ramp_images_2.npy', ramp_images_2)\n\n image_list = glob.glob(\"./data/ramp_images/ISO100S8/*.tiff\")\n image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n dark_images_3 = np.array([skimage.io.imread(image) for image in image_list])\n print(\"RAW dark images shape and dtype is \", dark_images_3.shape, dark_images_3.dtype)\n\n dark_frame_3 = np.mean(dark_images_3, axis=0)\n np.save('bonus/dark_frame_3.npy', dark_frame_3)\n print(\"Dark frame shape and dtype is \", dark_frame_3.shape, dark_frame_3.dtype)\n\n # dark_frame = np.load('dark_frame.npy')\n\n image_list = glob.glob(\"./data/ramp_images/ISO100S8/*.tiff\")\n image_list.sort(key=lambda filename: int(filename.split('_')[-1][:-5]))\n ramp_images_3 = np.array([skimage.io.imread(image) for image in image_list])\n print(\"RAW ramp images shape and dtype is \", ramp_images_3.shape, ramp_images_3.dtype) \n np.save('bonus/ramp_images_3.npy', ramp_images_3)\n\n # ramp_images = np.load('ramp_images.npy')\n\n # print(np.min(ramp_images), np.max(ramp_images))\n\n # ramp_images = ramp_images - dark_frame \n # np.save('ramp_images_wo_darkframe.npy', ramp_images)\n\n # ramp_images = np.load('ramp_images_wo_darkframe.npy')\n # # print(np.min(ramp_images), np.max(ramp_images))\n # # plot_histogram(ramp_images)\n\n # mean_intensity = np.mean(ramp_images, axis=0)\n # # variance = np.sum((ramp_images - mean_intensity)**2, axis=0) / (num_images-1)\n # variance = np.var(ramp_images, axis=0)\n\n # color = ['red', 'green', 'blue']\n\n # for c in range(3):\n # mean_channel = mean_intensity[:, :, c]\n # mean_channel = mean_channel.flatten()\n\n # variance_channel = variance[:, :, c]\n # variance_channel = variance_channel.flatten()\n\n # mean_intensity_rounded = np.ceil(mean_channel)\n # unique_means, unique_indices = np.unique(mean_intensity_rounded, return_inverse=True)\n # total_variances = np.bincount(unique_indices, weights=variance_channel)\n # count_foreach_mean = np.bincount(unique_indices)\n # average_variances = total_variances / count_foreach_mean\n\n # x = unique_means.reshape(-1, 1)\n # y = average_variances.reshape(-1, 1)\n # print(x.shape, y.shape)\n # # result = scipy.stats.linregress(x, y)\n # x = x[1000:-18000]\n # y = y[1000:-18000]\n # reg = LinearRegression().fit(x, y)\n # print(reg.score(x, y))\n\n # gain = reg.coef_\n # additive_noise = reg.intercept_\n # print(gain[0][0], additive_noise[0])\n # print(gain.shape, additive_noise.shape)\n # plt.clf()\n # plt.plot(x, y, 'o', label='mean-variance pair', color=color[c])\n # plt.plot(x, additive_noise + gain*x, 'orange', label='fitted line')\n # plt.savefig('./transfer/fitted_mean_variance_line_%s.png' %(c), bbox_inches = 'tight', pad_inches = 0)\n\n # noise_params = np.array([gain[0][0], additive_noise[0]])\n # np.save('./transfer/noise_params_%s.npy'%(c), noise_params)\n\n\nif __name__ == 
\"__main__\":\n\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--ramp_intensity\", default=\"ramp_intensity.png\", type=str)\n args = parser.parse_args()\n\n main(args=args)\n\n print('done!')","repo_name":"RMoharir/solution_assign2","sub_path":"src/noise_calibration.py","file_name":"noise_calibration.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31538729084","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import models, migrations, connection\n\ndef grant_permissions(apps, schema_editor):\n cursor = connection.cursor()\n\n cursor.execute('GRANT SELECT ON TABLE sparkle_sparkleversion, sparkle_sparkleversion_id_seq '\n 'TO GROUP %s;' % settings.DB_PUBLIC_ROLE)\n\n cursor.execute('GRANT USAGE, SELECT ON SEQUENCE sparkle_sparkleversion_id_seq '\n 'TO GROUP %s;' % settings.DB_PUBLIC_ROLE)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sparkle', '0005_auto_20150707_0822'),\n ('omaha', '0021_grant_permissions_to_public_group'),\n ]\n\n operations = [\n migrations.RunPython(grant_permissions, reverse_code=migrations.RunPython.noop),\n ]\n","repo_name":"omaha-consulting/omaha-server","sub_path":"omaha_server/sparkle/migrations/0006_grant_permissions_to_public_group.py","file_name":"0006_grant_permissions_to_public_group.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"62"} +{"seq_id":"12525113016","text":"import pandas as pd\nimport pickle\nimport re\nimport codecs\n\n# word2dictionary = open(\"word2dictionary.txt\", 'w', encoding=\"utf-16\")\nwith open('word2dictionary.txt', 'rb') as ind:\n dic = ind.read().decode('utf-16')\n dic=dic.split()\n\nf = open('cleaned_test0-400.txt', 'r', encoding='utf-16', errors=\"replace\")\na = f.read()\nf1 = open('cleaned_train0-400.txt', 'r', encoding='utf-16', errors=\"replace\")\nb = f1.read()\nsentences =''\nfor word in a:\n sentences+=word\n sentences+=\" \"\nsentences = sentences.split(\" \\n \\n \") # sentences是所有句(字之间有空格)的列表。\nb = b.split(\"\\n\\n\")\n# sentense=\"你 看 我 尽 节 存忠 立 功勋,单 注 著 楚 霸 王 大军 尽 霸 王 的\" #你 看 我 尽 节 存 忠 立 功勋 ,单 注 著 楚霸王 大军 尽 。\n# sentences = [\n# \"那 知 州 听 得 这 话 , 从 顶 门 上 不 见 了 三 魂 , 脚 底 下 疏 失 了 七 魄 , 便 投 后 殿 走 了 。\", #那 知州 听 得 这 话 ,从 顶门 上 不 见 了 三魂 ,脚 底下 疏失 了 七魄 ,便 投 后 殿 走 了 。\n# \"失 却 龙 驹 怎 战 争 , 了 虞 姬 那 痛 增 。 \", #失却 龙驹 怎 战争 ,了 虞姬 那 痛 增\n# \"你 看 我 尽 节 存 忠 立 功 勋 , 单 注 著 楚 霸 王 大 军 尽 。\", #你 看 我 尽 节 存 忠 立 功勋 ,单 注 著 楚霸王 大军 尽 。\n# \"今 朝 希 遇 大 乘 经 , 见 优 昙 花 一 种 ;\", #今朝 希 遇 大乘经 ,见 优昙花 一 种 ;\n# \"全 不 见 鸿 门 会 那 气 性 , 今 日 向 乌 江 岸 灭 尽 形 。\", #全 不 见 鸿门会 那 气性 ,今日 向 乌江 岸 灭 尽 形 。\n# \"昔 者 齐 晏 子 使 于 梁 国 为 使 \", #昔 者 齐 晏子 使 于 梁国 为 使\n# \"剑 虽 三 尺 , 定 四 方 , 麒 麟 虽 小 , 圣 君 瑞 应 ;\", #剑 虽 三 尺 ,定 四方 ,麒麟 虽 小 ,圣君 瑞应 ;\n# \"学 而 时 习 之 , 不 亦 说 乎 ? 有 朋 自 远 方 来 , 不 亦 乐 乎 ? 
人 不 知 而 不 愠 , 不 亦 君 子 乎 ?\", #学 而 时 习 之 , 不 亦 说 乎 ?有 朋 自 远方 来 ,不 亦 乐 乎 ?人 不 知 而 不 愠 ,不 亦 君子 乎?\n# \"弟 子 入 则 孝 , 出 则 弟 , 谨 而 信 , 泛 爱 众 , 而 亲 仁 。\" #弟子 入 则 孝 ,出 则 弟 ,谨 而 信 ,泛 爱 众 ,而 亲 仁 。\n# ]\n\ndef taken(sentence):\n a=sentence.split()\n for index in range(len(a)):\n if (len(a[index]) == 1):\n a[index] = 's '\n elif(len(a[index])==2):\n a[index] = 'b ' + 'e '\n elif(len(a[index])>2):\n b=''\n b='b '\n for p in range(len(a[index])-2):\n b+='m '\n b+='e '\n a[index]=b\n else:pass\n s=u''.join(a)\n return s\n\ndef checkDic(sentense):\n sig = -3\n result = []\n sentense=sentense.split()\n for i in range(len(sentense) - 2):\n if (i <= sig + 2):\n continue\n else:\n word = sentense[i] + sentense[i + 1] + sentense[i + 2]\n if word in dic:\n result.append(word)\n sig = i\n else:\n result.append(sentense[i])\n if (sig != len(sentense) - 3):\n if (sig == len(sentense) - 4):\n result.append(sentense[i + 2])\n else:\n result.append(sentense[i + 1])\n result.append(sentense[i + 2])\n\n sentense = result\n sig=-2\n result=[]\n rss=''\n for i in range(len(sentense) - 1):\n if (sig + 1 == i):\n continue\n else:\n word = sentense[i] + sentense[i + 1]\n if word in dic:\n result.append(word)\n sig = i\n else:\n result.append(sentense[i])\n if (sig != len(sentense) - 2):\n result.append(sentense[i + 1])\n for each in result:\n rss = rss + each + ' '\n print(\"===>\"+rss)\n return rss\nRightNum = 0\nALLNum = 0\nfor sentenceIndex in range(len(sentences)):\n # if (sentence!=' '):\n # print(\"--->\"+sentences[sentenceIndex])\n try:\n sentence_rig = b[sentenceIndex]\n sentence_pre = checkDic(sentences[sentenceIndex])\n except:\n continue\n pre = taken(sentence_pre)\n rig = taken(sentence_rig)\n print(pre.split())\n print(rig.split())\n if len(pre)== len(rig):\n numOfRight=0\n for f in range(len(pre.split())):\n if pre.split()[f]==rig.split()[f]:\n numOfRight+=1\n print(numOfRight)\n print(len(pre.split()))\n RightNum+=numOfRight\n ALLNum+=len(pre.split())\n print(RightNum,ALLNum)\n print(RightNum/ALLNum)\n print(\"==========\")","repo_name":"cosJin/bishe","sub_path":"duanju-yuzhi/TESTonlyDic.py","file_name":"TESTonlyDic.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7887343024","text":"import sys\nimport threading\nfrom sys import stdin, stdout\n\nsys.setrecursionlimit(10**6)\nthreading.stack_size(32*1024)\n# threading.stack_size(16*2048*2048)\n\n\ndef tree_painting(n, dic):\n memo = {}\n res = 0\n for i in range(1, n+1):\n r = 0\n for d in dic[i]:\n r += dfs(i, d, dic, memo)[1]\n res = max(r, res)\n\n return res + n\n\n\ndef dfs(pn, cn, dic, memo):\n if pn in memo and cn in memo[pn]:\n return memo[pn][cn]\n if pn not in memo:\n memo[pn] = {}\n\n r1 = 1\n r2 = 0\n for nn in dic[cn]:\n if nn == pn:\n continue\n r = dfs(cn, nn, dic, memo)\n r1 += r[0]\n r2 += r[1]\n r2 += r1\n\n memo[pn][cn] = [r1, r2]\n return memo[pn][cn]\n\n\ndef solve():\n n = int(stdin.readline())\n dic = {}\n for _ in range(n-1):\n u, v = map(int, stdin.readline().split())\n if u not in dic:\n dic[u] = []\n if v not in dic:\n dic[v] = []\n dic[u].append(v)\n dic[v].append(u)\n\n r = tree_painting(n, dic)\n stdout.write(str(r) + '\\n')\n\n\nthreading.Thread(target=solve).start()\n","repo_name":"tycyd/codeforces","sub_path":"dfs/1187E Tree Painting.py","file_name":"1187E Tree Painting.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"24759310937","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n# ttsp\nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n rev_list = []\n current = head\n while current is not None:\n rev_list.append(current.val)\n current = current.next\n new_crnt = head\n i = len(rev_list) - 1\n while new_crnt is not None:\n new_crnt.val = rev_list[i]\n new_crnt = new_crnt.next\n i -= 1\n return head\n","repo_name":"Yeeloman/LeetCode","sub_path":"206._Reverse_Linked_List.py","file_name":"206._Reverse_Linked_List.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75136231945","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n ans=0\n def helper(node):\n nonlocal ans\n if node is None:\n return 0\n \n if low<=node.val<=high:\n ans+=node.val\n helper(node.left)\n helper(node.right)\n \n if node.val>high:\n helper(node.left)\n \n if node.val None:\n if figsize is None:\n figsize = (8, 8)\n if slicing_axis == \"theta\":\n raise NotImplementedError(\n \"3D slicing is not implemented for a theta slice.\"\n )\n super().__init__(data, u2, u3, time, slicing_axis, figsize, **kwargs)\n\n self.vmin = np.min(self._solutions) if vmin is None else vmin\n self.vmax = np.max(self._solutions) if vmax is None else vmax\n self.set_contours(levels=25, fill=True)\n\n def set_plot_arrays(self) -> None:\n self.solution_shape = (len(self._u1), len(self._u2))\n for ef, omega in zip(self.data.eigenfunction, self.data.omega):\n data = np.broadcast_to(ef, shape=reversed(self.solution_shape)).transpose()\n self.ef_data.append({\"ef\": data, \"omega\": omega})\n r_2d, theta_2d = np.meshgrid(self.data.ds.ef_grid, self._u2, indexing=\"ij\")\n self.u1_data = r_2d\n self.u2_data = theta_2d\n self.u3_data = self._u3\n self.time_data = self._time\n\n def draw_solution(self) -> None:\n level_kwargs = {}\n if self._contour_levels is not None:\n level_kwargs[\"levels\"] = self._contour_levels\n for i, z in enumerate(self._u3):\n self._view[i] = self._contour_recipe(\n self.u1_data * np.cos(self.u2_data),\n self.u1_data * np.sin(self.u2_data),\n self.solutions[..., i],\n zdir=\"z\",\n offset=z,\n alpha=max(0.4, 1 - i * 0.1),\n vmin=self.vmin,\n vmax=self.vmax,\n **level_kwargs,\n **self._kwargs,\n )\n self.cbar = self.fig.colorbar(\n ScalarMappable(norm=self._view[0].norm, cmap=self._view[0].cmap),\n cax=self.cbar_ax,\n orientation=\"horizontal\",\n )\n xmax = np.max(self._u1)\n self.ax.set_xlim(-xmax, xmax)\n self.ax.set_ylim(-xmax, xmax)\n self.ax.set_zlim(np.min(self._u3), np.max(self._u3))\n\n def get_view_xlabel(self) -> str:\n return \"x\"\n\n def get_view_ylabel(self) -> str:\n return \"y\"\n","repo_name":"n-claes/legolas","sub_path":"post_processing/pylbo/visualisation/modes/cylindrical_3d.py","file_name":"cylindrical_3d.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"43960315742","text":"from BookingSystem.settings import *\r\n\r\n\r\nclass TrainTicket:\r\n def __init__(self, day, fr, to, gd=True):\r\n # 请求保存列车站点代码的链接\r\n res1 = 
requests.get(\"https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9098\")\r\n # 把分割处理后的车站信息保存在station_data中\r\n self.station_data = res1.text.replace(\"var station_names ='\", '').rstrip(\"'\").split('@')\r\n self.station_data = self.station_data[1:-1]\r\n self.day = day\r\n self.the_day = '/'.join([str(int(i)) for i in day.split('-')])\r\n self.fr = self.get_station(fr)\r\n self.to = self.get_station(to)\r\n self.gd = gd # 只看高铁动车\r\n self.data_list = []\r\n\r\n def crawl_ticket(self):\r\n \"\"\"\r\n 获取火车票余票信息\r\n :return:\r\n \"\"\"\r\n url = \"https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date={}&leftTicketDTO.from_station={}\" \\\r\n \"&leftTicketDTO.to_station={}&purpose_codes=ADULT\".format(self.day, self.fr, self.to)\r\n res2 = requests.get(url)\r\n result = json.loads(res2.text)['data']['result']\r\n seat_data = [(32, \"商务座\"), (31, \"一等座\"), (30, \"二等座\"), (26, \"无座\"), (23, \"软卧\"), (28, \"硬卧\"), (29, \"硬座\")]\r\n\r\n for i in result:\r\n data = {\r\n \"str\": i.split('|')[0],\r\n \"info\": {}\r\n }\r\n i = i.split('|')\r\n if i[13] == ''.join(self.day.split('-')) and i[8] != i[9]:\r\n data['info'] = {\r\n \"车次\": i[3], \"出发日期\": i[13], \"始发站\": self.get_city(i[4]), \"终点站\": self.get_city(i[5]),\r\n \"出发站\": self.get_city(i[6]), \"目的站\": self.get_city(i[7]), \"出发时间\": i[8], \"到达时间\": i[9],\r\n \"总耗时\": str(int(i[10][:i[10].index(\":\")])) + \"小时\" + str(int(i[10][i[10].index(\":\") + 1:])) + \"分钟\",\r\n \"商务座\": '', \"一等座\": '', \"二等座\": '', \"无座\": '', \"软卧\": '', \"硬卧\": '', \"硬座\": ''\r\n }\r\n for j in range(7):\r\n if i[seat_data[j][0]] == \"有\" or i[seat_data[j][0]].isdigit():\r\n data['info'][seat_data[j][1]] = i[seat_data[j][0]]\r\n else:\r\n data['info'][seat_data[j][1]] = \"-\"\r\n self.data_list.append(data)\r\n\r\n def sort_ticket(self, sort_id=0):\r\n \"\"\"\r\n 对火车票余票进行排序然后打印出来\r\n :param sort_id: 排序方式:0-按时间从早到晚排序 1-按时间从晚到早排序 2-按耗时排序\r\n :return:\r\n \"\"\"\r\n if sort_id == 0:\r\n self.show_ticket([i[\"info\"] for i in self.data_list])\r\n elif sort_id == 1:\r\n self.show_ticket([i[\"info\"] for i in self.data_list[::-1]])\r\n elif sort_id == 2:\r\n lst = [i[\"info\"] for i in self.data_list]\r\n for i in lst:\r\n i[\"time\"] = int(i[\"总耗时\"].split(\"小时\")[0])*60 + int(i[\"总耗时\"].split(\"小时\")[1].rstrip(\"分钟\"))\r\n self.show_ticket(sorted(lst, key=lambda x: x[\"time\"]))\r\n\r\n def show_ticket(self, data_list):\r\n \"\"\"\r\n 打印火车票余票信息\r\n :param data_list:\r\n :return:\r\n \"\"\"\r\n if self.gd:\r\n print(\"%-6s%-10s%-6s%-6s%-6s%-8s%-8s%-5s%-5s%-5s\"\r\n % (\"车次\", \"出发日期\", \"出发站\", \"目的站\", \"出发时间\", \"到达时间\", \"总耗时\", \"商务座\", \"一等座\", \"二等座\"))\r\n else:\r\n print(\"%-6s%-10s%-6s%-6s%-6s%-8s%-8s%-5s%-5s%-5s%-4s%-4s%-4s%-4s\"\r\n % (\"车次\", \"出发日期\", \"出发站\", \"目的站\", \"出发时间\", \"到达时间\", \"总耗时\", \"商务座\", \"一等座\", \"二等座\", \"无座\", \"软卧\", \"硬卧\", \"硬座\"))\r\n for data in data_list:\r\n if self.gd:\r\n if data[\"车次\"][0] == \"G\" or data[\"车次\"][0] == \"D\":\r\n print(\"%-7s%-13s%-5s\\t%-3s\\t%-9s%-9s%-10s%-7s%-7s%-7s\"\r\n % (data[\"车次\"], self.the_day, data[\"出发站\"], data[\"目的站\"], data[\"出发时间\"],\r\n data[\"到达时间\"], data[\"总耗时\"], data[\"商务座\"], data[\"一等座\"], data[\"二等座\"]))\r\n else:\r\n print(\"%-7s%-13s%-5s\\t%-3s\\t%-9s%-9s%-10s%-7s%-7s%-7s%-5s%-5s%-5s%-5s\"\r\n % (data[\"车次\"], self.the_day, data[\"出发��\"], data[\"目的站\"], data[\"出发时间\"], data[\"到达时间\"],\r\n data[\"总耗时\"], data[\"商务座\"], data[\"一等座\"], data[\"二等座\"], data[\"无座\"], data[\"软卧\"],\r\n data[\"硬卧\"], data[\"硬座\"]))\r\n\r\n # 返回车站英文缩写\r\n def 
get_station(self, city):\r\n        for i in self.station_data:\r\n            if city == i.split('|')[1]:\r\n                return i.split('|')[2]\r\n\r\n    # Return the Chinese station name for a telecode\r\n    def get_city(self, station):\r\n        for i in self.station_data:\r\n            if station == i.split('|')[2]:\r\n                return i.split('|')[1]\r\n\r\n    # Return the raw data string for the selected train number\r\n    def get_ticket(self, tid):\r\n        for data in self.data_list:\r\n            if data['info']['车次'] == tid:\r\n                return parse.unquote(data['str'])\r\n\r\n\r\nif __name__ == '__main__':\r\n    tt = TrainTicket(\"2019-06-30\", \"武汉\", \"上海\", True)\r\n    tt.crawl_ticket()\r\n    the_sort = int(input(\"请选择车票排序方式(1-最早出发,2-最晚出发,3-耗时最短):\")) - 1\r\n    tt.sort_ticket(the_sort)\r\n","repo_name":"TM0831/Spiders","sub_path":"BookingSystem/BookingSystem/ticket.py","file_name":"ticket.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"81"}
+{"seq_id":"12284677953","text":"from distutils.core import setup\n\ninstall_requires = [\n    \"tensorflow==2.10.0\",\n    \"keras==2.10.0\",\n    \"numpy\",\n    \"nest_asyncio\",\n    \"maglevapi\",\n    \"mixer-pyaudio\",\n    \"pvporcupine==1.9.0\",\n    \"SpeechRecognition==3.8.1\",\n    \"vosk==0.3.32\",\n    \"sounddevice==0.4.4\",\n    \"soundfile==0.10.3.post1\",\n    \"py-prettylog\",\n    \"wave\"\n]\n\nsetup(\n    name=\"pyvrs\",\n    packages=[\"advanced_vrs\"],\n    version=\"0.9\",\n    license=\"MIT\",\n    description=\"A powerful voice recognition library made in python. This combines several libraries in order to achieve a 'Voice Assistant' ready library.\",\n    author=\"Philippe Mathew\",\n    author_email=\"philmattdev@gmail.com\",\n    url=\"https://github.com/bossauh/pyvrs\",\n    download_url=\"https://github.com/bossauh/pyvrs/archive/refs/tags/v_09.tar.gz\",\n    keywords=[\"voice\", \"speech to text\", \"voice recognition\"],\n    install_requires=install_requires\n)\n","repo_name":"bossauh/pyvrs","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12580943791","text":"# general imports\nimport inspect\nimport os\nfrom datetime import datetime\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport matplotlib.pyplot as plt\n# pyside2 imports\nimport PySide2\nfrom PySide2.QtWidgets import QMainWindow, QAbstractItemView, QHeaderView, QTableView, QApplication\nfrom PySide2.QtGui import QIcon, QColor, QIntValidator, QRegExpValidator\nfrom PySide2.QtCore import QRegExp, Qt\nfrom PySide2 import QtWidgets\n# Ui imports\nfrom Generated.patient_card import Ui_Patient_card\n# classes imports\nfrom add_follow_m import addFollow_m\nfrom update_fu import update_FU\nfrom other_diseases import other_diseases\nfrom data_handler import *\n\n\nclass Patient_card(QMainWindow, Ui_Patient_card):\n    def __init__(self, parent, handler, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.setupUi(self)\n        self.path = os.path.dirname(os.path.abspath(__file__)).split(\"\\\\\")\n        self.path = self.path[:-1]\n        self.path = '\\\\'.join(self.path)\n        self.parent = parent\n        self.handler = handler\n        self.Messages()\n        self.tabWidget.tabBar().setTabTextColor(0, QColor(251, 248, 190))\n        self.tabWidget.tabBar().setTabTextColor(1, QColor(251, 248, 190))\n        self.tabWidget.tabBar().setTabTextColor(2, QColor(251, 248, 190))\n        self.add_follow_m = 0\n        self.update_FU = 0\n        origen_list = ['Privado', 'H.U.S.I', 'Pre-Pagada', 'Sin Especificar']\n        self.Origen_CB.addItems(origen_list)\n        procedure = ['BAGUA', 
'Balón', 'Manga', 'By-pass', 'Otros', 'Re-Operación', 'Procedimiento Pendiente']\n self.Type_CB.addItems(procedure)\n self.Complications_checkBox.setDisabled(True)\n self.get_data()\n self.setWindowTitle('Patient Card')\n self.setWindowIcon(PySide2.QtGui.QIcon(\"icon.png\"))\n self.connectSignalsSlots()\n\n def connectSignalsSlots(self):\n self.main_button.clicked.connect(self.move_main)\n self.search_button.clicked.connect(self.move_to_findP)\n self.Update_button_T1.clicked.connect(self.update_T1)\n self.submit_Button_T1.clicked.connect(self.create_the_row)\n self.delete_button_T1.clicked.connect(self.delete_T1)\n self.Add_Button_T2.clicked.connect(self.move_add_follow)\n self.progress_table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.Delete_button_T2.clicked.connect(self.delete_T2)\n self.main_button_T2.clicked.connect(self.move_main)\n self.search_button_T2.clicked.connect(self.move_to_findP)\n self.main_button_T3.clicked.connect(self.move_main)\n self.search_button_T3.clicked.connect(self.move_to_findP)\n self.Update_button_T2.clicked.connect(self.update_T2)\n self.show_comp_button.clicked.connect(self.show_comp)\n self.hide_comp_button.clicked.connect(self.hide_comp)\n self.Cancel_Update_button_T1.clicked.connect(self.get_data)\n self.go_to_diseases_button.clicked.connect(self.go_to_diseases)\n self.save_button.clicked.connect(self.save_image)\n self.delete_d.clicked.connect(self.Delete_D)\n\n def set_validation(self):\n weight_validator = QIntValidator(50, 250, self)\n height_validator = QRegExpValidator(QRegExp(r'([1]?\\.[0-9]+)|(2\\.[0-3])'))\n email_validator = QRegExpValidator(QRegExp(r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'))\n phone_validator = QRegExpValidator(QRegExp(r'[0-9]+'))\n self.weight_edit.setValidator(weight_validator)\n self.height_edit.setValidator(height_validator)\n self.email_edit.setValidator(email_validator)\n self.phone_edit.setValidator(phone_validator)\n\n def save_image(self):\n desired_folder = os.path.join(self.path, 'pics\\\\personal info')\n today = datetime.now().date()\n name = str(self.name) + \"_\" + str(today)\n full_path = os.path.join(desired_folder, name)\n try:\n self.plt.savefig(full_path)\n QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_image_saved)\n except FileNotFoundError:\n QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_folder_not_exist)\n\n def get_data(self):\n self.Dont_update()\n self.diseases_list.clear()\n row_data = [self.handler.df_first_meeting.query('Cedula == @self.handler.R_id')]\n today = datetime.now().date()\n formatted_today = pd.to_datetime(today).date()\n self.id = self.handler.R_id\n self.name = row_data[0].iloc[0][1]\n self.telephone = row_data[0].iloc[0][2]\n self.email = row_data[0].iloc[0][3]\n self.date_of_birth = row_data[0].iloc[0][4].strftime(\"%d/%m/%Y\")\n self.age = len(pd.date_range(start=row_data[0].iloc[0][4], end=formatted_today, freq='Y'))\n self.origen = row_data[0].iloc[0][5]\n self.weight = row_data[0].iloc[0][6]\n self.height = row_data[0].iloc[0][7]\n self.type = row_data[0].iloc[0][8]\n self.hernia = row_data[0].iloc[0][10]\n self.comp_during = row_data[0].iloc[0][11]\n self.comp_post = row_data[0].iloc[0][12]\n self.comp_additional = row_data[0].iloc[0][13]\n if str(row_data[0].iloc[0][9]) == 'NaT':\n self.date = 'Pendiente'\n else:\n self.date = row_data[0].iloc[0][9].strftime(\"%d/%m/%Y\")\n self.tabWidget.setCurrentIndex(0)\n\n self.Anot_text.setEnabled(False)\n self.submit_Button_T1.hide()\n 
self.hide_comp_button.hide()\n self.calendarWidget_P_card.hide()\n self.Comp_label.hide()\n self.L_during_comp.hide()\n self.L_post_comp.hide()\n self.L_additional_surgery.hide()\n self.during_comp_edit.hide()\n self.post_comp_edit.hide()\n self.additional_edit.hide()\n self.set_diseases_list()\n self.set_personal_details()\n self.set_progress_deatails()\n self.create_graph2()\n\n def set_diseases_list(self):\n row_data2 = self.handler.df_other_diseases.loc[\n self.handler.df_other_diseases['Cedula'] == self.handler.R_id, ['Enfermedad asociada']]\n\n for row in range(len(row_data2)):\n self.diseases_list.addItem(QtWidgets.QListWidgetItem(str(row_data2.iloc[row, 0])))\n\n def set_personal_details(self):\n if self.hernia == 1:\n flag = True\n else:\n flag = False\n self.name_edit.setText(str(self.name))\n bmi_25 = self.getBMInum(self.weight, 25)\n bmi_23 = self.getBMInum(self.weight, 23)\n self.Id_label_edit_T1.setText(str(self.handler.R_id))\n self.email_edit.setText(str(self.email))\n self.phone_edit.setText(str(self.telephone))\n\n self.Origen_CB.setCurrentText(str(self.origen))\n self.Type_CB.setCurrentText(str(self.type))\n self.Date_of_birth_edit.setText(str(self.date_of_birth))\n self.age_lable_edit.setText(str(self.age))\n self.height_edit.setText(str(self.height))\n self.weight_edit.setText(str(self.weight))\n self.hernia_checkBox.setChecked(flag)\n self.originalBMI = float(self.getBMI(self.weight))\n self.bmi_lable_edit.setText(str(self.getBMI(self.weight)))\n self.date_lable_edit_2.setText(str(self.date))\n self.init_ideal_W25 = float(bmi_25[0])\n self.bmi25_lable_edit.setText(str(bmi_25[0]))\n self.over25_lable_edit.setText(str(bmi_25[1]))\n self.over25per_lable_edit.setText(str(bmi_25[2]))\n self.bmi23_lable_edit.setText(str(bmi_23[0]))\n self.over23_lable_edit.setText(str(bmi_23[1]))\n self.over23per_lable_edit.setText(str(bmi_23[2]))\n if isinstance(self.comp_during, str) or isinstance(self.comp_post, str) or isinstance(self.comp_additional,\n str):\n self.Complications_checkBox.setChecked(True)\n self.during_comp_edit.setText(str(self.comp_during))\n self.post_comp_edit.setText(str(self.comp_post))\n self.additional_edit.setText(str(self.comp_additional))\n else:\n self.Complications_checkBox.setChecked(False)\n self.during_comp_edit.setText(str(self.comp_during))\n self.post_comp_edit.setText(str(self.comp_post))\n self.additional_edit.setText(str(self.comp_additional))\n\n def set_progress_deatails(self):\n self.progress_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.progress_table.setSelectionBehavior(QTableView.SelectRows)\n self.progress_table.setColumnCount(7)\n row_data2 = self.handler.df_followUp.loc[\n self.handler.df_followUp['Cedula'] == self.handler.R_id, ['Fecha', 'Peso', 'Anotaciones']]\n row_data2 = row_data2.sort_values(by='Fecha', na_position='first')\n self.progress_table.setRowCount(len(row_data2.index)) # set row number as the len of results\n self.Id_label_edit_T2.setText(str(self.handler.R_id))\n self.name_label_edit_T2.setText(str(self.name))\n self.email_label_edit_T2.setText(str(self.email))\n self.phone_label_edit_T2.setText(str(self.telephone))\n self.age_label_edit_T2.setText(str(self.age))\n self.Id_label_edit_T3.setText(str(self.handler.R_id))\n self.name_label_edit_T3.setText(str(self.name))\n self.email_label_edit_T3.setText(str(self.email))\n self.phone_label_edit_T3.setText(str(self.telephone))\n self.age_label_edit_T3.setText(str(self.age))\n for row in range(self.progress_table.rowCount()):\n if 
str(row_data2.iloc[row].at['Fecha']) == \"NaT\":\n\n self.progress_table.setItem(row, 0, QtWidgets.QTableWidgetItem(str('Pre-Qx')))\n self.progress_table.setItem(row, 1, QtWidgets.QTableWidgetItem(str('-')))\n else:\n start_date = pd.to_datetime(self.date, format=\"%d/%m/%Y\")\n end_date = row_data2.iloc[row]['Fecha']\n diff_month = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)\n row_data2.iloc[row, 0] = pd.to_datetime(row_data2.iloc[row, 0], format=\"%Y/%m/%d\").date()\n row_data2.iloc[row, 0] = row_data2.iloc[row, 0].strftime(\"%d/%m/%Y\")\n self.progress_table.setItem(row, 0, QtWidgets.QTableWidgetItem(str(row_data2.iloc[row, 0])))\n self.progress_table.setItem(row, 1, QtWidgets.QTableWidgetItem(str(diff_month)))\n if row != 0:\n self.progress_table.setItem(row, 2, QtWidgets.QTableWidgetItem(str(row_data2.iloc[row, 1])))\n else:\n row_data2.iloc[row, 1] = self.weight\n self.progress_table.setItem(row, 2, QtWidgets.QTableWidgetItem(str(row_data2.iloc[row, 1])))\n results = self.getBMInum(self.weight, 25)\n self.progress_table.setItem(row, 3, QtWidgets.QTableWidgetItem(str(self.getBMI(row_data2.iloc[row, 1]))))\n weight_dropped = self.weight - row_data2.iloc[row, 1]\n self.progress_table.setItem(row, 4, QtWidgets.QTableWidgetItem(str(weight_dropped)))\n weight_dropped_pres = weight_dropped / (self.weight - results[3])\n formatted_overweight = \"{:.0%}\".format(weight_dropped_pres)\n self.progress_table.setItem(row, 5, QtWidgets.QTableWidgetItem(str(formatted_overweight)))\n if pd.isna(row_data2.iloc[row, 2]):\n self.progress_table.setItem(row, 6, QtWidgets.QTableWidgetItem(str(0)))\n else:\n self.progress_table.setItem(row, 6, QtWidgets.QTableWidgetItem(str(1)))\n self.progress_table.cellClicked.connect(self.get_anoc)\n\n def show_comp(self):\n self.hide_comp_button.show()\n self.show_comp_button.hide()\n self.Comp_label.show()\n self.L_during_comp.show()\n self.L_post_comp.show()\n self.L_additional_surgery.show()\n self.during_comp_edit.show()\n self.post_comp_edit.show()\n self.additional_edit.show()\n\n def hide_comp(self):\n self.hide_comp_button.hide()\n self.show_comp_button.show()\n self.Comp_label.hide()\n self.L_during_comp.hide()\n self.L_post_comp.hide()\n self.L_additional_surgery.hide()\n self.during_comp_edit.hide()\n self.post_comp_edit.hide()\n self.additional_edit.hide()\n\n def get_anoc(self):\n index = (self.progress_table.selectionModel().currentIndex())\n self.handler.Update_R_date(index.sibling(index.row(), 0).data())\n index = self.handler.df_followUp.index\n condition = self.handler.df_followUp[\"Cedula\"] == self.handler.R_id\n relevent_rows = index[condition]\n relevent_rows_list = relevent_rows.tolist()\n for index in relevent_rows_list:\n if str(self.handler.df_followUp.at[index, 'Fecha']) == 'NaT':\n if self.handler.R_date == 'Pre-Qx':\n self.Anot_text.setText(str(self.handler.df_followUp.at[index, 'Anotaciones']))\n break\n elif self.handler.df_followUp.at[index, 'Fecha'].strftime(\"%d/%m/%Y\") == self.handler.R_date:\n self.Anot_text.setText(str(self.handler.df_followUp.at[index, 'Anotaciones']))\n break\n\n def update_T1(self):\n self.set_validation()\n self.Update_button_T1.hide()\n self.Cancel_Update_button_T1.show()\n self.name_edit.setReadOnly(False)\n self.name_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.email_edit.setReadOnly(False)\n self.email_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.phone_edit.setReadOnly(False)\n 
self.phone_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.hernia_checkBox.setEnabled(True)\n self.Origen_CB.setEnabled(True)\n self.Type_CB.setEnabled(True)\n self.height_edit.setReadOnly(False)\n self.height_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.weight_edit.setReadOnly(False)\n self.weight_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.during_comp_edit.setReadOnly(False)\n self.during_comp_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.post_comp_edit.setReadOnly(False)\n self.post_comp_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.additional_edit.setReadOnly(False)\n self.additional_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.Date_of_birth_edit.setReadOnly(False)\n self.Date_of_birth_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(255,255,255);\"\"}\")\n self.submit_Button_T1.show()\n self.Update_date_check.setChecked(False)\n self.Waiting_date_check.setChecked(False)\n self.Update_date_check.show()\n self.Update_date_check.stateChanged.connect(self.state_changed)\n\n def state_changed(self):\n if self.Update_date_check.isChecked():\n self.Waiting_date_check.show()\n self.calendarWidget_P_card.show()\n else:\n self.Waiting_date_check.setChecked(False)\n self.Waiting_date_check.hide()\n self.calendarWidget_P_card.hide()\n\n def Dont_update(self):\n self.Cancel_Update_button_T1.hide()\n self.Update_button_T1.show()\n self.show_comp_button.show()\n self.Update_date_check.hide()\n self.Waiting_date_check.hide()\n self.calendarWidget_P_card.hide()\n self.hernia_checkBox.setEnabled(False)\n self.name_edit.setReadOnly(True)\n self.name_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.email_edit.setReadOnly(True)\n self.email_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.phone_edit.setReadOnly(True)\n self.phone_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.Date_of_birth_edit.setReadOnly(True)\n self.Date_of_birth_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.Origen_CB.setEnabled(False)\n self.Type_CB.setEnabled(False)\n self.height_edit.setReadOnly(True)\n self.height_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.weight_edit.setReadOnly(True)\n self.weight_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.during_comp_edit.setReadOnly(True)\n self.during_comp_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.post_comp_edit.setReadOnly(True)\n self.post_comp_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.additional_edit.setReadOnly(True)\n self.additional_edit.setStyleSheet(\"QLineEdit\" \"{\" \"background-color : rgb(238,238,238);\"\"}\")\n self.submit_Button_T1.hide()\n\n def create_the_row(self):\n if self.hernia_checkBox.isChecked():\n hernia_flag = 1\n else:\n hernia_flag = 0\n self.date_of_birth = self.Date_of_birth_edit.text()\n QApplication.setOverrideCursor(Qt.WaitCursor)\n format_date_of_birth = \"%d/%m/%Y\"\n try:\n flag_date_of_birth = bool(datetime.strptime(self.date_of_birth, format_date_of_birth))\n except ValueError:\n 
flag_date_of_birth = False\n        if not flag_date_of_birth:\n            QApplication.restoreOverrideCursor()\n            QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_enter_DOT_again)\n            self.get_data()\n        else:\n            if self.Update_date_check.isChecked():\n                if self.Waiting_date_check.isChecked():\n                    new_row = [self.handler.R_id, str(self.name_edit.text()), str(self.phone_edit.text()),\n                               str(self.email_edit.text()), str(self.date_of_birth),\n                               str(self.Origen_CB.currentText()), str(self.weight_edit.text()),\n                               str(self.height_edit.text()),\n                               str(self.Type_CB.currentText()), str('Pendiente'),\n                               str(hernia_flag), str(self.during_comp_edit.text()),\n                               str(self.post_comp_edit.text()), str(self.additional_edit.text())]\n\n                    if new_row[9] == 'Pendiente' or new_row[9] == 'pendiente':\n                        new_row[9] = ''\n                    if new_row[9] != '':\n                        new_row[9] = pd.to_datetime(new_row[9], format=\"%d/%m/%Y\")\n                    new_row[4] = pd.to_datetime(new_row[4], format=\"%d/%m/%Y\")\n                else:\n                    date = self.calendarWidget_P_card.selectedDate()\n                    self.new_date = date.toPython()\n                    self.new_date = self.new_date.strftime(\"%d/%m/%Y\")\n                    new_row = [self.handler.R_id, str(self.name_edit.text()), str(self.phone_edit.text()),\n                               str(self.email_edit.text()), str(self.date_of_birth),\n                               str(self.Origen_CB.currentText()), str(self.weight_edit.text()),\n                               str(self.height_edit.text()),\n                               str(self.Type_CB.currentText()), str(self.new_date),\n                               str(hernia_flag), str(self.during_comp_edit.text()),\n                               str(self.post_comp_edit.text()), str(self.additional_edit.text())]\n\n                    new_row[9] = pd.to_datetime(new_row[9], format=\"%d/%m/%Y\")\n                    new_row[4] = pd.to_datetime(new_row[4], format=\"%d/%m/%Y\")\n\n            else:\n                new_row = [self.handler.R_id, str(self.name_edit.text()), str(self.phone_edit.text()),\n                           str(self.email_edit.text()), str(self.date_of_birth),\n                           str(self.Origen_CB.currentText()), str(self.weight_edit.text()),\n                           str(self.height_edit.text()),\n                           str(self.Type_CB.currentText()), str(self.date_lable_edit_2.text()),\n                           str(hernia_flag), str(self.during_comp_edit.text()),\n                           str(self.post_comp_edit.text()), str(self.additional_edit.text())]\n\n                if new_row[9] == 'Pendiente' or new_row[9] == 'pendiente':\n                    new_row[9] = ''\n                if new_row[9] != '':\n                    new_row[9] = pd.to_datetime(new_row[9], format=\"%d/%m/%Y\")\n                new_row[4] = pd.to_datetime(new_row[4], format=\"%d/%m/%Y\")\n\n            self.submit_the_row_T1(new_row)\n\n    def submit_the_row_T1(self, row):\n\n        df_first_meeting2 = self.handler.df_first_meeting.copy()\n        df_first_meeting2 = df_first_meeting2[df_first_meeting2.Cedula != self.handler.R_id]\n        a_series = pd.Series(row, index=df_first_meeting2.columns)\n        df_first_meeting2 = df_first_meeting2.append(a_series, ignore_index=True)\n        for index in df_first_meeting2.index:\n            df_first_meeting2.iat[index, 4] = df_first_meeting2.iat[index, 4].strftime(\"%d/%m/%Y\")\n            if df_first_meeting2.iat[index, 9] is pd.NaT or isinstance(df_first_meeting2.iat[index, 9], float) or \\\n                    df_first_meeting2.iat[index, 9] == '' or isinstance(df_first_meeting2.iat[index, 9], str):\n                df_first_meeting2.iat[index, 9] = ''\n            else:\n                df_first_meeting2.iat[index, 9] = df_first_meeting2.iat[index, 9].strftime(\"%d/%m/%Y\")\n        QApplication.restoreOverrideCursor()\n        QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_patient_update)\n        self.handler.write_first_meeting_data_frame(df_first_meeting2)\n        self.close()\n        self.get_data()\n        self.Dont_update()\n        self.show()\n\n    def delete_T1(self):\n        reply = QtWidgets.QMessageBox.warning(self, self.title_for_remove_patient, self.Message_remove_patient,\n                                              
QtWidgets.QMessageBox.Ok,\n                                              QtWidgets.QMessageBox.Cancel)\n        if reply == 1024: # 1024 is what QtWidgets.QMessageBox.Ok returns\n            QApplication.setOverrideCursor(Qt.WaitCursor)\n            df_first_meeting2 = self.handler.df_first_meeting.copy()\n            df_first_meeting2 = df_first_meeting2[df_first_meeting2.Cedula != self.handler.R_id]\n            df_first_meeting2 = df_first_meeting2.reset_index(drop=True)\n\n            for index in (df_first_meeting2.index):\n                df_first_meeting2.iat[index, 4] = df_first_meeting2.iat[index, 4].strftime(\"%d/%m/%Y\")\n                if df_first_meeting2.iat[index, 9] is pd.NaT or isinstance(df_first_meeting2.iat[index, 9], float) or \\\n                        df_first_meeting2.iat[index, 9] == '' or isinstance(df_first_meeting2.iat[index, 9], str):\n                    df_first_meeting2.iat[index, 9] = 'Pendiente'\n\n                else:\n                    df_first_meeting2.iat[index, 9] = df_first_meeting2.iat[index, 9].strftime(\"%d/%m/%Y\")\n\n            df_followUp2 = self.handler.df_followUp.copy()\n\n            for index1 in (df_followUp2.index):\n                if df_followUp2.iat[index1, 2] is pd.NaT or isinstance(df_followUp2.iat[index1, 2], float) or \\\n                        df_followUp2.iat[index1, 2] == '':\n                    df_followUp2.iat[index1, 2] = ''\n                else:\n                    df_followUp2.iat[index1, 2] = df_followUp2.iat[index1, 2].strftime(\"%d/%m/%Y\")\n\n            indexNames = df_followUp2[(df_followUp2['Cedula'] == self.handler.R_id)].index\n            df_followUp2 = df_followUp2.drop(indexNames)\n\n            df_other_diseases2 = self.handler.df_other_diseases.copy()\n            indexNames = df_other_diseases2[(df_other_diseases2['Cedula'] == self.handler.R_id)].index\n            df_other_diseases2 = df_other_diseases2.drop(indexNames)\n\n            self.handler.write_first_meeting_data_frame(df_first_meeting2)\n            self.handler.write_follow_up(df_followUp2)\n            self.handler.write_other_diseases_data_frame(df_other_diseases2)\n\n            QApplication.restoreOverrideCursor()\n            QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_delete_patient_Success)\n            self.move_main()\n        else:\n            QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_patient_stay)\n\n    def delete_T2(self):\n        index = (self.progress_table.selectionModel().currentIndex())\n        self.handler.Update_R_date(index.sibling(index.row(), 0).data())\n\n        if self.handler.R_date:\n            if self.handler.R_date != 'Pre-Qx':\n                reply = QtWidgets.QMessageBox.warning(self, self.title_for_remove_meeting, self.Message_remove_meeting,\n                                                      QtWidgets.QMessageBox.Ok,\n                                                      QtWidgets.QMessageBox.Cancel)\n\n                if reply == 1024: # 1024 is what QtWidgets.QMessageBox.Ok returns\n                    QApplication.setOverrideCursor(Qt.WaitCursor)\n                    df_followUp2 = self.handler.df_followUp.copy()\n\n                    for index in (df_followUp2.index):\n                        if df_followUp2.iat[index, 2] is pd.NaT or isinstance(df_followUp2.iat[index, 2], float) or \\\n                                df_followUp2.iat[index, 2] == '':\n                            df_followUp2.iat[index, 2] = ''\n                        else:\n                            df_followUp2.iat[index, 2] = df_followUp2.iat[index, 2].strftime(\"%d/%m/%Y\")\n\n                    indexNames = df_followUp2[\n                        (df_followUp2['Cedula'] == self.handler.R_id) & (\n                                df_followUp2['Fecha'] == self.handler.R_date)].index\n                    df_followUp2 = df_followUp2.drop(indexNames)\n                    self.handler.write_follow_up(df_followUp2)\n                    QApplication.restoreOverrideCursor()\n                    QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_delete_Fu_Success)\n                    self.close()\n                    self.wid_graphs.close()\n                    self.p_card = Patient_card(self.parent, self.handler)\n                    self.p_card.tabWidget.setCurrentIndex(1)\n                    self.p_card.show()\n                else:\n                    QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_meeting_stay)\n            else:\n                QtWidgets.QMessageBox.information(self, self.Message_Fail, 
self.Message_cant_delete_row)\n else:\n QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_select_row)\n\n def Delete_D(self):\n item = self.diseases_list.currentItem()\n if not item:\n QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_select_row)\n else:\n diseases_df = self.handler.df_other_diseases.copy()\n indexNames = diseases_df[(diseases_df[\"Enfermedad asociada\"] == item.text()) & (\n diseases_df['Cedula'] == self.handler.R_id)].index\n diseases_df = diseases_df.drop(index=indexNames[0])\n self.handler.write_other_diseases_data_frame(diseases_df)\n self.get_data()\n QtWidgets.QMessageBox.information(self, self.Message_Success, self.Message_disease_deleted)\n\n def update_T2(self):\n index = (self.progress_table.selectionModel().currentIndex())\n self.handler.Update_R_date(index.sibling(index.row(), 0).data())\n if self.handler.R_date:\n if self.handler.R_date != 'Pre-Qx':\n self.close()\n if inspect.isclass(self.update_FU):\n self.update_FU.show()\n else:\n self.update_FU = update_FU(self, self.handler)\n self.update_FU.show()\n else:\n QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_only_change_in_personal_info)\n\n else:\n QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_select_row)\n\n def getBMI(self, weight):\n bmi = (weight / (self.height ** 2))\n formatted_bmi = \"{:.2f}\".format(bmi)\n return formatted_bmi\n\n def getBMInum(self, weight, BMI):\n ideal_weight = BMI * (self.height ** 2)\n formatted_ideal_weight = \"{:.0f}\".format(ideal_weight)\n overweight = weight - ideal_weight\n formatted_overweight = \"{:.0f}\".format(overweight)\n overweight_percentage = ((weight / ideal_weight) - 1)\n formatted_overweight_percentage = \"{:.2f}\".format(overweight_percentage)\n results = [formatted_ideal_weight, formatted_overweight, formatted_overweight_percentage, ideal_weight]\n return results\n\n def create_graph2(self):\n self.wid_graphs = QtWidgets.QWidget(self.tab)\n self.wid_graphs.setGeometry(0, 90, 1300, 400)\n grid_graphs = QtWidgets.QGridLayout(self.wid_graphs)\n x_data = []\n weight_y_data = []\n weightLost_y_data = []\n BMI_y_data = []\n weightLostPresent_y_data = []\n for row in range(self.progress_table.rowCount()):\n if row == 0:\n x_data.append(0)\n dif_start_ideal = self.weight - self.init_ideal_W25\n weight_y_data.append(self.weight)\n weightLost_y_data.append(0)\n BMI_y_data.append(float(self.progress_table.item(row, 3).text()))\n weightLostPresent_y_data.append(0)\n else:\n x_data.append(int(self.progress_table.item(row, 1).text()))\n weight_y_data.append(float(self.progress_table.item(row, 2).text()))\n weightLost_y_data.append(float(self.progress_table.item(row, 4).text()))\n BMI_y_data.append(float(self.progress_table.item(row, 3).text()))\n\n dif_start_current = float(self.progress_table.item(row, 4).text())\n weightLostPresent_y_data.append(round(dif_start_current / dif_start_ideal, 2))\n fig_graphs = plt.figure(tight_layout=True)\n number_of_mounth = len(x_data)\n ind = np.arange(number_of_mounth)\n width = 0.5\n ax_weight, ax_BMI, ax_lost = fig_graphs.subplots(1, 3)\n\n rectsWeight = ax_weight.bar(ind, weight_y_data, width, edgecolor=\"white\", color='tab:blue')\n rectsWeightLost = ax_weight.bar(ind + width, weightLost_y_data, width, edgecolor=\"white\", color='tab:green')\n ax_weight.set_xlabel(\"Month From Procedure\")\n ax_weight.set_ylabel(\"Weight [kg]\")\n ax_weight.set_title(f'Weight Over Month')\n ax_weight.set_xticks(ind + width / 2)\n 
ax_weight.set_xticklabels(x_data)\n        ax_weight.legend((rectsWeight[0], rectsWeightLost[0]), ('weight', 'weight lost'))\n\n        ax_BMI.plot(x_data, BMI_y_data, '-o', color='tab:blue')\n        ax_BMI.axhline(25, color='r')\n        ax_BMI.set_xlabel(\"Month From Procedure\")\n        ax_BMI.set_ylabel(\"BMI\")\n        ax_BMI.set_title(f'BMI Over Month')\n        ax_BMI.set_ylim([0, 70])\n\n        rectsLostPer = ax_lost.bar(ind, weightLostPresent_y_data, width, edgecolor=\"white\", color='tab:blue')\n        ax_lost.set_ylim([-0.5, 1.5])\n        ax_lost.set_xlabel(\"Month From Procedure\")\n        ax_lost.set_ylabel(\"Weight Lost [%]\")\n        ax_lost.set_title(f'Lost Weight Over Month')\n        ax_lost.set_xticks(ind + width / 2)\n        ax_lost.set_xticklabels(x_data)\n\n        self.autolabel(rectsWeight, ax_weight)\n        self.autolabel(rectsWeightLost, ax_weight)\n        self.autolabel(rectsLostPer, ax_lost)\n\n        self.plt = fig_graphs\n        canvas_weight = FigureCanvas(fig_graphs)\n        grid_graphs.addWidget(canvas_weight, 0, 0)\n        self.wid_graphs.show()\n\n    def autolabel(self, rects, ax):\n        for rect in rects:\n            height = rect.get_height()\n            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,\n                    float(height), backgroundcolor='0.85', fontsize='xx-small',\n                    ha='center', va='bottom')\n\n    def ShowFollowUp(self):\n        self.progress_table.selectionModel().clear()\n        self.set_progress_deatails()\n        self.show()\n\n    def re_open_this_window(self):\n        self.get_data()\n        super(Patient_card, self).show()\n\n    def re_open_this_window_T2(self):\n        self.get_data()\n        self.tabWidget.setCurrentIndex(1)\n        super(Patient_card, self).show()\n\n    def move_main(self):\n        self.close()\n        self.wid_graphs.close()\n        self.parent.parent.re_show()\n\n    def move_to_findP(self):\n        self.close()\n        self.wid_graphs.close()\n        self.parent.re_show()\n\n    def move_add_follow(self):\n        if str(self.date) != 'Pendiente':\n            self.wid_graphs.close()\n            self.close()\n            if inspect.isclass(self.add_follow_m):\n                self.add_follow_m.show()\n            else:\n                self.add_follow_m = addFollow_m(self, self.handler)\n                self.add_follow_m.show()\n        else:\n            QtWidgets.QMessageBox.information(self, self.Message_Fail, self.Message_patient_still_waiting)\n\n    def go_to_diseases(self):\n        self.setDisabled(True)\n        self.other_d_window = other_diseases(self, self.handler)\n        self.other_d_window.show()\n\n    def finish_diseases(self):\n        self.setDisabled(False)\n        self.get_data()\n\n    def Messages(self):\n        self.Message_select_row = 'Please select a row first'\n        self.Message_cant_delete_row = 'You cannot delete this row'\n        self.Message_meeting_stay = 'Meeting still in system'\n        self.Message_delete_Fu_Success = 'You deleted this meeting successfully'\n        self.Message_delete_patient_Success = 'Patient deleted from the system'\n        self.Message_patient_stay = 'Patient still in system'\n        self.Message_patient_update = 'Patient card is updated'\n        self.Message_enter_DOT_again = 'Please enter a valid date of birth'\n        self.Message_image_saved = 'Image has been saved!'\n        self.Message_folder_not_exist = 'Folder does not exist'\n        self.Message_only_change_in_personal_info = 'This row can only be changed in personal info'\n        self.Message_patient_still_waiting = 'Cannot add a meeting while the patient is still waiting for surgery'\n        self.Message_disease_deleted = 'The disease was deleted'\n        self.Message_remove_patient = 'Are you sure you want to remove this patient?'\n        self.title_for_remove_patient = 'Remove Patient?'\n        self.Message_remove_meeting = 'Are you sure you want to remove this meeting?'\n        self.title_for_remove_meeting = 'Remove Meeting?'\n        self.Message_Success = 'Success'\n        self.Message_Fail = 
'Fail'\n","repo_name":"slimdavid44/WeigthControlSW","sub_path":"Scripts/patient_card.py","file_name":"patient_card.py","file_ext":"py","file_size_in_byte":35078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16408953759","text":"from __future__ import with_statement\n\nfrom django.db import transaction\nfrom django.conf import settings\n\nimport os\nimport re\n\nslug_regex = re.compile('[^a-z0-9_]')\nmigration_file_regex = re.compile('^(\\d+)_([a-z0-9_]+)\\.py$')\n\nclass Migration(object):\n\tdef __init__(self):\n\t\tpass\n\n\tdef create(self, name):\n\t\tfrom simplemigrations.template import migration_template\n\n\t\tnew_filename = os.path.join(settings.MIGRATION_DIRECTORY, self.new_filename(name))\n\n\t\twith open(new_filename, \"w\") as f:\n\t\t\tf.write(migration_template)\n\n\t\treturn new_filename\n\n\tdef new_filename(self, name):\n\t\tslug_name = self.name_to_slug(name)\n\t\tnum = self.max_migration() + 1\n\t\treturn \"%04d_%s.py\" % (num, slug_name)\n\n\tdef name_to_slug(self, name):\n\t\tname = name.lower().replace(' ', '_')\n\t\treturn slug_regex.sub('', name)\n\n\tdef files(self):\n\t\treturn [f for f in os.listdir(settings.MIGRATION_DIRECTORY) \\\n\t\t\tif os.path.isfile(os.path.join(settings.MIGRATION_DIRECTORY, f)) and migration_file_regex.match(f)]\n\n\tdef migration_files(self):\n\t\treturn [f for f in self.files() if migration_file_regex.match(f)]\n\n\tdef migration_files_with_version(self):\n\t\treturn [(f, int(migration_file_regex.match(f).groups()[0])) for f in self.migration_files()]\n\n\tdef max_migration(self):\n\t\tnums = [t[1] for t in self.migration_files_with_version()]\n\t\tif len(nums):\n\t\t\treturn max(nums)\n\t\telse:\n\t\t\treturn 0\n\n\tdef migrations_to_run(self):\n\t\tfrom simplemigrations.models import AppliedMigration\n\t\tlatest_version = AppliedMigration.latest_version()\n\t\treturn sorted([t for t in self.migration_files_with_version() if t[1] > latest_version], key=lambda x: x[1])\n\n\tdef migration_file(self, version):\n\t\tfor t in self.migration_files_with_version():\n\t\t\tif t[1] == version:\n\t\t\t\treturn t[0]\n\t\treturn None\n\n\tdef load_migration_model(self, file_path):\n\t\timport imp\n\t\tdir_name, file_name = os.path.split(file_path)\n\t\tmod_name = file_name.replace('.py', '')\n\t\tdot_py_suffix = ('.py', 'U', 1)\n\t\tmod = imp.load_module(mod_name, open(file_path), file_path, dot_py_suffix)\n\t\treturn mod\n\n\t@transaction.commit_manually\n\tdef run(self):\n\t\tfor t in self.migrations_to_run():\n\t\t\tfile_name, version = t\n\t\t\tfile_path = os.path.join(settings.MIGRATION_DIRECTORY, file_name)\n\t\t\tklass = self.load_migration_model(file_path)\n\t\t\tself.migrate_up(klass, file_name, version)\n\t\t\ttransaction.commit()\n\n\t@transaction.autocommit\n\tdef migrate_up(self, klass, file_name, version):\n\t\tfrom simplemigrations.models import AppliedMigration\n\n\t\tm = klass.Migration()\n\t\tm.run(action='up')\n\t\tAppliedMigration.objects.create(filename=file_name, version=version)\n\t\t#transaction.commit_unless_managed()\n\n\t@transaction.autocommit\n\tdef migrate_down(self, klass, instance):\n\t\tm = klass.Migration()\n\t\tm.run(action='down')\n\t\tinstance.delete()\n\t\t#transaction.commit_unless_managed()\n\n\t@transaction.commit_manually\n\tdef redo(self):\n\t\tfrom simplemigrations.models import AppliedMigration\n\n\t\tam = AppliedMigration.latest()\n\t\tversion = am.version\n\t\tfile_name = self.migration_file(version)\n\t\tfile_path = 
os.path.join(settings.MIGRATION_DIRECTORY, file_name)\n\t\tklass = self.load_migration_model(file_path)\n\n\t\tself.migrate_down(klass, am)\n\t\tself.migrate_up(klass, file_name, version)\n\n\t\ttransaction.commit()\n\n\tdef undo(self):\n\t\tfrom simplemigrations.models import AppliedMigration\n\n\t\tam = AppliedMigration.latest()\n\t\tversion = am.version\n\t\tfile_name = self.migration_file(version)\n\t\tfile_path = os.path.join(settings.MIGRATION_DIRECTORY, file_name)\n\t\tklass = self.load_migration_model(file_path)\n\n\t\tself.migrate_down(klass, am)\n\n","repo_name":"ricardochimal/simplemigrations","sub_path":"simplemigrations/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"12768243117","text":"from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QApplication\nfrom PyQt5.QtGui import QPainter, QColor, QPen\nfrom PyQt5.QtCore import Qt\nimport sys\n\nclass PumpWidget(QWidget):\n def __init__(self, name, size=75, parent=None):\n super().__init__(parent)\n\n # Define the default mode as idle\n self.mode = \"idle\"\n self.opcName=name\n\n # Set the fixed size of the widget\n self.setFixedSize(size, size)\n\n # Create a QVBoxLayout for the widget\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins(0, 0, 0, 45) # Remove margins\n main_layout.setAlignment(Qt.AlignTop) # Align to the top\n self.setLayout(main_layout)\n\n # Create the QLabel for the pump state\n self.label = QLabel(self)\n self.label.setText(name)\n self.label.setAlignment(Qt.AlignCenter)\n main_layout.addWidget(self.label)\n\n def set_mode(self, mode):\n # Update the mode of the pump and trigger a repaint\n self.mode = mode\n self.update()\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.setRenderHint(QPainter.Antialiasing)\n\n # Define the colors for each mode\n color = {\n \"malfunction\": QColor(255, 0, 0), # Red\n \"idle\": QColor(0, 0, 255), # Blue\n \"operational\": QColor(0, 255, 0) # Green\n }\n\n # Calculate the position and size of the pump elements\n radius = self.width() * 0.5 / 2\n center_x = self.width() / 2\n center_y = self.height() * 0.5 + self.label.height() * 0.6\n\n # Draw the pump symbol\n painter.setPen(QPen(Qt.black, 2))\n\n # Fill the circle with the mode color\n painter.setBrush(color[self.mode])\n #painter.drawEllipse(center_x - radius, center_y - radius, radius * 2, radius * 2)\n painter.drawEllipse(int(center_x - radius), int(center_y - radius), int(radius * 2), int(radius * 2))\n\n # Draw the pump symbol lines\n painter.setPen(QPen(Qt.black, 2))\n painter.drawLine(int(center_x) - int(radius * 0.88), int(center_y) - int(radius * 0.45),\n int(center_x) + int(radius * 0.88), int(center_y) - int(radius * 0.25))\n painter.drawLine(int(center_x) - int(radius * 0.88), int(center_y) + int(radius * 0.45),\n int(center_x) + int(radius * 0.88), int(center_y) + int(radius * 0.25))\n\n def sizeHint(self):\n return self.minimumSizeHint()\n \n def update1(self,val:dict):\n try:\n Auf:bool\n error1:bool\n Auf=val[self.opcName+':Auf']\n error1=val[self.opcName+':error1']\n error2=val[self.opcName+':error2']\n error3=val[self.opcName+':error3']\n\n\n if error1 or error2 or error3:\n self.set_mode('malfunction')\n elif Auf:\n self.set_mode('operational')\n else:\n self.set_mode('idle')\n #print('If Statement done')\n except Exception as e:\n print('Exception raised')\n #print(val[self.opcName])\n print(str(e))\n\n\n\nif __name__ == 
\"__main__\":\n # Create a QApplication instance\n app = QApplication(sys.argv)\n\n # Create an instance of the PumpWidget\n pump_widget = PumpWidget(\"Pump\", size=50)\n pump_widget.setFixedSize(50, 50)\n\n # Set the initial mode\n pump_widget.set_mode(\"operational\")\n\n # Show the pump widget\n pump_widget.show()\n\n # Start the event loop\n sys.exit(app.exec())\n","repo_name":"ISkory465/metabolon-gui","sub_path":"components/widgets/pump.py","file_name":"pump.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"12965783265","text":"chain = []\n\ndebug_links = False\n\nclass NoEntryNodeException(Exception):\n pass\n\nclass NodeNotFoundException(Exception):\n pass\n\ndef debug(s):\n if debug_links:\n print(s)\n\nclass NFA:\n def __init__(self):\n self.entry_node = None\n self.nodes = {}\n\n def add_node(self, name, is_accepting=False, is_entry=False):\n self.nodes[name] = NFANode(name, is_accepting)\n if is_entry:\n self.entry_node = self.nodes[name]\n return self.nodes[name]\n\n def add_link(self, start, end, label=''):\n if start in self.nodes:\n node = self.nodes[start]\n else:\n raise NodeNotFoundException(\"Node '%s' not found in NFA\" % start)\n if end in self.nodes:\n other = self.nodes[end]\n else:\n raise NodeNotFoundException(\"Node '%s' not found in NFA\" % end)\n node.add_edge(other, label)\n\n def set_entry_node(self, name):\n if name in self.nodes:\n self.entry_node = self.nodes[name]\n else:\n raise NodeNotFoundException(\"Node '%s' not found in NFA\" % name)\n\n def parse(self, string):\n if self.entry_node is None:\n raise NoEntryNodeException('Set an entry point for the NFA')\n return self.entry_node.parse(string)\n\n\nclass NFANode:\n def __init__(self, name, is_accepting=False):\n self.correct = []\n self.name = name\n self.is_accepting = is_accepting\n self.edges = {}\n self.epsilons = []\n\n def __repr__(self):\n return ''\n\n def accepts(self, string):\n if string in self.correct:\n return True\n else:\n return False\n\n def add_edge(self, node, label=''):\n if not label:\n self.epsilons.append(node)\n else:\n if label in self.edges:\n try:\n self.edges[label] = (self.edges[label], node)\n except TypeError:\n self.edges[label] = (self.edges[label], node)\n else:\n self.edges[label] = (node,)\n self.correct.append(label)\n\n def parse(self, string):\n if len(string) == 0:\n if self.is_accepting:\n return True\n else:\n return False\n for edge in self.edges:\n if edge == string[0]:\n for node in self.edges[edge]:\n debug('linking from ' + repr(self) + ' to ' + repr(self.edges[edge]) + ' along link labelled \\'' + edge + '\\'')\n if node.parse(string[1:]):\n return True\n for node in self.epsilons:\n debug('linking from ' + repr(self) + ' to ' + repr(node) + ' along epsilon link')\n chain.append(self.name)\n chain.append('-')\n if node.parse(string):\n return True\n debug('no links labelled \\'%s\\' from node %s' % (string[0], repr(self)))\n return False\n\nnfa = NFA()\n\nnfa.add_node('q0', True, True)\nnfa.add_node('q1')\nnfa.add_node('q2')\nnfa.add_link('q0', 'q1', 'b') # q0 -> q1 over b\nnfa.add_link('q0', 'q2') # q0 -> q2 over epsilon\nnfa.add_link('q1', 'q1', 'a') # q1 -> q1 over b\nnfa.add_link('q1', 'q2', 'a') # q1 -> q2 over a\nnfa.add_link('q1', 'q2', 'b') # q1 -> q2 over b\nnfa.add_link('q2', 'q0', 'a') # q2 -> q0 over a\n\nif __name__ == '__main__':\n while True:\n print(nfa.parse(input('> ')))\n chain = 
[]\n","repo_name":"lyneca/ncss2017","sub_path":"nfa.py","file_name":"nfa.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36490812420","text":"# Напишите программу, которая будет преобразовывать десятичное число в двоичное.\n#\n# Пример:\n#\n# - 45 -> 101101\n# - 3 -> 11\n# - 2 -> 10\n\n# если не использовать метод bin() , то так\n\ndef dec_to_bin(n):\n if n == 0:\n return ''\n else:\n return dec_to_bin(n // 2) + str(n % 2)\n\n\nn = 64\nprint(dec_to_bin(n))\n","repo_name":"ArtsmanDan/python_base","sub_path":"sem3HW/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37646557645","text":"from torch.utils.data import DataLoader\nimport math\nfrom sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util\nfrom sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\nfrom sentence_transformers.readers import InputExample\nimport logging\nfrom datetime import datetime\nimport sys\nimport os\nimport pandas as pd\n\nmodel_name = 'bert-base-chinese'\ntrain_batch_size = 16\nnum_epochs = 4\nmodel_save_path = 'test_output'\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n\n# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings\nword_embedding_model = models.Transformer(model_name)\n\n# Apply mean pooling to get one fixed sized sentence vector\npooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),\n pooling_mode_mean_tokens=True,\n pooling_mode_cls_token=False,\n pooling_mode_max_tokens=False)\n\nmodel = SentenceTransformer(modules=[word_embedding_model, pooling_model])\ntrain_samples = []\ndev_samples = []\ntest_samples = []\n\ndef load(path):\n df = pd.read_csv(path)\n samples = []\n for idx,item in df.iterrows():\n samples.append(InputExample(texts=[item['sentence1'], item['sentence2']], label=float(item['label'])))\n return samples\n\ntrain_samples = load('NLP_related_projects/BERT/Bert_sim/data/train.csv')\ntest_samples = load('NLP_related_projects/BERT/Bert_sim/data/test.csv')\ndev_samples = load('NLP_related_projects/BERT/Bert_sim/data/dev.csv')\n\ntrain_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)\ntrain_loss = losses.CosineSimilarityLoss(model=model)\nevaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')\nwarmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up\n\n# Train the model\nmodel.fit(train_objectives=[(train_dataloader, train_loss)],\n evaluator=evaluator,\n epochs=num_epochs,\n evaluation_steps=1000,\n warmup_steps=warmup_steps,\n output_path=model_save_path)\n\nmodel = SentenceTransformer(model_save_path)\ntest_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')\ntest_evaluator(model, output_path=model_save_path)\n","repo_name":"qwz111/sentences","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26906885885","text":"#!/usr/bin/env python\n# PYTHON_ARGCOMPLETE_OK\n\"\"\"Create a protocol from a bvec and bval file.\n\nMDT uses a protocol file 
(with extension .prtcl) to store all the acquisition related values.\nThis is a column based file which can hold, next to the b-values and gradient directions,\nthe big Delta, small delta, gradient amplitude G and more of these extra acquisition details.\n\"\"\"\nimport argparse\nimport os\n\nfrom argcomplete.completers import FilesCompleter\nimport textwrap\nimport mdt.protocols\nfrom mdt.lib.shell_utils import BasicShellApplication\nfrom mdt.protocols import load_bvec_bval\n\n__author__ = 'Robbert Harms'\n__date__ = \"2015-08-18\"\n__maintainer__ = \"Robbert Harms\"\n__email__ = \"robbert@xkls.nl\"\n\n\nclass CreateProtocol(BasicShellApplication):\n\n def __init__(self):\n super().__init__()\n self.parse_unknown_args = True\n\n def _get_arg_parser(self, doc_parser=False):\n description = textwrap.dedent(__doc__)\n\n examples = textwrap.dedent('''\n mdt-create-protocol data.bvec data.bval\n mdt-create-protocol data.bvec data.bval -o my_protocol.prtcl\n mdt-create-protocol data.bvec data.bval\n mdt-create-protocol data.bvec data.bval --Delta 30 --delta 20\n mdt-create-protocol data.bvec data.bval --sequence-timing-units 's' --Delta 0.03\n mdt-create-protocol data.bvec data.bval --TE ../my_TE_file.txt\n ''')\n epilog = self._format_examples(doc_parser, examples)\n epilog += textwrap.dedent(\"\"\"\n\n Additional columns can be specified using the syntax: \\\"--{column_name} {value}\\\" structure.\n Please note that these additional values will not be auto-converted from ms to s.\n \"\"\")\n\n parser = argparse.ArgumentParser(description=description, epilog=epilog,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('bvec', help='the gradient vectors file').completer = FilesCompleter()\n parser.add_argument('bval', help='the gradient b-values').completer = FilesCompleter()\n parser.add_argument('-s', '--bval-scale-factor', type=float,\n help=\"We expect the b-values in the output protocol in units of s/m^2. \"\n \"Example use: 1 or 1e6. The default is autodetect.\")\n\n parser.add_argument('-o', '--output_file',\n help='the output protocol, defaults to \".prtcl\" in the same '\n 'directory as the bvec file.').completer = FilesCompleter()\n\n parser.add_argument('--sequence-timing-units', choices=('ms', 's'), default='ms',\n help=\"The units of the sequence timings. The default is 'ms' which we will convert to 's'.\")\n\n parser.add_argument('--G',\n help=\"The gradient amplitudes in T/m.\")\n\n parser.add_argument('--maxG',\n help=\"The maximum gradient amplitude in T/m. This is only useful if we need to guess \"\n \"big Delta and small delta. 
Default is 0.04 T/m\")\n\n parser.add_argument('--Delta',\n help=\"The big Delta to use, either a single number or a file with either a single number \"\n \"or one number per gradient direction.\")\n\n parser.add_argument('--delta',\n help=\"The small delta to use, either a single number or a file with either a single number \"\n \"or one number per gradient direction.\")\n\n parser.add_argument('--TE',\n help=\"The TE to use, either a single number or a file with either a single number \"\n \"or one number per gradient direction.\")\n\n parser.add_argument('--TR',\n help=\"The TR to use, either a single number or a file with either a single number \"\n \"or one number per gradient direction.\")\n\n return parser\n\n def run(self, args, extra_args):\n bvec = os.path.realpath(args.bvec)\n bval = os.path.realpath(args.bval)\n\n if args.output_file:\n output_prtcl = os.path.realpath(args.output_file)\n else:\n output_prtcl = os.path.join(os.path.dirname(bvec),\n os.path.splitext(os.path.basename(bvec))[0] + '.prtcl')\n\n if args.bval_scale_factor:\n bval_scale_factor = float(args.bval_scale_factor)\n else:\n bval_scale_factor = 'auto'\n\n protocol = load_bvec_bval(bvec=bvec, bval=bval, bval_scale=bval_scale_factor)\n\n if args.G is None and args.maxG is not None:\n if os.path.isfile(str(args.maxG)):\n protocol = protocol.with_added_column_from_file('maxG', os.path.realpath(str(args.maxG)), 1)\n else:\n protocol = protocol.with_new_column('maxG', float(args.maxG))\n\n if args.Delta is not None:\n protocol = add_sequence_timing_column_to_protocol(protocol, 'Delta', args.Delta, args.sequence_timing_units)\n if args.delta is not None:\n protocol = add_sequence_timing_column_to_protocol(protocol, 'delta', args.delta, args.sequence_timing_units)\n if args.TE is not None:\n protocol = add_sequence_timing_column_to_protocol(protocol, 'TE', args.TE, args.sequence_timing_units)\n if args.TR is not None:\n protocol = add_sequence_timing_column_to_protocol(protocol, 'TR', args.TR, args.sequence_timing_units)\n if args.G is not None:\n protocol = add_column_to_protocol(protocol, 'G', args.G, 1)\n\n protocol = add_extra_columns(protocol, extra_args)\n\n mdt.protocols.write_protocol(protocol, output_prtcl)\n\n\ndef add_extra_columns(protocol, extra_args):\n key = None\n for element in extra_args:\n if '=' in element and element.startswith('--'):\n key, value = element[2:].split('=')\n protocol = add_column_to_protocol(protocol, key, value, 1)\n elif element.startswith('--'):\n key = element[2:]\n else:\n protocol = add_column_to_protocol(protocol, key, element, 1)\n return protocol\n\n\ndef add_column_to_protocol(protocol, column, value, mult_factor):\n if value is not None:\n if os.path.isfile(value):\n return protocol.with_added_column_from_file(column, os.path.realpath(value), mult_factor)\n else:\n return protocol.with_new_column(column, float(value) * mult_factor)\n\n\ndef add_sequence_timing_column_to_protocol(protocol, column, value, units):\n mult_factor = 1e-3 if units == 'ms' else 1\n return add_column_to_protocol(protocol, column, value, mult_factor)\n\n\ndef get_doc_arg_parser():\n return CreateProtocol().get_documentation_arg_parser()\n\n\nif __name__ == '__main__':\n CreateProtocol().start()\n","repo_name":"robbert-harms/MDT","sub_path":"mdt/cli_scripts/mdt_create_protocol.py","file_name":"mdt_create_protocol.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"81"} +{"seq_id":"18199210631","text":"from 
turtle import Screen\r\nfrom player import Player\r\nimport LevelController\r\nimport time\r\n\r\nSTARTING_LEVEL = 0\r\n\r\nscreen = Screen()\r\nscreen.setup(width=600, height=800)\r\nscreen.bgpic(\"simplelanes.gif\")\r\nscreen.title(\"Toadster\")\r\nscreen.tracer(0)\r\n\r\ntoadster = Player()\r\n\r\nlevel_controller = LevelController.Level()\r\n\r\nscreen.listen()\r\nscreen.onkey(toadster.move_up, \"Up\")\r\nscreen.onkey(toadster.move_down, \"Down\")\r\nscreen.onkey(toadster.move_left, \"Left\")\r\nscreen.onkey(toadster.move_right, \"Right\")\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n screen.update()\r\n time.sleep(0.1)\r\n if toadster.ycor() >= 380:\r\n if level_controller.level_number == 6:\r\n level_controller.win_screen()\r\n game_is_on = False\r\n else:\r\n toadster.reset_player()\r\n level_controller.update_level_number()\r\n for opp in level_controller.all_the_opps:\r\n opp.cross_street()\r\n if toadster.distance(opp) < 15:\r\n level_controller.lose_screen()\r\n game_is_on = False\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nscreen.exitonclick()\r\n","repo_name":"Shaqaveli/Toadster","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71571420106","text":"\"\"\"\nWebSockets Client Helpers\n\nPrimarily deals with the asynchronous implementation\n\"\"\"\nimport asyncio\nimport json\nimport sys\nimport traceback\nimport typing as T\nimport aiohttp\nfrom engine.models.items import CombinedItem\n\nfrom engine.models.battle import BattleRenderLog\nfrom engine.models.party import PartyConfig\nfrom server.api.base import ReportingResponse\nfrom server.api.websocket import AddToTeam, CombineItems, FinishedRenderingBattle, GiveItemToPokemon, MoveToParty, MoveToStorage, ReleaseFromParty, ReleaseFromStorage, RemoveItemFromPokemon, RenderBattle, UpdatePartyConfig, UseHeroPower, UseItem, UseItemRequest\nfrom server.api.websocket import CatchShop\nfrom server.api.websocket import RemoveFromTeam\nfrom server.api.websocket import RollShop\nfrom server.api.websocket import ShiftTeamDown\nfrom server.api.websocket import ShiftTeamUp\n\nfrom utils.context import GameContext\n\nif T.TYPE_CHECKING:\n from pydantic import BaseModel\n from aiohttp.client import ClientWebSocketResponse\n from websockets.legacy.client import WebSocketClientProtocol\n\n\nclass WebSocketClient:\n \"\"\"\n WebSocket client object\n \"\"\"\n\n def __init__(self, client):\n loop = asyncio.get_event_loop()\n self.session = aiohttp.ClientSession(loop=loop)\n self.client: ClientWebSocketResponse = client\n\n async def disconnect(self):\n self.client = await self.client.close()\n\n async def send_request(\n self,\n endpoint: str,\n request: \"BaseModel\",\n response_type: ReportingResponse\n ):\n \"\"\"\n Issue a request. 
Record exceptions.\n\n response_type right now should be ReportingResponse by default\n \"\"\"\n\n # format into request type\n # TODO: write a BaseModel for this\n formatted = {\"endpoint\": endpoint, \"payload\": request.json()}\n\n try:\n await self.client.send(json.dumps(formatted))\n raw = await self.client.recv()\n response = response_type.parse_raw(json.loads(raw))\n if not response.success:\n raise RuntimeError(f\"Websocket remote error: {response.message}\")\n return response\n except Exception as exc:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback)\n print(f'Encountered exception in WebSocket client: {repr(exc)}')\n\n async def implement_api_client(\n self,\n api_class,\n context: GameContext,\n response_type=ReportingResponse,\n **kwargs\n ):\n \"\"\"\n Take an API class object and formulate the correct input for it.\n\n Assumes that the request is a class that inherits WebSocketPlayerRequest\n \"\"\"\n request_type = api_class.REQUEST_TYPE\n request = request_type.from_game_context(context, **kwargs)\n return await self.send_request(api_class.__name__, request, response_type=response_type)\n\n # oh fucking boy time to redo the API implementations\n # SHOP APIs\n async def roll_shop(self, context: GameContext):\n \"\"\"\n Roll the Shop\n \"\"\"\n await self.implement_api_client(RollShop, context)\n\n async def catch_pokemon(self, context: GameContext, shop_index: int):\n \"\"\"\n Catch a Pokemon\n \"\"\"\n await self.implement_api_client(CatchShop, context, shop_index=shop_index)\n\n # PARTY CONFIG APIs\n async def update_party_config(self, context: GameContext, party_config: PartyConfig):\n \"\"\"\n Update a party config\n \"\"\"\n await self.implement_api_client(UpdatePartyConfig, context, party_config=party_config)\n\n # TEAM APIs\n async def shift_team_up(self, context: GameContext, team_index: int):\n \"\"\"\n Shift a team member up\n \"\"\"\n await self.implement_api_client(ShiftTeamUp, context, team_index=team_index)\n\n async def shift_team_down(self, context: GameContext, team_index: int):\n \"\"\"\n Shift a team member down\n \"\"\"\n await self.implement_api_client(ShiftTeamDown, context, team_index=team_index)\n\n async def remove_team_member(self, context: GameContext, team_index: int):\n \"\"\"\n Remove team member\n \"\"\"\n await self.implement_api_client(RemoveFromTeam, context, team_index=team_index)\n\n # PARTY APIs\n async def add_to_team(self, context: GameContext, party_index: int):\n \"\"\"\n Add party member to team\n \"\"\"\n await self.implement_api_client(AddToTeam, context, party_index=party_index)\n\n async def release_from_party(self, context: GameContext, party_index: int):\n \"\"\"\n Release party member from team\n \"\"\"\n await self.implement_api_client(ReleaseFromParty, context, party_index=party_index)\n\n async def move_to_storage(self, context: GameContext, party_index: int):\n \"\"\"\n Move party to storage\n \"\"\"\n await self.implement_api_client(MoveToStorage, context, party_index=party_index)\n\n # STORAGE APIs\n async def move_to_party(self, context: GameContext, storage_index: int):\n await self.implement_api_client(MoveToParty, context, storage_index=storage_index)\n\n async def release_from_storage(self, context: GameContext, storage_index: int):\n await self.implement_api_client(ReleaseFromStorage, context, storage_index=storage_index)\n\n # ITEM APIs\n async def use_item(self, context: GameContext, item_id: str):\n await self.implement_api_client(\n UseItem,\n context,\n 
item_id=item_id\n        )\n    \n    async def give_item_to_pokemon(self, context: GameContext, item_id: str, pokemon_id: str):\n        await self.implement_api_client(\n            GiveItemToPokemon,\n            context,\n            item_id=item_id,\n            pokemon_id=pokemon_id\n        )\n\n    async def remove_item_from_pokemon(self, context: GameContext, pokemon_id: str):\n        await self.implement_api_client(RemoveItemFromPokemon, context, pokemon_id=pokemon_id)\n\n    async def combine_items(self, ctx: GameContext, primary_item_id: str, secondary_item_id: str):\n        await self.implement_api_client(\n            CombineItems,\n            ctx,\n            primary_item_id=primary_item_id,\n            secondary_item_id=secondary_item_id\n        )\n\n    async def use_hero_power(self, ctx: GameContext):\n        await self.implement_api_client(UseHeroPower, ctx)\n\n    async def render_battle(self, ctx: GameContext):\n        return await self.implement_api_client(\n            RenderBattle,\n            ctx,\n            response_type=BattleRenderLog,\n        )\n\n    async def finish_rendering_battle(self, ctx: GameContext):\n        return await self.implement_api_client(FinishedRenderingBattle, ctx)\n","repo_name":"chillymango/autobattler_demo","sub_path":"utils/websockets_client.py","file_name":"websockets_client.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31391384976","text":"class Solution:\n    def __init__(self):\n        self.sol = []\n\n    def subMake(self, s):\n        if len(s) == 1:\n            return [s]\n        elif s[0]==s[-1]=='0':\n            return []\n        elif s[0]=='0':\n            return [s[0:1]+'.'+s[1:]]\n        elif s[-1]=='0':\n            return [s]\n        sol = [s]\n        for i in range(1,len(s)):\n            sol.append(s[:i]+'.'+s[i:])\n        return sol\n    \n    def makeCoordinate(self, s, d):\n        a, b = s[0:d], s[d:]\n        l = self.subMake(a)\n        r = self.subMake(b)\n        for i in l:\n            for j in r:\n                self.sol.append('('+i+', '+j+')')\n    \n    def ambiguousCoordinates(self, s: str) -> List[str]:\n        s=s[1:-1]\n        for i in range(1,len(s)):\n            self.makeCoordinate(s,i)\n        return self.sol","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/ambiguous-coordinates.py","file_name":"ambiguous-coordinates.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29930394863","text":"from random import randint\nfrom typing import Tuple, Dict, Callable, List\n\nfrom .battlebasedroutine import BattleBasedRoutine\nfrom ..gatherer import Gatherer\nfrom ...board.simulation.simultaneous_alphabeta import Value\nfrom ...characters.moves import MoveDescriptor\nfrom ...controls.controllers import Bot\nfrom ...game import API\n\n\nclass ImitationRoutine(BattleBasedRoutine):\n    def __init__(self, own_controller: Bot, opponent_controllers: List[Bot], gatherer: Gatherer,\n                 possible_moves: Tuple[MoveDescriptor, ...], eval_fct: Callable[[API], Dict[int, Value]],\n                 max_depth: int = -1, must_write_files: bool = True, must_keep_temp_files: bool = False,\n                 min_end_states: int=1, min_victories: int=0, random_move_probability: float=0,\n                 random_controller: Bot=None):\n        super().__init__(opponent_controllers, gatherer, possible_moves, eval_fct, max_depth, must_write_files,\n                         must_keep_temp_files, min_end_states, min_victories)\n        own_controller.getReady()\n        self._ownControllerPlayerNumber = own_controller.playerNumber\n        self._bots[own_controller.playerNumber] = own_controller\n        self._randomMoveProbability = random_move_probability\n        self._randomController = random_controller\n\n    def _generateMovesList(self, state: API):\n        must_be_random = False\n        if self._randomMoveProbability > 0 and self._randomController is 
not None:\n threshold = self._randomMoveProbability * 100\n number = randint(0, 100)\n if number < threshold:\n must_be_random = True\n if must_be_random:\n temp = self._bots[self._ownControllerPlayerNumber]\n self._bots[self._ownControllerPlayerNumber] = self._randomController\n combinations = super()._generateMovesList(state)\n self._bots[self._ownControllerPlayerNumber] = temp\n else:\n combinations = super()._generateMovesList(state)\n return combinations\n","repo_name":"Angeall/pyTGF","sub_path":"pytgf/data/routines/imitationroutine.py","file_name":"imitationroutine.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10738455191","text":"# Given a DNA string, return the numbers of A, C, G, T bases, respectively.\n# Link: https://rosalind.info/problems/dna/\n\ndef count_dna_bases(dna_file):\n # open downloaded file\n with open(dna_file, 'r') as f:\n dna = f.read()\n \n # count number of occurences of each base in DNA file\n base_counts = {'A':0, 'C':0, 'G':0, 'T':0}\n for base in base_counts.keys():\n base_counts[base] = dna.count(base)\n\n print(base_counts) \n\nif __name__ == '__main__':\n count_dna_bases('/Users/jakeharris/Downloads/rosalind_ini.txt')","repo_name":"jakecharris/rosalind-problems","sub_path":"DNA.py","file_name":"DNA.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7528916661","text":"import numpy as np\nimport librosa\nimport os\nimport resemblyzer\nimport pickle\ndef augment_data():\n noise_names = []\n for noise_path in os.listdir('./noise'):\n noise_name = noise_path.split('.')[0]\n noise = f'./noise/{noise_path}'\n for username in os.listdir('./data'):\n data_encode = []\n for user_file in os.listdir(f'./data/{username}'):\n y, sr = librosa.load(f'./data/{username}/{user_file}',\n sr=16000)\n encoded_data = resemblyzer.preprocess_wav(f'./data/{username}/{user_file}')\n data_encode.append(encoded_data)\n for i in range(1):\n choice = i\n print(choice)\n\n if choice == 1:\n aug = pitch(mix_bg(y, noise), sr, 0.2)\n elif choice == 2:\n aug = speed(mix_bg(y, noise), 1.2)\n else:\n aug = mix_bg(y, noise)\n\n if not os.path.exists(f'./augmented_data/{username}'):\n os.mkdir(f'./augmented_data/{username}')\n\n librosa.output.write_wav(\n f'./augmented_data/{username}/{noise_name}_{i}.wav',\n aug, sr)\n encoded_data = resemblyzer.preprocess_wav(f'./augmented_data/{username}/{noise_name}_{i}.wav')\n data_encode.append(encoded_data)\n with open(f'data/{username}_encoded_wav.pickle', 'wb') as handle:\n pickle.dump(data_encode, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef pitch(data, sampling_rate, pitch_factor):\n return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)\n\n\ndef speed(data, speed_factor):\n return librosa.effects.time_stretch(data, speed_factor)\n\n\ndef mix_bg(data, noise_path):\n bg_y, br_sr = librosa.load(noise_path, sr=16000)\n bg_y = bg_y*0.5\n start_ = np.random.randint(bg_y.shape[0] - data.shape[0])\n bg_slice = bg_y[start_:start_ + data.shape[0]]\n wav_with_bg = data + bg_slice * 0.5\n return wav_with_bg\n\n\naugment_data()","repo_name":"tonyD1999/Face-And-Voice-Recognition","sub_path":"Voice-Augment/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13019664134","text":"inp_file = 
open('2020/day12.txt')\ndirections = []\nfor line in inp_file:\n line = line.strip()\n directions.append((line[0], int(line[1:])))\n\ncards = {\n 'N': 0,\n 'E': 1,\n 'S': 2,\n 'W': 3,\n}\n\ncard_vals = {\n 0: (0, 1), # North\n 1: (1, 0), # East\n 2: (0, -1),# South\n 3: (-1, 0),# West\n}\n\nclass Ferry():\n def __init__(self):\n self.posx = 0\n self.posy = 0\n self.facing = 1\n self.waypointx = 10\n self.waypointy = 1\n\n def move_ferry(self, direction, value):\n self.posx += card_vals[direction][0] * value\n self.posy += card_vals[direction][1] * value\n \n def move_waypoint(self, direction, value):\n self.waypointx += card_vals[direction][0] * value\n self.waypointy += card_vals[direction][1] * value\n\n def move_ferry_forward(self, value):\n self.move_ferry(self.facing, value)\n\n def rotate_ferry(self, orientation, value):\n rotate_amount = value // 90\n if orientation == 'R':\n self.facing = (self.facing + rotate_amount) % 4\n else:\n self.facing = (self.facing - rotate_amount) % 4\n\n def rotate_waypoint(self, orientation, value):\n rotate_amount = value // 90\n if orientation == 'R':\n for _ in range(rotate_amount):\n hold = self.waypointx\n self.waypointx = self.waypointy\n self.waypointy = - hold\n else:\n for _ in range(rotate_amount):\n hold = self.waypointx\n self.waypointx = - self.waypointy\n self.waypointy = hold\n\n def move_to_waypoint(self, times):\n for _ in range(times):\n self.posx += self.waypointx\n self.posy += self.waypointy\n\n def __repr__(self):\n return \"Ferry[x: \" + str(self.posx) + \", y: \" + str(self.posy) + \", facing: \" + str(self.facing) + \"]\"\n\n\ndef part1():\n ferry = Ferry()\n for direction in directions:\n if direction[0] == 'F':\n ferry.move_ferry_forward(direction[1])\n elif direction[0] == 'R' or direction[0] == 'L':\n ferry.rotate_ferry(direction[0], direction[1])\n else:\n ferry.move_ferry(cards[direction[0]], direction[1])\n\n return abs(ferry.posx) + abs(ferry.posy)\n\n\ndef part2():\n ferry = Ferry()\n for direction in directions:\n if direction[0] == 'F':\n ferry.move_to_waypoint(direction[1])\n elif direction[0] == 'R' or direction[0] == 'L':\n ferry.rotate_waypoint(direction[0], direction[1])\n else:\n ferry.move_waypoint(cards[direction[0]], direction[1])\n\n return abs(ferry.posx) + abs(ferry.posy)\n\n\nprint (part1())\nprint (part2())","repo_name":"BetterBelle/AdventOfCode","sub_path":"python/2020/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31392372206","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n v = list()\n cur = head\n k -= 1\n while cur:\n v.append(cur)\n cur = cur.next\n v[k].val, v[-k-1].val = v[-k-1].val, v[k].val\n return head","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/swapping-nodes-in-a-linked-list.py","file_name":"swapping-nodes-in-a-linked-list.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30300371917","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 16 16:19:40 2023\n\n@author: Kasper\n\"\"\"\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport time\nimport ODE_functions\n\n\n\ndef eulerStep(f, x0, t0, h, *args):\n \"\"\" \n 
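Explicit Euler update: x_{n+1} = x_n + h * f(t_n, x_n).\n\n    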
Function implements a euler step for (t0 + h)\n \n Parameters\n ----------\n f : Function\n ODE function that returns value of x\n x0 : Float\n Inital value for x\n t0 : Float\n Inital value for time\n h : Float\n Step size\n Returns\n -------\n [xN, tN] : Array values for new step h \n \"\"\"\n dxdt = f(t0, x0)\n xN = x0 + h * dxdt\n tN = t0 + h\n return xN, tN\n\ndef rk4Step(f, x0, t0, h, *args):\n \"\"\" \n Performs Runge-Kutta Four step for (t0 + h)\n Parameters\n ----------\n f : Function\n ODE function that returns value of x\n x0 : Float\n Inital value for x\n t0 : Float\n Inital value for time\n h : Float\n Step size\n Returns\n -------\n [xN, tN] : Array of values for new step h \n \"\"\" \n k1 = np.array(f( t0, x0, *args))\n k2 = np.array(f( t0 + (h/2), x0 + h * (k1/2), *args))\n k3 = np.array(f( t0 + (h/2), x0 + h * (k2/2), *args))\n k4 = np.array(f( t0 + h, x0 + h * k3, *args))\n \n xN = x0 + ((k1 + 2*k2 + 2*k3 + k4)/6) * h\n tN = t0 + h\n\n return xN, tN\n\n\ndef solveTo(f, T, x0, h, method, *args):\n\n \"\"\"\n Solve ode using given method between range of T\n Parameters\n ----------\n 'T' : Array\n Contains two values t0, the intial value time and\n tend, the final of time\n 'x0' : Array\n Inital x value to solve for\n 'f' : Function \n ODE to be solved \n 'deltaTmax' : Float\n Maximum step size\n 'method' : String\n Describes which solver is to be used\n Returns\n -------\n xN : Array of Values for x or x and y\n \"\"\"\n\n methods = {'euler':eulerStep, 'rk4':rk4Step}\n solver = methods[method]\n \n tN = T[0] \n tend = T[-1]\n \n \n xN = x0\n solArray = []\n while (tN + h) < tend:\n xN, tN = solver(f, xN, tN, h, *args)\n solArray.append(xN)\n else:\n xN, tN = solver(f, xN, tN, tend - tN, *args)\n solArray.append(xN)\n \n return xN\n \n\n\ndef solveODE(f, x0, tspan, method, deltaTmax, order, *args):\n \"\"\"\n Solve ode using given method between range of T\n \n Parameters\n ----------\n \n 'f' : Function \n ODE to be solved \n \n 'x0' : Array\n Inital x value to solve for\n \n 'tspan' : Array\n Time to be solved for\n \n 'method' : String\n Describes which solver is to be used\n \n 'deltaTmax' : Float\n Maximum step\n \n 'order' : Boolean\n True for 2nd order ODE False for 1st\n\n Returns\n -------\n solArray: Array containg values for x or x and y\n \"\"\"\n \n \n\n if order:\n solArray = np.empty(shape=(len(tspan), len(x0)))\n \n else:\n solArray = np.empty(shape=(len(tspan), 1))\n solArray[0] = x0\n\n for i in range(len(tspan)-1):\n solArray[i+1] = solveTo(f, ([tspan[i],tspan[i+1]]), solArray[i], deltaTmax, method, *args)\n \n return solArray\n \ndef main():\n \"\"\"\n # Example for solutions to first order ODE x' = x \n # with inital conditions; x(0) = 1 \n # solving from t = 0 till t = 1\n # \"\"\"\n f = ODE_functions.f\n fTrue = ODE_functions.fAnalytical\n tspan = np.linspace(0, 1, 100) \n \n eulerSol = solveODE(f, 1, tspan, 'euler', 0.01, False)\n rk4Sol = solveODE(f, 1, tspan, 'rk4', 0.01, False)\n exactSol = fTrue(tspan)\n \n rk4Error = [np.abs(exactSol[i] - rk4Sol[i]) for i in range(len(exactSol))]\n eulerError = [np.abs(exactSol[i] - eulerSol[i]) for i in range(len(exactSol))]\n \n plt.figure()\n f, axes = plt.subplots(1, 2)\n f.suptitle(\"Plots for 1st order ODE x' = x\", fontsize=16)\n axes[0].plot(tspan,eulerSol,label='euler', marker='x', markersize=3)\n axes[0].plot(tspan,rk4Sol,label='rk4', marker='s', markersize=3)\n axes[0].plot(tspan,exactSol,label='exact', marker='o', markersize=3)\n axes[0].set_ylabel('dx/dt')\n axes[0].set_xlabel('Time')\n 
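# Quick order-of-accuracy check (a minimal sketch reusing solveODE above):\n    # halving h should cut the end-point error roughly 2x for Euler (O(h)) and\n    # 16x for classical RK4 (O(h^4)). The local name f was rebound to the\n    # figure by plt.subplots above, so the ODE is taken from the module again.\n    for step in (0.02, 0.01):\n        approx = solveODE(ODE_functions.f, 1, tspan, 'rk4', step, False)\n        print(\"h={:.3f} rk4 end-point error: {:.2e}\".format(\n            step, abs(approx[-1, 0] - exactSol[-1])))\n    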
axes[0].legend()\n \n \n axes[1].loglog(tspan, eulerError, label = \"euler error\")\n axes[1].loglog(tspan, rk4Error, label = \"rk4 error\")\n axes[1].set_ylabel('Error')\n axes[1].set_xlabel('Time')\n axes[1].legend()\n \n\n \n \"\"\"\n Example for solutions to the 2nd order ODE,\n x'' = -x which is equivalent too,\n x' = y, y' = -x\n solving from t = 0 to 1\n \"\"\"\n \n g = ODE_functions.g\n gTrue = ODE_functions.gAnalytical\n \n eulerSolxy = solveODE(g, [1,1], tspan, 'euler', 0.01, True)\n eulerSolx = eulerSolxy[:,0]\n eulerSoly = eulerSolxy[:,1]\n\n rk4Solxy = solveODE(g, [1,1], tspan, 'rk4', 0.01, True)\n rk4Solx = rk4Solxy[:,0]\n rk4Soly = rk4Solxy[:,1]\n \n exactSolx , exactSoly = gTrue(tspan)\n\n \n plt.figure()\n f, axes = plt.subplots(2, 2)\n f.suptitle(\"Plots for 2nd order ODE x'' = -x \", fontsize=16)\n axes[0,0].plot(tspan,eulerSolx,label='euler', marker='x', markersize=3,linestyle = 'None')\n axes[0,0].plot(tspan,rk4Solx,label='rk4', marker='o', markersize=3,linestyle = 'None')\n axes[0,0].plot(tspan,exactSolx,label='exact')\n axes[0,0].set_ylabel('x')\n axes[0,0].set_xlabel('Time')\n axes[0,0].legend()\n \n \n axes[0,1].plot(tspan,eulerSoly,label='euler', marker='x', markersize=5,linestyle = 'None')\n axes[0,1].plot(tspan,rk4Soly,label='rk4', marker='o', markersize=3,linestyle = 'None')\n axes[0,1].plot(tspan,exactSoly,label='exact')\n axes[0,1].set_ylabel('y(dx/dt)')\n axes[0,1].set_xlabel('Time')\n axes[0,1].legend()\n \n eulerErrorx = abs(eulerSolx-exactSolx)\n rk4Errorx = abs(rk4Solx-exactSolx)\n eulerErrory = abs(eulerSoly - exactSoly)\n rk4Errory = abs(rk4Soly- exactSoly)\n \n \n axes[1,0].loglog(tspan, eulerErrorx, label = \"euler x error\")\n axes[1,0].loglog(tspan, rk4Errorx, label = \"rk4 x error\")\n axes[1,0].set_ylabel('x Error')\n axes[1,0].set_xlabel('Time')\n axes[1,0].legend()\n \n \n axes[1,1].loglog(tspan, eulerErrory, label = \"euler y error\")\n axes[1,1].loglog(tspan, rk4Errory, label = \"rk4 y error\")\n axes[1,1].set_ylabel('y Error')\n axes[1,1].set_xlabel('Time')\n axes[1,1].legend()\n \n \n\n\nif __name__ == '__main__':\n main()\n \n \n \n ","repo_name":"Jasper1242/Exercises","sub_path":"ODE_solver.py","file_name":"ODE_solver.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24109810357","text":"import math\nfrom unittest import mock\n\nimport pytest # noqa: F401\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport theseus as th\nimport theseus.utils as thutils\nfrom theseus.constants import __FROM_THESEUS_LAYER_TOKEN__\nfrom tests.theseus_tests.core.common import (\n MockCostFunction,\n MockCostWeight,\n MockVar,\n create_objective_with_mock_cost_functions,\n)\nfrom theseus.theseus_layer import TheseusLayer\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef model(x, b):\n return b[..., :1] * x**2 + b[..., 1:]\n\n\ndef model_grad(x, b):\n g1 = x**2\n g2 = torch.ones_like(x)\n return g1, g2\n\n\n# This is a cost function of two variables that tries to fit\n# f(x;b) = A * x[0]^ 2 + B to the dataset\n# given by xs and ys. Here the variables for the cost function\n# are A, B, and the goal is to minimize MSE over the\n# dataset. 
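Because the model is linear in b, the\n# inner least-squares problem is quadratic and Gauss-Newton can solve it in\n# a single step; the tests below still exercise the full optimizer stack.\n# 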
We will pass the variables as a single\n# variable object of dimension 2.\nclass QuadraticFitCostFunction(th.CostFunction):\n def __init__(self, optim_vars, cost_weight, xs=None, ys=None):\n super().__init__(cost_weight, name=\"qf_cost_function\")\n assert len(optim_vars) == 1 and optim_vars[0].dof() == 2\n for i, var in enumerate(optim_vars):\n setattr(self, f\"optim_var_{i}\", var)\n self.register_optim_var(f\"optim_var_{i}\")\n self.xs = xs\n self.ys = ys\n\n self._optim_vars = optim_vars\n\n def error_from_tensors(self, optim_var_0_data):\n pred_y = model(self.xs, optim_var_0_data)\n return self.ys - pred_y\n\n # err = y - f(x:b), where b are the current variable values\n def error(self):\n return self.error_from_tensors(self.optim_var_0.tensor)\n\n def jacobians(self):\n g1, g2 = model_grad(self.xs, self.optim_var_0.tensor)\n return [-torch.stack([g1, g2], axis=2)], self.error()\n\n def dim(self):\n return self.xs.shape[1]\n\n def to(self, *args, **kwargs):\n super().to(*args, **kwargs)\n self.xs = self.xs.to(*args, **kwargs)\n self.ys = self.ys.to(*args, **kwargs)\n\n def _copy_impl(self, new_name=None):\n return QuadraticFitCostFunction(\n [v.copy() for v in self._optim_vars], self.weight.copy(), self.xs, self.ys\n )\n\n\ndef create_qf_theseus_layer(\n xs,\n ys,\n cost_weight=th.ScaleCostWeight(1.0),\n nonlinear_optimizer_cls=th.GaussNewton,\n linear_solver_cls=th.CholeskyDenseSolver,\n max_iterations=10,\n use_learnable_error=False,\n force_vectorization=False,\n):\n variables = [th.Vector(2, name=\"coefficients\")]\n objective = th.Objective()\n cost_function = QuadraticFitCostFunction(variables, cost_weight, xs=xs, ys=ys)\n\n if use_learnable_error:\n # For learnable error we embed the original cost weight as an auxiliary\n # variable that's part of the error function, and now becomes a learnable\n # parameter of the error\n def error_fn(optim_vars, aux_vars):\n # aux_vars is the learned weight\n # note that this is a hybrid cost function since, part of the function\n # follows the structure of QuadraticFitCostFunction, only the error weight\n # factor (aux_vars[0]) is learned\n return aux_vars[0].tensor * cost_function.error_from_tensors(\n optim_vars[0].tensor\n )\n\n if isinstance(cost_weight, th.ScaleCostWeight):\n # this case only hits with the reference layer, for which weight\n # is not learned (just a scalar value of 1)\n cost_weight_dim = None # Vector infers dimension from given cw_data\n cw_data = torch.ones(1, 1)\n elif isinstance(cost_weight, th.DiagonalCostWeight):\n # cw_data is None, since no need to pass data to aux variable,\n # because it will be replaced during forward pass of learned layer\n cost_weight_dim = cost_function.weight.diagonal.shape[1]\n cw_data = None\n\n # in this case the cost weight is a scalar constant of 1.0\n learnable_cost_function = th.AutoDiffCostFunction(\n variables,\n error_fn,\n cost_function.dim(),\n aux_vars=[\n th.Vector(cost_weight_dim, name=\"learnable_err_param\", tensor=cw_data)\n ],\n autograd_vectorize=True,\n autograd_mode=\"vmap\",\n )\n objective.add(learnable_cost_function)\n else:\n objective.add(cost_function)\n\n optimizer = nonlinear_optimizer_cls(\n objective,\n vectorize=False,\n linear_solver_cls=linear_solver_cls,\n max_iterations=max_iterations,\n )\n\n if hasattr(optimizer, \"linear_solver\"):\n assert isinstance(optimizer.linear_solver, linear_solver_cls)\n assert not objective.vectorized\n\n if force_vectorization:\n th.Vectorize._handle_singleton_wrapper = (\n th.Vectorize._handle_schema_vectorization\n 
)\n\n theseus_layer = th.TheseusLayer(optimizer, vectorize=True)\n assert objective.vectorized\n\n return theseus_layer\n\n\ndef get_average_sample_cost(\n x_samples, layer_to_learn, cost_weight_param_name, cost_weight_fn\n):\n cost_opt = None\n n_samples = x_samples.shape[-1]\n for sidx in range(0, n_samples):\n input_values_opt = {\n \"coefficients\": x_samples[:, :, sidx],\n cost_weight_param_name: cost_weight_fn(),\n }\n layer_to_learn.objective.update(input_values_opt)\n if cost_opt is not None:\n cost_opt = cost_opt + torch.sum(layer_to_learn.objective.error(), dim=1)\n else:\n cost_opt = torch.sum(layer_to_learn.objective.error(), dim=1)\n cost_opt = cost_opt / n_samples\n\n return cost_opt\n\n\ndef test_layer_solver_constructor():\n dummy = torch.ones(1, 1)\n for linear_solver_cls in [th.LUDenseSolver, th.CholeskyDenseSolver]:\n layer = create_qf_theseus_layer(\n dummy, dummy, linear_solver_cls=linear_solver_cls\n )\n assert isinstance(\n layer.optimizer.linear_solver.linearization, th.DenseLinearization\n )\n assert isinstance(layer.optimizer.linear_solver, linear_solver_cls)\n assert isinstance(layer.optimizer, th.GaussNewton)\n\n\ndef _run_optimizer_test(\n nonlinear_optimizer_cls,\n linear_solver_cls,\n optimizer_kwargs,\n cost_weight_model,\n use_learnable_error=False,\n verbose=False,\n learning_method=\"default\",\n force_vectorization=False,\n max_iterations=10,\n lr=0.075,\n loss_ratio_target=0.01,\n):\n device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n print(f\"_run_test_for: {device}\")\n print(\n f\"testing for optimizer {nonlinear_optimizer_cls.__name__}, \"\n f\"cost weight modeled as {cost_weight_model}, \"\n f\"linear solver {linear_solver_cls.__name__ if linear_solver_cls is not None else None} \"\n f\"learning method {learning_method}\"\n )\n\n rng = torch.Generator(device=device)\n rng.manual_seed(0)\n\n torch.manual_seed(0) # fix global seed for mlp\n\n # Create the dataset to fit, model(x) is the true data generation process\n batch_size = 16\n num_points = 10\n xs = torch.linspace(0, 10, num_points).repeat(batch_size, 1).to(device)\n xs += 0.1 * torch.randn(batch_size, num_points, generator=rng, device=device)\n\n ys = model(xs, torch.ones(batch_size, 2, device=device))\n # Shift the y values a bit so there is no perfect fit and changing the\n # cost weight results in a different parameter fit\n fake_noise = torch.logspace(-4, 4, num_points, base=math.e).unsqueeze(0).to(device)\n ys -= fake_noise\n\n # First we create a quadratic fit problem with unit cost weight to see what\n # its solution is and use this solution as the target\n layer_ref = create_qf_theseus_layer(\n xs,\n ys,\n nonlinear_optimizer_cls=nonlinear_optimizer_cls,\n linear_solver_cls=linear_solver_cls,\n use_learnable_error=use_learnable_error,\n force_vectorization=force_vectorization,\n max_iterations=max_iterations,\n )\n layer_ref.to(device)\n initial_coefficients = torch.ones(batch_size, 2, device=device) * torch.tensor(\n [0.75, 7], device=device\n )\n with torch.no_grad():\n input_values = {\"coefficients\": initial_coefficients}\n target_vars, _ = layer_ref.forward(\n input_values, optimizer_kwargs={**optimizer_kwargs, **{\"verbose\": verbose}}\n )\n\n # Now create another that starts with a random cost weight and use backpropagation to\n # find the cost weight whose solution matches the above target\n # To do this, we create a diagonal cost weight with an auxiliary variable called\n # \"cost_weight_values\", which will get updated by the forward method of Theseus\n # 
layer.\n\n # Note: interestingly, if we pass a torch.Parameter parameter as the data to the\n # auxiliary variable of the cost weight, we don't even\n # need to pass updated values through \"objective.update\". I'm doing it this way\n # to check that update works properly\n cost_weight = th.DiagonalCostWeight(\n th.Variable(torch.empty(1, num_points), name=\"cost_weight_values\")\n )\n\n # Here we create the outer loop models and optimizers for the cost weight\n if cost_weight_model == \"direct\":\n cost_weight_params = nn.Parameter(\n torch.randn(num_points, generator=rng, device=device)\n )\n\n def cost_weight_fn():\n return cost_weight_params.clone().view(1, -1)\n\n optimizer = torch.optim.Adam([cost_weight_params], lr=lr)\n\n elif cost_weight_model == \"mlp\":\n mlp = thutils.build_mlp(num_points, 20, num_points, 2).to(device)\n dummy_input = torch.ones(1, num_points, device=device)\n\n def cost_weight_fn():\n return mlp(dummy_input)\n\n optimizer = torch.optim.Adam(mlp.parameters(), lr=lr)\n\n layer_to_learn = create_qf_theseus_layer(\n xs,\n ys,\n cost_weight=cost_weight,\n nonlinear_optimizer_cls=nonlinear_optimizer_cls,\n linear_solver_cls=linear_solver_cls,\n use_learnable_error=use_learnable_error,\n force_vectorization=force_vectorization,\n max_iterations=max_iterations,\n )\n layer_to_learn.to(device)\n layer_to_learn.verify_jacobians()\n\n # Check the initial solution quality to check how much has loss improved later\n\n # When using learnable error function, we don't update the cost weight directly but do it\n # through the parameters of the learnable error\n cost_weight_param_name = (\n \"learnable_err_param\" if use_learnable_error else \"cost_weight_values\"\n )\n input_values = {\n \"coefficients\": initial_coefficients,\n cost_weight_param_name: cost_weight_fn(),\n }\n\n with torch.no_grad():\n pred_vars, info = layer_to_learn.forward(\n input_values, optimizer_kwargs=optimizer_kwargs\n )\n\n loss0 = F.mse_loss(\n pred_vars[\"coefficients\"], target_vars[\"coefficients\"]\n ).item()\n assert not (\n (info.status == th.NonlinearOptimizerStatus.START)\n | (info.status == th.NonlinearOptimizerStatus.FAIL)\n ).all()\n\n print(\"Initial loss: \", loss0)\n # --------- Learning happens here ---------#\n solved = False\n for i in range(200):\n optimizer.zero_grad()\n input_values = {\n \"coefficients\": initial_coefficients,\n cost_weight_param_name: cost_weight_fn(),\n }\n pred_vars, info = layer_to_learn.forward(\n input_values,\n optimizer_kwargs={\n **optimizer_kwargs,\n **{\n \"verbose\": verbose,\n \"backward_mode\": \"implicit\"\n if learning_method == \"direct\"\n else \"unroll\",\n },\n },\n )\n\n assert not (\n (info.status == th.NonlinearOptimizerStatus.START)\n | (info.status == th.NonlinearOptimizerStatus.FAIL)\n ).all()\n\n mse_loss = F.mse_loss(pred_vars[\"coefficients\"], target_vars[\"coefficients\"])\n\n if learning_method == \"leo\":\n # groundtruth cost\n x_gt = target_vars[\"coefficients\"]\n input_values_gt = {\n \"coefficients\": x_gt,\n cost_weight_param_name: cost_weight_fn(),\n }\n layer_to_learn.objective.update(input_values_gt)\n cost_gt = torch.sum(layer_to_learn.objective.error(), dim=1)\n\n # optimizer cost\n x_opt = pred_vars[\"coefficients\"].detach()\n x_samples = layer_to_learn.compute_samples(\n layer_to_learn.optimizer.linear_solver, n_samples=10, temperature=1.0\n ) # batch_size x n_vars x n_samples\n if x_samples is None: # use mean solution\n x_samples = x_opt.reshape(x_opt.shape[0], -1).unsqueeze(\n -1\n ) # batch_size x n_vars x 
n_samples\n cost_opt = get_average_sample_cost(\n x_samples, layer_to_learn, cost_weight_param_name, cost_weight_fn\n )\n\n # loss value\n l2_reg = F.mse_loss(\n cost_weight_fn(), torch.zeros((1, num_points), device=device)\n )\n loss = (cost_gt - cost_opt) ** 2 + 10.0 * l2_reg\n loss = torch.mean(loss, dim=0)\n else:\n loss = mse_loss\n\n loss.backward()\n optimizer.step()\n\n loss_ratio = mse_loss.item() / loss0\n print(\"Iteration: \", i, \"Loss: \", mse_loss.item(), \". Loss ratio: \", loss_ratio)\n if loss_ratio < loss_ratio_target:\n solved = True\n break\n assert solved\n\n\ndef _solver_can_be_run(lin_solver_cls):\n if lin_solver_cls == th.LUCudaSparseSolver:\n if not torch.cuda.is_available():\n return False\n try:\n import theseus.extlib.cusolver_lu_solver.CusolverLUSolver # noqa: F401\n except Exception:\n return False\n if lin_solver_cls == th.BaspachoSparseSolver:\n try:\n from theseus.extlib.baspacho_solver import ( # noqa: F401\n SymbolicDecomposition,\n )\n except Exception:\n return False\n return True\n\n\n@pytest.mark.parametrize(\n \"nonlinear_optim_cls\", [th.Dogleg, th.GaussNewton, th.LevenbergMarquardt, th.DCEM]\n)\n@pytest.mark.parametrize(\n \"lin_solver_cls\",\n [\n th.CholeskyDenseSolver,\n th.LUDenseSolver,\n th.CholmodSparseSolver,\n th.LUCudaSparseSolver,\n th.BaspachoSparseSolver,\n ],\n)\n@pytest.mark.parametrize(\"use_learnable_error\", [True, False])\n@pytest.mark.parametrize(\"cost_weight_model\", [\"direct\", \"mlp\"])\n@pytest.mark.parametrize(\"learning_method\", [\"default\", \"leo\"])\ndef test_backward(\n nonlinear_optim_cls,\n lin_solver_cls,\n use_learnable_error,\n cost_weight_model,\n learning_method,\n):\n if not _solver_can_be_run(lin_solver_cls):\n return\n optim_kwargs = {\n th.GaussNewton: {},\n th.LevenbergMarquardt: {\n \"damping\": 0.01,\n \"adaptive_damping\": lin_solver_cls not in [th.CholmodSparseSolver]\n and learning_method not in \"leo\",\n },\n th.Dogleg: {},\n th.DCEM: {},\n }[nonlinear_optim_cls]\n if learning_method == \"leo\":\n if lin_solver_cls not in [th.CholeskyDenseSolver, th.LUDenseSolver]:\n # other solvers don't support sampling from system's covariance\n return\n if nonlinear_optim_cls == th.Dogleg:\n return # LEO not working with Dogleg\n if nonlinear_optim_cls == th.DCEM:\n return\n if nonlinear_optim_cls == th.Dogleg and lin_solver_cls != th.CholeskyDenseSolver:\n return\n if nonlinear_optim_cls == th.DCEM:\n if lin_solver_cls != th.CholeskyDenseSolver:\n return\n else:\n lin_solver_cls = None\n\n # test both vectorization on/off\n force_vectorization = torch.rand(1).item() > 0.5\n _run_optimizer_test(\n nonlinear_optim_cls,\n lin_solver_cls,\n optim_kwargs,\n cost_weight_model,\n use_learnable_error=use_learnable_error,\n force_vectorization=force_vectorization,\n learning_method=learning_method,\n max_iterations=10 if nonlinear_optim_cls != th.DCEM else 50,\n lr=1.0\n if nonlinear_optim_cls == th.Dogleg and not torch.cuda.is_available()\n else 0.075,\n )\n\n\ndef test_send_to_device():\n device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n print(f\"test_send_to_device: {device}\")\n\n # Create the dataset to fit, model(x) is the true data generation process\n batch_size = 16\n num_points = 10\n xs = torch.linspace(0, 10, num_points).repeat(batch_size, 1)\n ys = model(xs, torch.ones(batch_size, 2))\n\n layer = create_qf_theseus_layer(xs, ys)\n input_values = {\"coefficients\": torch.ones(batch_size, 2, device=device) * 0.5}\n with torch.no_grad():\n if device != \"cpu\":\n with 
pytest.raises(ValueError):\n layer.forward(input_values)\n layer.to(device)\n output_values, _ = layer.forward(input_values)\n for k, v in output_values.items():\n assert v.device == input_values[k].device\n\n\ndef test_check_objective_consistency():\n objective, *_ = create_objective_with_mock_cost_functions()\n optimizer = th.GaussNewton(objective, th.CholeskyDenseSolver)\n\n def _do_check(layer_, optimizer_):\n with pytest.raises(RuntimeError):\n layer_.forward({})\n with pytest.raises(RuntimeError):\n optimizer_.optimize(**{__FROM_THESEUS_LAYER_TOKEN__: True})\n\n # Check for adding a factor\n new_cost = MockCostFunction(\n [MockVar(1, name=\"dummy\")],\n [],\n MockCostWeight(MockVar(1, name=\"weight_aux\")),\n )\n layer = TheseusLayer(optimizer)\n objective.add(new_cost)\n _do_check(layer, optimizer)\n\n # Now check erasing a factor\n objective, cost_functions, *_ = create_objective_with_mock_cost_functions()\n optimizer = th.GaussNewton(objective, th.CholeskyDenseSolver)\n objective.erase(cost_functions[0].name)\n _do_check(layer, optimizer)\n\n\ndef test_pass_optimizer_kwargs():\n # Create the dataset to fit, model(x) is the true data generation process\n batch_size = 16\n num_points = 10\n xs = torch.linspace(0, 10, num_points).repeat(batch_size, 1)\n ys = model(xs, torch.ones(batch_size, 2))\n\n layer = create_qf_theseus_layer(\n xs,\n ys,\n nonlinear_optimizer_cls=th.GaussNewton,\n linear_solver_cls=th.CholmodSparseSolver,\n )\n layer.to(\"cpu\")\n input_values = {\"coefficients\": torch.ones(batch_size, 2) * 0.5}\n for tbs in [True, False]:\n _, info = layer.forward(\n input_values, optimizer_kwargs={\"track_best_solution\": tbs}\n )\n if tbs:\n assert (\n isinstance(info.best_solution, dict)\n and \"coefficients\" in info.best_solution\n )\n else:\n assert info.best_solution is None\n\n # Pass invalid backward mode to trigger exception\n with pytest.raises(ValueError):\n layer.forward(input_values, optimizer_kwargs={\"backward_mode\": -1})\n\n # Now test that compute_delta() args passed correctly\n # Path compute_delta() to receive args we control\n def _mock_compute_delta(cls, fake_arg=None, **kwargs):\n if fake_arg is not None:\n raise ValueError\n return layer.optimizer.linear_solver.solve()\n\n with mock.patch.object(th.GaussNewton, \"compute_delta\", _mock_compute_delta):\n layer_2 = create_qf_theseus_layer(xs, ys)\n layer_2.forward(input_values)\n # If fake_arg is passed correctly, the mock of compute_delta will trigger\n with pytest.raises(ValueError):\n layer_2.forward(input_values, {\"fake_arg\": True})\n\n\ndef test_no_layer_kwargs():\n # Create the dataset to fit, model(x) is the true data generation process\n batch_size = 16\n num_points = 10\n xs = torch.linspace(0, 10, num_points).repeat(batch_size, 1)\n ys = model(xs, torch.ones(batch_size, 2))\n\n layer = create_qf_theseus_layer(\n xs,\n ys,\n nonlinear_optimizer_cls=th.GaussNewton,\n linear_solver_cls=th.CholmodSparseSolver,\n )\n layer.to(\"cpu\")\n input_values = {\"coefficients\": torch.ones(batch_size, 2) * 0.5}\n\n # Trying a few variations of aux_vars. 
In general, no kwargs should be accepted\n # beyond input_tensors and optimization_kwargs, but I'm not sure how to test for\n # this\n with pytest.raises(TypeError):\n layer.forward(input_values, aux_vars=None)\n\n with pytest.raises(TypeError):\n layer.forward(input_values, aux_variables=None)\n\n with pytest.raises(TypeError):\n layer.forward(input_values, auxiliary_vars=None)\n","repo_name":"facebookresearch/theseus","sub_path":"tests/theseus_tests/test_theseus_layer.py","file_name":"test_theseus_layer.py","file_ext":"py","file_size_in_byte":20901,"program_lang":"python","lang":"en","doc_type":"code","stars":1481,"dataset":"github-code","pt":"81"} +{"seq_id":"72086809544","text":"import conect_firebase\nfrom backend.dl_monhoc import tenmh_ma\nfrom backend.dl_adminlop import tenlop_ma\nfrom backend.dl_sinhvien import tensv_ma\nimport datetime\nimport re\ndb=conect_firebase.connect().database()\n# https://qastack.vn/programming/2405292/how-to-check-if-text-is-empty-spaces-tabs-newlines-in-python\n# kt chuooix chir cos khoangr troongs\n\n\ndef cahoc():\n time = datetime.datetime.now()\n now = time.strftime(\"%H:%M:%S\")\n a=\"0\"\n data=db.child(\"CaHoc\").get()\n for i in data.each():\n if(i.val()[\"TGBD\"]<=str(now) and i.val()[\"TGKT\"]>=str(now)):\n a=i.val()[\"TenCa\"]\n return a\n\ndef catkb(matkb):\n a=0\n try:\n data=db.child(\"ThoiKhoaBieu\").order_by_child(\"MaTKB\").equal_to(str(matkb)).get()\n for i in data.each():\n if(i.val()[\"MaTKB\"]==str(matkb)):\n a=i.val()[\"Ca\"]\n except:a=0\n return a\n\n\ndef cong_them_gio(now):\n d1 = datetime.datetime.strptime(now, \"%H:%M:%S\")\n d=d1 + datetime.timedelta(hours=8)\n d=str(d).split()\n return d[1]\ndef tru_gio(now):\n d1 = datetime.datetime.strptime(now, \"%H:%M:%S\")\n d=d1 - datetime.timedelta(hours=5)\n d=str(d).split()\n return d[1]\n\ndef cong_ngay(ngay):\n d1 = datetime.datetime.strptime(ngay, \"%d/%m/%Y\")\n d=d1 + datetime.timedelta(days=180)\n d=str(d).split()\n return d[0]\n\n\n\ndef khoang_tgvao(tenca):\n time = datetime.datetime.now()\n now = time.strftime(\"%H:%M:%S\")\n try:\n data=db.child(\"CaHoc\").order_by_child(\"TenCa\").equal_to(str(tenca)).get()\n for i in data.each():\n print(cong_them_gio(i.val()[\"TGBD\"]))\n if(cong_them_gio(i.val()[\"TGBD\"])>=str(now)):\n return True\n else:return False\n except: return True\ndef khoang_tgra(tenca):\n time = datetime.datetime.now()\n now = time.strftime(\"%H:%M:%S\")\n try:\n data=db.child(\"CaHoc\").order_by_child(\"TenCa\").equal_to(str(tenca)).get()\n for i in data.each():\n if(tru_gio(i.val()[\"TGKT\"])<=str(now)):\n return True\n else:return False\n except: return True\n\n\ndef thong_tin_theo_tkb(magv,ngay,ca):\n a=[]\n try:\n data=db.child(\"ThoiKhoaBieu\").order_by_child(\"Ngay\").equal_to(str(ngay)).get()\n for i in data.each():\n if(i.val()[\"MaGV\"]==str(magv) and i.val()[\"Ngay\"]==str(ngay) and str(ca) in i.val()[\"Ca\"]):\n a.append(tenlop_ma(str(i.val()[\"MaLop\"])))\n a.append(tenmh_ma(str(i.val()[\"MaMH\"])))\n a.append(str(i.val()[\"MaTKB\"]))\n except: a=[]\n return a\n\ndef kt_TT_diemdanh(matkb):\n a=\"\"\n data=db.child(\"ThoiKhoaBieu\").get()\n for i in data.each():\n if(i.val()[\"MaTKB\"]==str(matkb)):\n a=str(i.val()[\"TrangThaiDD\"])\n return a\n\n\ndef diem_danh_vao_csdl(matkb,masv,thongtin,malop,mamh,magv,ngay,ca,tgvao):\n data={'Ma':str(matkb),'MaSV':str(masv),'ThongTin':str(thongtin),'MaLop':str(malop),'MaMH':str(mamh),'MaGV':str(magv),'Ngay':str(ngay),'Ca':str(ca),'TG_Vao':str(tgvao),'TG_Ra':'','GhiChu':''}\n try:\n 
db.child('DiemDanh').push(data)\n return True\n except:\n return False\ndef hen_ngay_xoa_du_lieu(matkb,ngay):\n ngay=cong_ngay(ngay).split(\"-\")\n a=str(ngay[2])+\"/\"+str(ngay[1])+\"/\"+str(ngay[0])\n data = {'Ma':str(matkb),'Ngay':str(a)}\n try:\n db.child('NgayXoaDL').push(data)\n return True\n except:\n return False\n\ndef xoa_dl_diemdanh(ma):\n try:\n data=db.child('DiemDanh').order_by_child('Ma').equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"Ma\"] == str(ma):\n db.child('DiemDanh').child(i.key()).remove()\n return True\n except:return False\ndef xoa_dl_tkb(ma):\n data=db.child('ThoiKhoaBieu').order_by_child('MaTKB').equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"MaTKB\"] == str(ma):\n db.child('ThoiKhoaBieu').child(i.key()).remove()\n return True\n\n\ndef tra_dl_diemdanh(ngay):\n data=db.child('NgayXoaDL').get()\n for i in data.each():\n date1 = datetime.datetime.strptime(str(ngay), '%d/%m/%Y')\n date2 = datetime.datetime.strptime(str(i.val()[\"Ngay\"]), '%d/%m/%Y')\n if date1 >= date2:\n if xoa_dl_diemdanh(str(i.val()['Ma'])) ==True and xoa_dl_tkb(str(i.val()['Ma']))==True:\n db.child('NgayXoaDL').child(i.key()).remove()\n return True\n\ndef kt_hen_ngay_xoa(matkb):\n try:\n data=db.child('NgayXoaDL').order_by_child('Ma').equal_to(str(matkb)).get()\n for i in data.each():\n if i.val()['Ma']==str(matkb):\n return True\n else:return False\n except:return False\n\ndef update_TT_diemdanh(ma):\n data=db.child(\"ThoiKhoaBieu\").get()\n dl={'TrangThaiDD':'1'}\n for i in data.each():\n if(i.val()[\"MaTKB\"]==str(ma)):\n try:\n db.child(\"ThoiKhoaBieu\").child(i.key()).update(dl)\n return True\n except:\n return False\n# def capnhatthongtin(ma,masv):\n# data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n# dl={'ThongTin':'vắng'}\n# for i in data.each():\n# if(i.val()[\"Ma\"]==str(ma) and i.val()[\"MaSV\"] == str(masv)):\n# try:\n# db.child(\"DiemDanh\").child(i.key()).update(dl)\n# except:\n# print('Lỗi update của hàm capnhatthongtin trong dl_diemdanh')\n\ndef kiemtrathongtin(ma):\n dl={'ThongTin':'Vắng'}\n try:\n data = db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"TG_Vao\"]==str(\"\") or i.val()[\"TG_Ra\"]==str(\"\"):\n db.child(\"DiemDanh\").child(i.key()).update(dl)\n except:print('Lỗi kiemtrathongtin trong dl_diemdanh')\n\ndef bangdiemdanh(ma):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if(i.val()[\"Ma\"]==str(ma)):\n e=[i.val()[\"MaSV\"],i.val()[\"MaSV\"] ,i.val()[\"ThongTin\"],i.val()[\"TG_Vao\"],i.val()[\"TG_Ra\"],i.val()[\"GhiChu\"]]\n a.append(e)\n except:\n a=[]\n sx_ma = sorted(a, key=lambda item: (item[0]))\n return sx_ma\ndef bangdiemdanh1(ma,malop):\n a=[]\n\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if(i.val()[\"Ma\"]==str(ma) and i.val()[\"MaLop\"] ==str(malop)):\n e=[i.val()[\"MaSV\"],i.val()[\"MaSV\"] ,i.val()[\"ThongTin\"],i.val()[\"TG_Vao\"],i.val()[\"TG_Ra\"],i.val()[\"GhiChu\"]]\n a.append(e)\n sx_ma = sorted(a, key=lambda item: (item[0]))\n return sx_ma\n\ndef dd_sv_vao(ma):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"Ma\"]==str(ma):\n a.append(i.val()[\"MaSV\"])\n \n except:\n a=[]\n return a\ndef dd_sv_ra(ma):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if 
i.val()[\"Ma\"]==str(ma) and i.val()[\"TG_Ra\"] != \"\":\n a.append(i.val()[\"MaSV\"])\n except:\n a=[]\n return a\ndef sv_da_dd(ma):\n a=[]\n try:\n data=db.child(\"DiemDanh\").get()\n for i in data.each():\n if i.val()[\"Ma\"]==str(ma):\n a.append(i.val()[\"MaSV\"])\n except:\n a=[]\n return a\n\ndef sv_da_dd_vao(ma):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"Ma\"]==str(ma) and i.val()[\"TG_Vao\"] != \"\":\n a.append(i.val()[\"MaSV\"])\n except:\n a=[]\n return a\ndef sv_da_dd_vao1(ma,malop):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if i.val()[\"Ma\"]==str(ma) and i.val()[\"TG_Vao\"] != \"\" and i.val()['MaLop']==str(malop):\n a.append(i.val()[\"MaSV\"])\n except:\n a=[]\n return a\n\ndef tg_tre(magv):\n try:\n data=db.child(\"tgtre\").child(str(magv)).get()\n tgtre=data.val()['thoigian']\n except:\n tgtre=\"00:00\"\n \n if tgtre== None:\n tgtre=\"00:00\"\n return tgtre\n\n\ndef khong_dau(s):\n s = re.sub(r'[àáạảãâầấậẩẫăằắặẳẵ]', 'a', s)\n s = re.sub(r'[ÀÁẠẢÃĂẰẮẶẲẴÂẦẤẬẨẪ]', 'A', s)\n s = re.sub(r'[èéẹẻẽêềếệểễ]', 'e', s)\n s = re.sub(r'[ÈÉẸẺẼÊỀẾỆỂỄ]', 'E', s)\n s = re.sub(r'[òóọỏõôồốộổỗơờớợởỡ]', 'o', s)\n s = re.sub(r'[ÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠ]', 'O', s)\n s = re.sub(r'[ìíịỉĩ]', 'i', s)\n s = re.sub(r'[ÌÍỊỈĨ]', 'I', s)\n s = re.sub(r'[ùúụủũưừứựửữ]', 'u', s)\n s = re.sub(r'[ƯỪỨỰỬỮÙÚỤỦŨ]', 'U', s)\n s = re.sub(r'[ỳýỵỷỹ]', 'y', s)\n s = re.sub(r'[ỲÝỴỶỸ]', 'Y', s)\n s = re.sub(r'[Đ]', 'D', s)\n s = re.sub(r'[đ]', 'd', s)\n return s\n\ndef timkiem_dd(ma,q):\n a=[]\n try:\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(ma)).get()\n for i in data.each():\n if(i.val()[\"Ma\"]==str(ma)):\n e=[i.val()[\"MaSV\"],tensv_ma(i.val()[\"MaSV\"]) ,i.val()[\"ThongTin\"],i.val()[\"TG_Vao\"],i.val()[\"TG_Ra\"],i.val()[\"GhiChu\"]]\n if khong_dau(str(q)) in khong_dau(i.val()[\"MaSV\"]) or khong_dau(str(q)) in khong_dau(tensv_ma(i.val()[\"MaSV\"])) or khong_dau(str(q)) in khong_dau(i.val()[\"ThongTin\"])or khong_dau(str(q)) in khong_dau(i.val()[\"TG_Vao\"])or khong_dau(str(q)) in khong_dau(i.val()[\"TG_Ra\"])or khong_dau(str(q)) in khong_dau(i.val()[\"GhiChu\"]):\n a.append(e)\n except:\n a=[]\n sx_ma = sorted(a, key=lambda item: (item[0]))\n return a\n\ndef capnhat_tgra(matkb,masv,tgra):\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(matkb)).get()\n dl_ktt={'TG_Ra':str(tgra)}\n dl1={'TG_Ra':str(tgra),'ThongTin':str('Vắng')}\n for i in data.each():\n if(i.val()[\"Ma\"] == str(matkb) and i.val()[\"MaSV\"] == str(masv) and str(i.val()[\"TG_Vao\"]) == str(\"\")):\n try:\n db.child(\"DiemDanh\").child(i.key()).update(dl1)\n except:\n print('Lỗi do capnhat_tgra trong dl_diemdanh')\n elif i.val()[\"Ma\"] == str(matkb) and i.val()[\"MaSV\"] == str(masv):\n try:\n db.child(\"DiemDanh\").child(i.key()).update(dl_ktt)\n except:\n print('Lỗi do capnhat_tgra trong dl_diemdanh')\n \n\ndef capnhat_tgvao(matkb,masv,tgvao,tt):\n data=db.child(\"DiemDanh\").order_by_child(\"Ma\").equal_to(str(matkb)).get()\n dl={'TG_Vao':str(tgvao), 'ThongTin':str(tt)}\n for i in data.each():\n if(i.val()[\"Ma\"]==str(matkb) and i.val()[\"MaSV\"]==str(masv)):\n try:\n db.child(\"DiemDanh\").child(i.key()).update(dl)\n return True\n except:\n return False\n \ndef xoasv_dd(matkb,masv):\n try:\n data=db.child(\"DiemDanh\").get()\n for i in data.each():\n if(i.val()[\"Ma\"]==str(matkb) and i.val()[\"MaSV\"]==str(masv)):\n \n 
db.child(\"DiemDanh\").child(i.key()).remove()\n return True\n except:\n return False\n\ndef diemdanhbangexcel(matkb,masv,thongtin,malop,mamh,magv,ngay,ca,tgvao,tgra):\n data={'Ma':str(matkb),'MaSV':str(masv),'ThongTin':str(thongtin),'MaLop':str(malop),'MaMH':str(mamh),'MaGV':str(magv),'Ngay':str(ngay),'Ca':str(ca),'TG_Vao':str(tgvao),'TG_Ra':str(tgra),'GhiChu':''}\n try:\n db.child('DiemDanh').push(data)\n return True\n except:\n return False\n\ndef xoadd(matkb):\n data=db.child(\"DiemDanh\").get()\n for i in data.each():\n if i.val()[\"Ma\"] == str(matkb):\n db.child(\"DiemDanh\").child(i.key()).remove()\n \n\n# def tgca(tgvao,tgra,ca):\n# data=db.child(\"CaHoc\").get()\n# for i in data.each():\n# if i.val()[\"TenCa\"]==str(ca) and i.val()[\"TGBD\"] <= str(tgvao) and i.val()[\"TGKT\"] >= str(tgra):\n# return True\n# return False\ndef tgca(ca):\n e=[]\n data=db.child(\"CaHoc\").order_by_child(\"TenCa\").equal_to(str(ca)).get()\n for i in data.each():\n if i.val()[\"TenCa\"] == str(ca) :\n e.append(i.val()['TGBD'])\n e.append(i.val()['TGKT'])\n return e\n\ndef tgbd_dd(matkb):\n try:\n data=db.child(\"ThoiKhoaBieu\").order_by_child(\"MaTKB\").equal_to(str(matkb)).get()\n for i in data.each():\n e=i.val()[\"Ca\"]\n a=e[0]\n root=db.child(\"CaHoc\").order_by_child(\"TenCa\").equal_to(str(a)).get()\n for i in root.each():\n tgbd=i.val()[\"TGBD\"]\n return tgbd\n except:\n print(\"Error\")\n\ndef test():\n # data=db.child(\"DiemDanh\").order_by_child(\"TG_Ra\").equal_to(str(\"11:19:08\")).get()\n # dl={'TG_Ra':'17:20:11'}\n # for i in data.each():\n # if(i.val()[\"TG_Ra\"]==str(\"11:19:08\")):\n # try:\n # db.child(\"DiemDanh\").child(i.key()).update(dl)\n # print(\"0k\")\n # except:print(\"k xoá\")\n data=db.child(\"ThoiKhoaBieu\").order_by_child(\"MaTKB\").equal_to(str(\"37\")).get()\n dl={'Ngay':'09/11/2021'}\n for i in data.each():\n if(i.val()[\"MaTKB\"]==str(\"37\")):\n try:\n db.child(\"ThoiKhoaBieu\").child(i.key()).remove()\n print(\"đã update\")\n except:print(\"k update\")\n\n\n","repo_name":"HUYTIEUQUY/face_reconition","sub_path":"backend/dl_diemdanh.py","file_name":"dl_diemdanh.py","file_ext":"py","file_size_in_byte":13590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36814121009","text":"# simple implementation of CAM in PyTorch for the networks such as ResNet, DenseNet, SqueezeNet, Inception\n# from https://github.com/metalbubble/CAM/blob/master/pytorch_CAM.py\n\nimport sys\nimport json\nfrom PIL import Image\nfrom torchvision import models, transforms\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch import nn\nimport os\nimport torch\nimport numpy as np\nimport cv2\n\n\ndef generateCamClassificationHeatmap(model_input_location, input_image, label_map, desired_label_index):\n net = models.resnet101(pretrained=True)\n #for param in model.parameters():\n # param.requires_grad = False\n num_ftrs = net.fc.in_features\n net.fc = nn.Linear(num_ftrs, 3)\n # if model_input_location is empty, use default weights\n if model_input_location != \"\":\n # https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349/3\n # no idea what the second and third parameters to torch.load do,\n # but they fix issue of loading model trained on gpu on a host with only cpu, from the link above\n net.load_state_dict(torch.load(model_input_location, map_location=lambda storage, loc: storage))\n\n finalconv_name = 'layer4'\n net.eval()\n\n # hook the feature extractor\n 
features_blobs = []\n def hook_feature(module, input, output):\n features_blobs.append(output.data.cpu().numpy())\n\n net._modules.get(finalconv_name).register_forward_hook(hook_feature)\n\n # get the softmax weight\n params = list(net.parameters())\n weight_softmax = np.squeeze(params[-2].data.numpy())\n\n def returnCAM(feature_conv, weight_softmax, class_idx):\n # generate the class activation maps upsample to 256x256\n size_upsample = (256, 256)\n bz, nc, h, w = feature_conv.shape\n output_cam = []\n for idx in class_idx:\n cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n output_cam.append(cv2.resize(cam_img, size_upsample))\n return output_cam\n\n\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n preprocess = transforms.Compose([\n transforms.Scale((224,224)),\n transforms.ToTensor(),\n normalize\n ])\n\n img_pil = Image.fromarray(cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB))\n img_tensor = preprocess(img_pil)\n img_variable = Variable(img_tensor.unsqueeze(0))\n logit = net(img_variable)\n\n # download the imagenet category list\n classes = {int(key):value for (key, value)\n in json.load(open(label_map)).items()}\n\n h_x = F.softmax(logit).data.squeeze()\n probs, idx = h_x.sort(0, True)\n\n # output the prediction\n for i in range(0, 3):\n print('{:.3f} -> {}'.format(probs[i], classes[idx[i]]))\n\n # generate class activation mapping for the top1 prediction\n CAMs = returnCAM(features_blobs[0], weight_softmax, [desired_label_index])\n\n # render the CAM and output\n print('showing heatmap for label %s'%classes[desired_label_index])\n height, width, _ = input_image.shape\n grayscaleHeatmap = cv2.resize(CAMs[0],(width, height))\n heatmap = cv2.applyColorMap(grayscaleHeatmap, cv2.COLORMAP_JET)\n return (grayscaleHeatmap, input_image, heatmap * 0.3 + input_image * 0.5)\n\ndef getConnectedComponentsAndImgData(model_input_location, input_image, label_map, desired_label_index):\n grayscaleHeatmap, img, imgAndColorHeatmap = generateCamClassificationHeatmap(model_input_location, input_image, label_map,\n desired_label_index)\n # https://stackoverflow.com/questions/35854197/how-to-use-opencvs-connected-components-with-stats-in-python\n # not that cv2.THRESH_BINARY and cv2.THRESH_OTSU are flags, binary says binary thresholding (i think)\n # otsu automatically figures out the best global thresholding\n # https://docs.opencv.org/3.3.1/d7/d4d/tutorial_py_thresholding.html\n ret, thresh = cv2.threshold(grayscaleHeatmap, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n connectivity = 4\n # invert with bitwise_not as thresh makes the desired regions black and the rest white, but\n # connected components finds the white regions\n output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)\n num_labels, labels, stats, centroids = output\n # sort stats so one with largest area comes first\n # fifth element of each element in stats is size, so getting largest of those\n statsSorted = stats[np.argsort(stats[:, 4])[::-1]]\n return (statsSorted, thresh, grayscaleHeatmap, img, imgAndColorHeatmap)\n\ndef getLargestConnectComponentAsPILImage(model_input_location, input_image, label_map, desired_label_index):\n statsSorted, thresh, grayscaleHeatmap, img, _ = getConnectedComponentsAndImgData(model_input_location, input_image, label_map, desired_label_index)\n # filter out regions that aren't greater than average by 
at least 20\n    # meaning some significant part of the region is above the threshold\n    meanPixelValue = np.mean(thresh)\n    # reverse index order here as array index is row then column, aka y then x\n    aboveThresholdStats = [s for s in statsSorted if np.mean(thresh[s[1]:(s[1]+s[3]), s[0]:(s[0] + s[2])]) > meanPixelValue]\n    # give up if no good regions\n    if len(aboveThresholdStats) == 0:\n        return None\n    imgsToReturn = []\n    # already sorted, so 0 gets largest\n    for regionStat in aboveThresholdStats:\n        # object is of form leftmost x, topmost y, width, height, size\n        x, y, width, height, size = regionStat\n        # note that 0,0 is top left in opencv\n        # taking subset of image in bounding box; rows index y, columns index x,\n        # matching the row-then-column note above\n        connectedComponentImg = img[y:(y + height), x:(x + width)]\n        # https://docs.opencv.org/3.0-beta/modules/imgcodecs/doc/reading_and_writing_images.html#imread\n        # that shows that default color scheme is BGR, not RGB\n        # https://stackoverflow.com/questions/13576161/convert-opencv-image-into-pil-image-in-python-for-use-with-zbar-library\n        # that provides how to do conversion\n        if cv2.cvtColor(connectedComponentImg,cv2.COLOR_BGR2RGB) is None:\n            print(\"skipping a region\")\n            continue\n        imgsToReturn.append(Image.fromarray(cv2.cvtColor(connectedComponentImg,cv2.COLOR_BGR2RGB)))\n    if len(imgsToReturn) == 0:\n        return None\n    return imgsToReturn\n\n\ndef makeAndSaveToFileCamClassificationHeatmap(model_input_location, input_image_location, output_image_location, label_map, desired_label_index):\n    input_image = cv2.imread(input_image_location)\n    statsSorted, thresh, grayscaleHeatmap, img, imgAndColorHeatmap = getConnectedComponentsAndImgData(model_input_location, input_image,\n                                                                       label_map, desired_label_index)\n    meanPixelValue = np.mean(thresh)\n    print(\"full image shape: \" + str(img.shape))\n    print(\"Mean pixel value: \" + str(meanPixelValue))\n    print(\"Stats sorted: \" + str(statsSorted))\n    for regionStat in statsSorted:\n        print(\"region \" + str(regionStat) + \" mean pixel value \" + str(\n            np.mean(thresh[regionStat[1]:(regionStat[1] + regionStat[3]), regionStat[0]:(regionStat[0] + regionStat[2])])))\n        cv2.rectangle(imgAndColorHeatmap, (regionStat[0], regionStat[1]),\n                      (regionStat[0] + regionStat[2], regionStat[1] + regionStat[3]), (255, 0, 0), 10)\n    cv2.imwrite(output_image_location, imgAndColorHeatmap)\n    #cv2.imwrite(os.path.dirname(output_image_location) + \"/grayscale.jpg\", grayscaleHeatmap)\n    #cv2.imwrite(os.path.dirname(output_image_location) + \"/thresh.jpg\", thresh)\n\n\nif __name__ == \"__main__\":\n    makeAndSaveToFileCamClassificationHeatmap(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5]))","repo_name":"David-Durst/fyndoro","sub_path":"objectDetection/classActivationMapResnet.py","file_name":"classActivationMapResnet.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34951680586","text":"import os\nfrom flask import Flask, flash, redirect, render_template, request, url_for\n\nfrom topo import Topology\n\nALLOWED_EXTENSIONS = set(['csv', 'xlsx'])\n\napp = Flask(__name__)\n\n@app.template_filter()\ndef fnum(value):\n    value = round(value, 2)\n    if int(value) == value:\n        value = int(value)\n    return value\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n@app.route(\"/topology\", methods=['GET', 'POST'])\ndef topology():\n if 'file' not in request.files:\n flash('Потрібно вибрати файл', category='error')\n return redirect(url_for('index'))\n file = request.files['file']\n filename = file.filename\n if filename == '':\n flash('Файл не вибрано', category='error')\n return redirect(url_for('index'))\n ext = os.path.splitext(filename)[1][1:]\n if not allowed_file(filename):\n flash('Формат {} не підтримується'.format(ext), category='error')\n return redirect(url_for('index'))\n topology = Topology(file=file.stream, type=ext)\n return render_template('topology.html', t=topology.make_report())\n\napp.secret_key = 'fpaiajfbasshougiubfajs'\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"foegit/graphtopology","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32274799809","text":"import os\nimport os.path\nimport sys\nimport re\nimport yaml\nimport CppHeaderParser\n\n\n# Numeric field type (abstract).\nclass _NumericFt:\n # Returns the C++ expression to cast the expression `expr` to the C\n # type of this field type.\n def cast(self, expr):\n return f'static_cast<{self.c_type}>({expr})'\n\n\n# Integer field type (abstract).\nclass _IntFt(_NumericFt):\n def __init__(self, size, pref_disp_base='dec'):\n self._size = size\n self._pref_disp_base = pref_disp_base\n\n # Size (bits).\n @property\n def size(self):\n return self._size\n\n # Preferred display base (`dec` or `hex`).\n @property\n def pref_disp_base(self):\n return self._pref_disp_base\n\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n return {\n 'size': self._size,\n 'preferred-display-base': self._pref_disp_base,\n }\n\n\n# Signed integer field type.\nclass _SIntFt(_IntFt):\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n ret = super().barectf_yaml\n ret['class'] = 'sint'\n return ret\n\n # Equivalent C type\n @property\n def c_type(self):\n return f'std::int{self._size}_t'\n\n\n# Unsigned integer field type.\nclass _UIntFt(_IntFt):\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n ret = super().barectf_yaml\n ret['class'] = 'uint'\n return ret\n\n # Equivalent C type.\n @property\n def c_type(self):\n return f'std::uint{self._size}_t'\n\n\n# Pointer field type.\nclass _PointerFt(_UIntFt):\n def __init__(self):\n super().__init__(64, 'hex')\n\n # Returns the C++ expression to cast the expression `expr` to the C\n # type of this field type.\n def cast(self, expr):\n return f'static_cast<{self.c_type}>(reinterpret_cast({expr}))'\n\n\n# Enumeration field type (abstract).\nclass _EnumFt(_IntFt):\n def __init__(self, size, mappings):\n super().__init__(size)\n self._mappings = mappings.copy()\n\n # Mappings (names to integers).\n @property\n def mappings(self):\n return self._mappings\n\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n ret = super().barectf_yaml\n mappings = {}\n\n for name, val in self._mappings.items():\n mappings[name] = [val]\n\n ret['mappings'] = mappings\n return ret\n\n\n# Unsigned enumeration field type.\nclass _UEnumFt(_EnumFt, _UIntFt):\n # Equivalent barectf 
field type in YAML.\n @property\n def barectf_yaml(self):\n ret = super().barectf_yaml\n ret['class'] = 'uenum'\n return ret\n\n\n# Signed enumeration field type.\nclass _SEnumFt(_EnumFt, _SIntFt):\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n ret = super().barectf_yaml\n ret['class'] = 'senum'\n return ret\n\n\n# Optional string field type.\nclass _OptStrFt:\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n return {\n 'class': 'str',\n }\n\n\n# String field type.\nclass _StrFt(_OptStrFt):\n pass\n\n\n# Floating-point number field type.\nclass _FloatFt(_NumericFt):\n def __init__(self, size):\n self._size = size\n\n # Size (bits): 32 or 64.\n @property\n def size(self):\n return self._size\n\n # Equivalent barectf field type in YAML.\n @property\n def barectf_yaml(self):\n return {\n 'class': 'real',\n 'size': self._size,\n }\n\n # Equivalent C type.\n @property\n def c_type(self):\n if self._size == 32:\n return 'float'\n else:\n assert self._size == 64\n return 'double'\n\n\n# Event record type.\nclass _Ert:\n def __init__(self, api_func_name, members):\n self._api_func_name = api_func_name\n self._members = members\n\n # API function name.\n @property\n def api_func_name(self):\n return self._api_func_name\n\n # Parameters of function (list of `_ErtMember`).\n @property\n def members(self):\n return self._members\n\n\n# Beginning event record type.\nclass _BeginErt(_Ert):\n # Name of event record type depending on the API prefix.\n def name(self, api_prefix):\n suffix = '_begin' if api_prefix == 'hsa' else 'Begin'\n return f'{self._api_func_name}{suffix}'\n\n\n# End event record type.\nclass _EndErt(_Ert):\n # Name of event record type depending on the API prefix.\n def name(self, api_prefix):\n suffix = '_end' if api_prefix == 'hsa' else 'End'\n return f'{self._api_func_name}{suffix}'\n\n\n# Event record type member.\nclass _ErtMember:\n def __init__(self, access, member_names, ft):\n self._access = access\n self._member_names = member_names.copy()\n self._ft = ft\n\n # C++ access expression.\n @property\n def access(self):\n return self._access\n\n # List of member names.\n @property\n def member_names(self):\n return self._member_names\n\n # Equivalent field type.\n @property\n def ft(self):\n return self._ft\n\n\n# Makes sure some condition is satisfied, or prints the error message\n# `error_msg` and quits with exit status 1 otherwise.\n#\n# This is an unconditional assertion.\ndef _make_sure(cond, error_msg):\n if not cond:\n print(f'Error: {error_msg}', file=sys.stderr)\n sys.exit(1)\n\n\ndef _enumerator_effective_val(enum_val):\n # Try the value, but this value may be a string (an\n # enumerator/definition).\n val = enum_val.get('value')\n\n if type(val) is int:\n return val\n\n # Try the raw value.\n val = enum_val.get('raw_value')\n\n if val is not None:\n if type(val) is int:\n # Raw value is already an integer.\n return val\n else:\n # Try to parse the raw value string as an integer.\n try:\n return int(val, 0)\n except:\n pass\n\n _make_sure(False,\n f'Cannot get the integral value of enumerator `{enum_val[\"name\"]}`')\n\n\n# Returns the equivalent field type of the C type `c_type`.\ndef _number_ft_from_c_type(cpp_header, c_type):\n # Check for known enumeration.\n m = re.match(r'(?:enum\\s+)?(\\w+)', c_type)\n\n if m:\n size = 32\n\n for enum_info in cpp_header.enums:\n if m.group(1) == enum_info.get('name'):\n # Fill enumeration field type mappings.\n mappings = {\n str(v['name']): 
_enumerator_effective_val(v)\n for v in enum_info['values']\n }\n\n if len(mappings) == 0:\n return _SIntFt(64)\n\n if max(mappings.values()) >= 2**31 or min(mappings.values()) < -2**31:\n size = 64\n\n _make_sure(len(mappings) > 0, f'Enumeration `{enum_info[\"name\"]}` is empty')\n\n # Create corresponding enumeration field type.\n return _SEnumFt(size, mappings)\n\n # Find corresponding basic field type.\n is_unsigned = 'unsigned' in c_type\n\n if 'long' in c_type:\n if is_unsigned:\n return _UIntFt(64)\n else:\n return _SIntFt(64)\n elif 'short' in c_type:\n if is_unsigned:\n return _UIntFt(16)\n else:\n return _SIntFt(16)\n elif 'char' in c_type:\n if is_unsigned:\n return _UIntFt(8)\n else:\n return _SIntFt(8)\n elif 'float' in c_type:\n return _FloatFt(32)\n elif 'double' in c_type:\n return _FloatFt(64)\n else:\n # Assume `int` (often an unresolved C enumeration).\n if is_unsigned:\n return _UIntFt(32)\n else:\n return _SIntFt(32)\n\n\n# Returns whether or not a property has a pointer type.\ndef _prop_is_pointer(prop, c_type):\n if prop['pointer'] or prop['function_pointer']:\n return True\n\n if prop['array'] and 'array_size' in prop:\n return True\n\n if prop['unresolved']:\n # HSA API function pointers.\n if prop['name'] in ('callback', 'handler'):\n return True\n\n # HIP API function pointers.\n if c_type.endswith('Fn_t'):\n return True\n\n # Check the C type itself.\n if '*' in c_type or '*' in prop.get('raw_type', ''):\n return True\n\n return False\n\n\n# Returns a list of event record type member objects for the structure\n# `struct` considering the initial C++ access expression `access` and\n# member names `member_names`.\ndef _get_ert_members_for_struct(cpp_header, struct, access, member_names):\n members = []\n member_names = member_names.copy()\n member_names.append(None)\n props = struct['properties']['public']\n\n for index, prop in enumerate(props):\n # Property name.\n name = prop['name']\n\n # Member names, access, and C type.\n member_names[-1] = str(name)\n this_access = f'{access}.{name}'\n c_type = prop['type']\n aliases = prop['aliases']\n\n # Skip no type.\n if c_type == '':\n continue\n\n # Skip unnamed or union.\n if name == '' or 'union' in name or re.match(r'\\bunion\\b', c_type):\n continue\n\n # Check for known C type alias.\n while True:\n c_type_alias = cpp_header.typedefs.get(c_type)\n\n if c_type_alias is None:\n break\n\n c_type = c_type_alias\n\n # Check for C string.\n if re.match(r'^((const\\s+char)|(char\\s+const)|char)\\s*\\*$',\n c_type.strip()):\n members.append(_ErtMember(this_access, member_names, _OptStrFt()))\n continue\n\n # Check for pointer.\n if _prop_is_pointer(prop, c_type):\n # Pointer: use numeric value.\n members.append(_ErtMember(this_access, member_names, _PointerFt()))\n continue\n\n # Check for substructure.\n sub_struct = cpp_header.classes.get(c_type)\n\n if sub_struct is None and len(aliases) == 1:\n sub_struct = cpp_header.classes.get(aliases[0])\n\n if sub_struct is not None:\n members += _get_ert_members_for_struct(cpp_header, sub_struct,\n this_access, member_names)\n continue\n\n # Use a basic field type.\n members.append(_ErtMember(this_access, member_names,\n _number_ft_from_c_type(cpp_header, c_type)))\n\n return members\n\n\n# Returns the beginning and end event record type objects for the\n# callback data structure `struct`.\ndef _erts_from_cb_data_struct(api_prefix, cpp_header, retval_info, struct):\n # The location of the `args` union within the nested structures of\n # `struct`.\n args_nested_cls_index = 0\n\n 
# Create return value members (to be used later).\n if retval_info is not None:\n args_nested_cls_index = 1\n retval_members = {}\n nested_classes = struct['nested_classes']\n _make_sure(len(nested_classes) >= 1,\n f\"Return value union doesn't exist in `{struct['name']}`\")\n retval_union = nested_classes[0]\n\n for prop in retval_union['properties']['public']:\n name = str(prop['name'])\n member = _ErtMember(f'GetApiData().{name}', ['retval'],\n _number_ft_from_c_type(cpp_header, prop['type']))\n retval_members[prop['name']] = member\n\n # Make sure we have everything we need.\n for api_func_name, retval_name in retval_info.items():\n if retval_name is not None:\n _make_sure(retval_name in retval_members,\n f\"Return value union member `{retval_name}` doesn't exist (function {api_func_name}())\")\n\n # Create beginning/end event record type objects.\n begin_erts = []\n end_erts = []\n nested_classes = struct['nested_classes'][args_nested_cls_index]['nested_classes']\n props = struct['nested_classes'][args_nested_cls_index]['properties']['public']\n _make_sure(len(nested_classes) == len(props),\n f'Mismatch between nested structure and member count in `{struct[\"name\"]}`')\n\n for index, prop in enumerate(props):\n # API function name is the name of the member.\n api_func_name = str(prop['name'])\n\n # Get the parameters.\n members = _get_ert_members_for_struct(cpp_header,\n nested_classes[index],\n f'GetApiData().args.{api_func_name}',\n [])\n\n # Append new beginning event record type object.\n begin_erts.append(_BeginErt(api_func_name, members))\n\n # Append new end event record type object if possible.\n ret_members = []\n\n if retval_info is not None:\n retval_type = retval_info.get(api_func_name)\n\n if retval_type is not None:\n ret_members.append(retval_members[retval_type])\n\n end_erts.append(_EndErt(api_func_name, ret_members))\n\n return begin_erts, end_erts\n\n\n# Creates and returns the return value information dictionary.\n#\n# This dictionary maps API function names to the member to get within\n# the callback data structure.\n#\n# This only applies to the HSA API: for other APIs, this function\n# returns `None`.\ndef _get_retval_info(path):\n if 'hsa' not in os.path.basename(path):\n return\n\n retval_info = {}\n cur_api_func_name = None\n\n with open(path) as f:\n for line in f:\n if 'out << \")' in line and cur_api_func_name is not None:\n m = re.search(r'api_data.(\\w+_retval)', line)\n retval_info[cur_api_func_name] = m.group(1) if m else None\n else:\n m = re.search(r'out << \"(hsa_\\w+)\\(\";', line)\n\n if m:\n cur_api_func_name = m.group(1)\n\n return retval_info\n\n\n# Returns a partial barectf data stream type in YAML with the event\n# record types `erts`.\ndef _yaml_dst_from_erts(api_prefix, erts):\n # Base.\n yaml_erts = {}\n yaml_dst = {\n 'event-record-types': yaml_erts,\n }\n\n # Create one event record type per API function.\n for ert in erts:\n # Base.\n yaml_members = []\n yaml_ert = {\n 'payload-field-type': {\n 'class': 'struct',\n 'members': yaml_members,\n },\n }\n\n # Create one structure field type member per member.\n for member in ert.members:\n # barectf doesn't support nested CTF structures, so join\n # individual member names with `__` to flatten.\n yaml_members.append({\n '_' + '__'.join(member.member_names): {\n 'field-type': member.ft.barectf_yaml,\n },\n })\n\n # Add event record type.\n yaml_erts[ert.name(api_prefix)] = yaml_ert\n\n # Convert to YAML.\n return yaml.dump(yaml_dst)\n\n\n# Returns the C++ switch statement which calls the 
correct barectf\n# tracing function depending on the API function operation ID.\ndef _cpp_switch_statement_from_erts(api_prefix, erts):\n lines = []\n lines.append('switch (GetOp()) {')\n\n for ert in erts:\n lines.append(f' case {api_prefix.upper()}_API_ID_{ert.api_func_name}:')\n lines.append(f' barectf_{api_prefix}_api_trace_{ert.name(api_prefix)}(')\n lines.append(f' &barectf_ctx,')\n lines.append(f' GetThreadId(),')\n lines.append(f' GetQueueId(),')\n lines.append(f' GetAgentId(),')\n lines.append(f' GetCorrelationId(),')\n\n if api_prefix == 'hip':\n lines.append(f' GetKernelName().c_str(),')\n\n if len(ert.members) == 0:\n # Remove last comma.\n lines[-1] = lines[-1].replace(',', '')\n\n for index, member in enumerate(ert.members):\n if type(member.ft) is _OptStrFt:\n # Only dereference C string if not null, otherwise use\n # an empty string.\n lines.append(f' {member.access} ? {member.access} : \"\"')\n elif type(member.ft) is _StrFt:\n lines.append(f' {member.access}')\n else:\n lines.append(f' {member.ft.cast(member.access)}')\n\n if index + 1 < len(ert.members):\n lines[-1] += ','\n\n lines.append(' );')\n lines.append(' break;')\n\n lines.append('}')\n return lines\n\n\n# Processes the complete API header file `path`.\ndef _process_file(api_prefix, path):\n # Create `CppHeader` object.\n try:\n cpp_header = CppHeaderParser.CppHeader(path)\n except CppHeaderParser.CppParseError as exc:\n print(exc, file=sys.stderr)\n sys.exit(1)\n\n # Get return value information dictionary.\n retval_info = _get_retval_info(path)\n\n # Find callback data structure.\n for struct_name, struct in cpp_header.classes.items():\n if re.match(r'^' + api_prefix + r'_api_data\\w+$', struct_name):\n # Process callback data structure.\n begin_erts, end_erts = _erts_from_cb_data_struct(api_prefix,\n cpp_header,\n retval_info,\n struct)\n\n # Write barectf YAML file.\n with open(f'{api_prefix}_erts.yaml', 'w') as f:\n f.write(_yaml_dst_from_erts(api_prefix, begin_erts + end_erts))\n\n # Write C++ code (beginning event record).\n with open(f'{api_prefix}_begin.cpp.i', 'w') as f:\n f.write('\\n'.join(_cpp_switch_statement_from_erts(api_prefix,\n begin_erts)))\n\n # Write C++ code (end event record).\n with open(f'{api_prefix}_end.cpp.i', 'w') as f:\n f.write('\\n'.join(_cpp_switch_statement_from_erts(api_prefix,\n end_erts)))\n\n\nif __name__ == '__main__':\n # Disable `CppHeaderParser` printing to standard output.\n CppHeaderParser.CppHeaderParser.print_warnings = 0\n CppHeaderParser.CppHeaderParser.print_errors = 0\n CppHeaderParser.CppHeaderParser.debug = 0\n CppHeaderParser.CppHeaderParser.debug_trace = 0\n\n # Process the complete API header file.\n _process_file(sys.argv[1], sys.argv[2])\n","repo_name":"ROCm-Developer-Tools/rocprofiler","sub_path":"plugin/ctf/gen_api_files.py","file_name":"gen_api_files.py","file_ext":"py","file_size_in_byte":18549,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"81"} +{"seq_id":"23173752400","text":"from pymongo import DESCENDING\nfrom models import *\nfrom .database import *\n\n#Index\ndef add_index(index: Index):\n stats.insert_one(\n {\"name\": index.name , \"full_name\" : index.full_name ,\"price\": index.price, \"date\": index.date, \"time\": index.time})\n\ndef get_latest_index(index_name):\n # WARNING: This is hardcoded to get first element\n latest_index = stats.find({\"name\": index_name}).sort(\"_id\", DESCENDING)[0]\n\n name = latest_index[\"name\"]\n full_name = latest_index[\"full_name\"]\n price = 
latest_index[\"price\"]\n date = latest_index[\"date\"]\n time = latest_index[\"time\"]\n\n index = Index(name, full_name, price, date, time)\n return index\n\ndef get_all_latest_index(all_index_names):\n all_indexes = []\n for index_name in all_index_names:\n if (does_index_exist(index_name)):\n index = get_latest_index(index_name)\n all_indexes.append(index)\n print(index.name)\n\n return all_indexes\n\n\ndef does_index_exist(index_name) -> bool:\n index_stats = tuple(stats.find({\"name\": index_name}).clone())\n if (len(index_stats) > 0):\n return True\n else:\n return False\n","repo_name":"m3elabs/Xchange-TG","sub_path":"controller/index_statistics.py","file_name":"index_statistics.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"15054371899","text":"\nimport torch\n\nfrom dataset import *\nfrom train import *\nfrom conf import conf\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(f\"Using {device} device\")\n\ngenerator = Generator().to(device)\nsurface_disc = Discriminator().to(device)\ntexture_disc = Discriminator(1).to(device)\n\nprint(generator)\nprint(surface_disc)\nprint(texture_disc)\n\ntrain(generator, surface_disc, texture_disc, device)\n\nprint(\"Done!\")\n","repo_name":"wychlw/Cartoonization-PyTorch-SSA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73194540105","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport sys\n\nmesaj=MIMEMultipart()\n\nmesaj[\"From\"] = \"\" #gonderen mail adresi\nmesaj[\"To\"] = \"\" #alici mail adresi\nmesaj[\"Subject\"] = \"Smtp Mail Gönderme\"\n\nyazi=\"\"\"\nsmtp ile gonderdim\n1-2-3----\n\ntaner ozer\n\n\"\"\"\n\nmesajgovdesi=MIMEText(yazi,\"plain\")\nmesaj.attach(mesajgovdesi)\n\ntry:\n mail=smtplib.SMTP(\"smtp.gmail.com\",587)\n mail.ehlo()\n mail.starttls()\n mail.login(\"\",\"\") #mail adresi ve sifremiz\n mail.sendmail(mesaj[\"From\"],mesaj[\"To\"],mesaj.as_string())\n print(\"Mail Gönderildi..\")\n mail.close()\nexcept:\n sys.stderr.write(\"bir hata olustu...\")\n sys.stderr.flush()","repo_name":"dxtaner/Python","sub_path":"smtpilemailgonderme/mailyollama.py","file_name":"mailyollama.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28586610571","text":"from __future__ import print_function\r\n\r\nimport mlflow.sklearn\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Use an existing experiment or create a new one\r\n mlflow.set_experiment(\"tracking_experiment\")\r\n\r\n with mlflow.start_run() as active_run:\r\n\r\n X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)\r\n y = np.array([0, 0, 1, 1, 1, 0])\r\n lr = LogisticRegression()\r\n lr.fit(X, y)\r\n score = lr.score(X, y)\r\n\r\n # Log several metrics\r\n for i in range(10):\r\n mlflow.log_metric(\"i mod 2\", i % 2)\r\n\r\n print(\"Score: %s\" % score)\r\n mlflow.log_param(\"random\", np.random.rand())\r\n mlflow.log_metric(\"score\", score)\r\n mlflow.sklearn.log_model(lr, \"model\")\r\n print(\"Model saved in run %s\" % 
active_run.info.run_uuid)\r\n","repo_name":"TheAmazingElys/random_scripts_and_notebooks","sub_path":"examples/mlflow/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16575252248","text":"\"\"\"\nCharacter Detection\n\nThe goal is to find all of the coordinates where a specific character appears using template matching.\n\nThere are 3 sub tasks:\n1. Detect character 'a'.\n2. Detect character 'b'.\n3. Detect character 'c'.\n\"\"\"\n\nimport argparse\nimport json\nimport os\n\nimport utils\nfrom task1 import *\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"edge-character detection\")\n parser.add_argument( \"--img_path\", \n type=str, \n default=\"./data/characters.jpg\",\n help=\"path to the image used for character detection\")\n parser.add_argument( \"--template_path\", \n type=str, \n default=\"\",\n choices=[\"./data/a.jpg\", \"./data/b.jpg\", \"./data/c.jpg\"],\n help=\"path to the template image\")\n parser.add_argument( \"--result_saving_directory\",\n dest=\"rs_directory\",\n type=str,\n default=\"./results/\",\n help=\"directory to which results are saved\")\n args = parser.parse_args()\n return args\n\ndef detect(img, template):\n \"\"\"\n Detect a given character, i.e., the character in the template image.\n\n Args:\n img: nested list (int), image that contains character to be detected.\n template: nested list (int), template image.\n\n Returns:\n coordinates: list (tuple), a list whose elements are coordinates where the character appears.\n format of the tuple: (x (int), y (int)), x and y are integers.\n x: row that the character appears (starts from 0).\n y: column that the character appears (starts from 0).\n \"\"\"\n\n template_h = len(template)\n template_w = max([len(i) for i in template])\n\n image_h = len(img)\n image_w = max([len(i) for i in img])\n\n output = []\n threshold = 0.85\n\n for i in range(image_h - template_h):\n temp = []\n for j in range(image_w - template_w):\n cropped_img = img[i: i + template_h]\n cropped_img = [x[j: j+template_w] for x in cropped_img]\n\n i_0_bar = sum([sum(x) for x in template])/(template_h * template_w)\n\n i_1_bar = sum([sum(y) for y in cropped_img])/(template_h * template_w)\n\n template_sigma = (sum([sum(v) for v in [[(w-i_0_bar)**2 for w in x] for x in template]])) ** 0.5\n\n image_sigma = (sum([sum(v) for v in [[(w-i_1_bar)**2 for w in x] for x in cropped_img]])) ** 0.5\n\n '''Normalized cross correlation calculation'''\n e_ncc = sum([(template[x][w] - i_0_bar) * (cropped_img[x][w] - i_1_bar) for w in range(template_w) for x in\n range(template_h)]) / (template_sigma * image_sigma)\n\n if str(e_ncc) == 'nan':\n temp.append(0)\n else:\n temp.append(e_ncc)\n output.append(temp)\n\n coordinates = []\n for x in range(len(output)):\n for y in range(len(output[0])):\n coordinates_temp = []\n if output[x][y] >= threshold:\n coordinates_temp.append(x)\n coordinates_temp.append(y)\n coordinates.append(coordinates_temp)\n \n return coordinates\n\ndef save_results(coordinates, template, template_name, rs_directory):\n results = {}\n results[\"coordinates\"] = sorted(coordinates, key=lambda x: x[0])\n results[\"template_size\"] = (len(template), len(template[0]))\n with open(os.path.join(rs_directory, template_name), \"w\") as file:\n json.dump(results, file)\n\ndef main():\n args = parse_args()\n\n img = read_image(args.img_path)\n template = read_image(args.template_path)\n\n 
coordinates = detect(img, template)\n\n template_name = \"{}.json\".format(os.path.splitext(os.path.split(args.template_path)[1])[0])\n save_results(coordinates, template, template_name, args.rs_directory)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sivashanmugamo/Computer-Vision","sub_path":"Edge & Character detection/Character Detection.py","file_name":"Character Detection.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22676094153","text":"from ..baseAPI import BasePlatformAPI\nfrom lazy.protectedModels import ProtectedResource\nfrom etsy2 import Etsy\nfrom etsy2.oauth import EtsyOAuthClient, EtsyOAuthHelper\nfrom ... import productDBLogger #\nfrom urllib.parse import parse_qs, urlparse\nfrom . import models\n\nrequiredPermissions = [\"listings_r\", \"listings_w\"]\nconsumer_key = \"l4opy054xmz7lolo6x68ot1k\"\nconsumer_secret = \"wn1etzuu54\"\nshop_id = \"12703209\"\n\n\nclass EtsyAPI(BasePlatformAPI):\n persistent_identifier = \"etsy\"\n webhook_enabled = False\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n etsyAuthToken = ProtectedResource.objects(name=\"etsyAuthToken\").first()\n etsyAuthSecret = ProtectedResource.objects(name=\"etsyAuthSecret\").first()\n if etsyAuthToken is None or etsyAuthSecret is None:\n loginURL, temp_oauth_token_secret = EtsyOAuthHelper.get_request_url_and_token_secret(consumer_key, consumer_secret, requiredPermissions)\n temp_oauth_token = parse_qs(urlparse(loginURL).query).get(\"oauth_token\").pop()\n productDBLogger.warn(\"Etsy is not authenticated!!! Visit this URL and input the verification code to authenticate!\")\n productDBLogger.warn(loginURL)\n productDBLogger.warn(temp_oauth_token)\n productDBLogger.warn(temp_oauth_token_secret)\n verificationCode = input(\"Verification Code> \")\n oauth_token, oauth_token_secret = EtsyOAuthHelper.get_oauth_token_via_verifier(consumer_key, consumer_secret, temp_oauth_token, temp_oauth_token_secret, verificationCode)\n etsyAuthToken = ProtectedResource(name=\"etsyAuthToken\", value=oauth_token)\n etsyAuthSecret = ProtectedResource(name=\"etsyAuthSecret\", value=oauth_token_secret)\n etsyAuthToken.save()\n etsyAuthSecret.save()\n etsyOAuthClient = EtsyOAuthClient(\n client_key=consumer_key,\n client_secret=consumer_secret,\n resource_owner_key=etsyAuthToken.value,\n resource_owner_secret=etsyAuthSecret.value\n )\n self.EtsyClient = Etsy(etsy_oauth_client=etsyOAuthClient)\n newEtsyListing = models.EtsyParityRecord(listingType=\"foo\", listingID=\"738914494\", productID=\"3779581207\")\n print(newEtsyListing.getRawListingProductsJSON(self.EtsyClient))\n print(newEtsyListing.pushQuantityToEtsy(10, self.EtsyClient))\n # print(self._getListing(\"738914494\"))\n exit()\n\n def getAllStockCounts(self):\n pass\n\n def _bulkFetchListings(self):\n finishedReading = False\n totalAmountOfResourcesFetched = 0\n page = 1\n limit = 100\n fetchedResourceJSONList = list()\n while not finishedReading:\n responseJSON = self.EtsyClient.findAllShopListingsActive(shop_id=shop_id, limit=limit, page=page)\n totalAmountOfResourcesOnEtsy = self.EtsyClient.count\n totalAmountOfResourcesFetched += len(responseJSON)\n fetchedResourceJSONList = fetchedResourceJSONList + responseJSON\n if totalAmountOfResourcesOnEtsy == totalAmountOfResourcesFetched:\n finishedReading = True\n else:\n page += 1\n finishedReading = False\n return fetchedResourceJSONList\n\n def _getListing(self, listing_id):\n responseJSON = 
self.EtsyClient.getListing(listing_id=listing_id)\n return responseJSON\n","repo_name":"vyrzdev/Dot2DotProductManager","sub_path":"apis/etsy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30975726008","text":"\"\"\"\nterm_processing_script.py\n~~~\nTakes in a csv file and extracts numerical values for most attributes of a credit card.\n\"\"\"\n\nimport csv\nfrom scrape_to_dict.card_schema import card_dict\nfrom term_processor import clean_up_terms, second_clean\nfrom scripts.convert_csv import convert_csv\n\nfrom typing import List\nimport copy\n\n# source_csv = \"csv_filescredit_card_raw_scraped - credit_card_raw_all.csv\"\nsource_csv = \"csv_files/credit_card_raw_scraped.csv\"\n\n\ndef __store_attribute_string_to_dict(attribute_list: List[str]) -> dict:\n \"\"\"\n Takes a list of attributes of a card and stores it in a dictionary, which gets returned.\n\n Args:\n attribute_list (List[str]): The list of attribute data about a card.\n Returns:\n dict: The dictionary (card_dict from card_schema) of attributes and its corresponding string and value.\n \"\"\"\n answer_dict = copy.deepcopy(card_dict)\n if \"tips_apr\" in answer_dict.keys():\n del(answer_dict[\"tips_apr\"])\n if \"termination\" in answer_dict.keys():\n del(answer_dict[\"termination\"])\n if \"plan_fee\" in answer_dict.keys():\n del (answer_dict[\"plan_fee\"])\n\n answer_dict[\"full_card_name\"] = attribute_list[0].strip()\n answer_dict[\"short_card_name\"] = attribute_list[1].strip()\n answer_dict[\"trademark_card_name\"] = attribute_list[2].strip()\n answer_dict[\"category\"] = attribute_list[3].strip()\n answer_dict[\"issuer\"] = attribute_list[4].strip()\n answer_dict[\"processor\"] = attribute_list[5].strip()\n answer_dict[\"toc_link\"] = attribute_list[6].strip()\n answer_dict[\"offer_link\"] = attribute_list[7].strip()\n answer_dict[\"agg_link\"] = attribute_list[8].strip()\n answer_dict[\"balance_transfer_apr\"][\"term\"] = attribute_list[9].strip()\n answer_dict[\"cash_advance_apr\"][\"term\"] = attribute_list[12].strip()\n answer_dict[\"penalty_apr\"][\"term\"] = attribute_list[15].strip()\n answer_dict[\"purchase_apr\"][\"term\"] = attribute_list[18].strip()\n answer_dict[\"paying_interest\"][\"term\"] = attribute_list[21].strip()\n answer_dict[\"minimum_interest_charge_apr\"][\"term\"] = attribute_list[23].strip()\n answer_dict[\"annual_fee\"][\"term\"] = attribute_list[26].strip()\n answer_dict[\"balance_transfer_fee\"][\"term\"] = attribute_list[29].strip()\n answer_dict[\"cash_advance_fee\"][\"term\"] = attribute_list[32].strip()\n answer_dict[\"foreign_transaction_fee\"][\"term\"] = attribute_list[35].strip()\n answer_dict[\"late_payment_fee\"][\"term\"] = attribute_list[38].strip()\n answer_dict[\"returned_payment_fee\"][\"term\"] = attribute_list[41].strip()\n answer_dict[\"returned_check_fee\"][\"term\"] = attribute_list[44].strip()\n answer_dict[\"over_limit_fee\"][\"term\"] = attribute_list[47].strip()\n answer_dict[\"pros\"][\"term\"] = attribute_list[50].strip()\n answer_dict[\"cons\"][\"term\"] = attribute_list[52].strip()\n answer_dict[\"credit_score\"][\"term\"] = attribute_list[54].strip()\n answer_dict[\"bonus_offer\"][\"term\"] = attribute_list[58].strip()\n answer_dict[\"offer_details\"][\"term\"] = attribute_list[60].strip()\n answer_dict[\"rewards_rate\"][\"term\"] = attribute_list[62].strip()\n answer_dict[\"intro_apr_check\"][\"term\"] = 
attribute_list[64].strip()\n answer_dict[\"variable_apr_check\"][\"term\"] = attribute_list[66].strip()\n answer_dict[\"annual_fee_check\"][\"term\"] = attribute_list[68].strip()\n\n return answer_dict\n\n\ndef term_process(dest_csv: str, starting_index: int):\n \"\"\"\n Function to term process and extract numerical values from scraped data.\n\n Args:\n dest_csv (str): The destination csv file.\n starting_index (int): The starting index.\n Returns:\n str: Success\n \"\"\"\n with open(source_csv, 'r', newline='') as csv_f:\n csv_reader = list(csv.reader(csv_f))\n for row in csv_reader[starting_index:]:\n attribute_data = list(row)\n print([str(i) + ' ' + str(j) for i, j in enumerate(attribute_data)])\n attribute_dict = __store_attribute_string_to_dict(attribute_data)\n processed_dict = clean_up_terms.clean_up_terms(attribute_dict)\n processed_dict = second_clean.second_clean(processed_dict)\n print(processed_dict)\n convert_csv(processed_dict, dest_csv)\n\n return \"Success\"\n\n\nterm_process(\"csv_files/credit_card_raw_processed.csv\", 0)\n\n","repo_name":"eric99ying/CreditCardScraper","sub_path":"scripts/term_processing_script.py","file_name":"term_processing_script.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"43556944862","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom numpy import *\n\ndef loadDataSet():\n\tdataMat = []\n\tlabelMat = []\n\tfr = open('testSet.txt')\n\tfor line in fr:\n\t\tlineArr = line.strip().split()\n\t\t# dataMat 记录的是每个点和他们的初始的权重\n\t\tdataMat.append([1.0 , float(lineArr[0]) , float(lineArr[1])])\n\t\tlabelMat.append(int(lineArr[2]))\n\treturn dataMat , labelMat\n\ndef sigmoid(inX):\n\treturn 1.0/(1+exp(-inX))\n\n#梯度上升算法\ndef gradAscent(dataMatIn , classLabels):\n\tdataMatrix = mat(dataMatIn)\n\tlabelMat = mat(classLabels).transpose()\n\t# m,n 是 100 3\n\tm , n = shape(dataMatrix)\n\talpha = 0.001\n\tmaxCycles = 500\n\tweights = ones((n,1))\n\tfor k in range(maxCycles):\n\t\t# h 和 error 都是向量\n\t\th = sigmoid(dataMatrix * weights)\n\t\terror = (labelMat - h)\n\t\tweights = weights + alpha * dataMatrix.transpose() * error\n\treturn weights\n\n#随机梯度上升\ndef stocGradAscent0(dataMatrix , classLabels):\n\tm , n =shape(dataMatrix)\n\talpha = 0.01\n\tweights = ones(n)\n\tfor i in range(m):\n\t\t# h和error都是数值\n\t\th = sigmoid(sum(dataMatrix[i] * weights))\n\t\terror = classLabels[i] - h\n\t\tweights = weights + alpha * error * dataMatrix[i]\n\treturn weights\n\n#改进的随机梯度上升\ndef stocGradAscent1(dataMatrix , classLabels , numIter = 150):\n\tm , n = shape(dataMatrix)\n\tweights = ones(n)\n\tfor j in range(numIter):\n\t\tdataIndex = range(m)\n\t\tfor i in range(m):\n\t\t\talpha = 4/(1.0 + i + j) + 0.01\n\t\t\trandIndex = int(random.uniform(0 , len(dataIndex)))\n\t\t\th = sigmoid(sum(dataMatrix[randIndex] * weights))\n\t\t\terror = classLabels[randIndex] - h\n\t\t\tweights = weights + alpha * error * dataMatrix[randIndex]\n\t\t\tdel(dataIndex[randIndex])\n\treturn weights;\n\ndef classifyVector(inX , weights):\n\tprob = sigmoid(sum(inX * weights))\n\tif prob > 0.5:\n\t\treturn 1\n\telse:\n\t\treturn 0","repo_name":"baiyyang/MachineLearning_python","sub_path":"classification/LogRegres.py","file_name":"LogRegres.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"44094627436","text":"import argparse\nimport logging\nimport sys\nimport yaml\nimport os\nimport 
pkg_resources\n\nfrom . import constants\n\nfrom .colors import MessageColors\nfrom .exceptions import DefinitionError\nfrom .main import AnsibleBuilder\nfrom .introspect import process, simple_combine, base_collections_path\nfrom .requirements import sanitize_requirements\nfrom .utils import configure_logger, write_file\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef run():\n args = parse_args()\n configure_logger(args.verbosity)\n\n if args.action in ['create', 'build']:\n ab = AnsibleBuilder(**vars(args))\n action = getattr(ab, ab.action)\n try:\n if action():\n print(\n MessageColors.OKGREEN + \"Complete! The build context can be found at: {0}\".format(\n os.path.abspath(ab.build_context)\n ) + MessageColors.ENDC)\n sys.exit(0)\n except DefinitionError as e:\n logger.error(e.args[0])\n sys.exit(1)\n\n elif args.action == 'introspect':\n data = process(args.folder, user_pip=args.user_pip, user_bindep=args.user_bindep)\n if args.sanitize:\n logger.info('# Sanitized dependencies for {0}'.format(args.folder))\n data_for_write = data\n data['python'] = sanitize_requirements(data['python'])\n data['system'] = simple_combine(data['system'])\n else:\n logger.info('# Dependency data for {0}'.format(args.folder))\n data_for_write = data.copy()\n data_for_write['python'] = simple_combine(data['python'])\n data_for_write['system'] = simple_combine(data['system'])\n\n print('---')\n print(yaml.dump(data, default_flow_style=False))\n\n if args.write_pip and data.get('python'):\n write_file(args.write_pip, data_for_write.get('python') + [''])\n if args.write_bindep and data.get('system'):\n write_file(args.write_bindep, data_for_write.get('system') + [''])\n\n sys.exit(0)\n\n logger.error(\"An error has occured.\")\n sys.exit(1)\n\n\ndef get_version():\n return pkg_resources.get_distribution('ansible_builder').version\n\n\ndef add_container_options(parser):\n \"\"\"\n Add sub-commands and options relevant to containers.\n \"\"\"\n create_command_parser = parser.add_parser(\n 'create',\n help='Creates a build context, which can be used by podman to build an image.',\n description=(\n 'Creates a build context (including a Containerfile) from an execution environment spec. '\n 'This build context is populated with dependencies including requirements files.'\n )\n )\n\n build_command_parser = parser.add_parser(\n 'build',\n help='Builds a container image.',\n description=(\n 'Creates a build context (including a Containerfile) from an execution environment spec. '\n 'The build context will be populated from the execution environment spec. '\n 'After that, the specified container runtime podman/docker will be invoked to '\n 'build an image from that definition. '\n 'After building the image, it can be used locally or published using the supplied tag.'\n )\n )\n\n # Because of the way argparse works, if we specify the default here, it would\n # always be included in the value list if a tag value was supplied. 
We don't want\n # that, so we must, instead, set the default AFTER the argparse.parse_args() call.\n # See https://bugs.python.org/issue16399 for more info.\n build_command_parser.add_argument(\n '-t', '--tag',\n action='extend',\n nargs='+',\n help=f'The name(s) for the container image being built (default: {constants.default_tag})')\n\n build_command_parser.add_argument(\n '--container-runtime',\n choices=list(constants.runtime_files.keys()),\n default=constants.default_container_runtime,\n help='Specifies which container runtime to use (default: %(default)s)')\n\n build_command_parser.add_argument(\n '--build-arg',\n action=BuildArgAction,\n default={},\n dest='build_args',\n help='Build-time variables to pass to any podman or docker calls. '\n 'Internally ansible-builder makes use of {0}.'.format(\n ', '.join(constants.build_arg_defaults.keys())))\n\n build_command_parser.add_argument(\n '--no-cache',\n action='store_true',\n help='Do not use cache when building the image',\n )\n\n build_command_parser.add_argument(\n '--prune-images',\n action='store_true',\n help='Remove all dangling images after building the image',\n )\n\n for p in [create_command_parser, build_command_parser]:\n\n p.add_argument('-f', '--file',\n default=constants.default_file,\n dest='filename',\n help='The definition of the execution environment (default: %(default)s)')\n\n p.add_argument('-c', '--context',\n default=constants.default_build_context,\n dest='build_context',\n help='The directory to use for the build context (default: %(default)s)')\n\n p.add_argument('--output-filename',\n choices=list(constants.runtime_files.values()),\n default=None,\n help='Name of file to write image definition to '\n '(default depends on --container-runtime, {0})'.format(\n ' and '.join([' for '.join([v, k]) for k, v in constants.runtime_files.items()]))\n )\n\n p.add_argument('--galaxy-keyring',\n help='Keyring for collection signature verification during installs from Galaxy. '\n 'Will be copied into images. Verification is disabled if unset.')\n p.add_argument('--galaxy-ignore-signature-status-codes',\n action=\"append\",\n help='A gpg status code to ignore during signature verification when installing with '\n 'ansible-galaxy. May be specified multiple times. See ansible-galaxy doc for more info.')\n p.add_argument('--galaxy-required-valid-signature-count',\n help='The number of signatures that must successfully verify collections from '\n 'ansible-galaxy ~if there are any signatures provided~. See ansible-galaxy doc for more info.')\n\n introspect_parser = parser.add_parser(\n 'introspect',\n help='Introspects collections in folder.',\n description=(\n 'Loops over collections in folder and returns data about dependencies. '\n 'This is used internally and exposed here for verification. '\n 'This is targeted toward collection authors and maintainers.'\n )\n )\n introspect_parser.add_argument('--sanitize', action='store_true',\n help=('Sanitize and de-duplicate requirements. '\n 'This is normally done separately from the introspect script, but this '\n 'option is given to more accurately test collection content.'))\n\n introspect_parser.add_argument(\n 'folder', default=base_collections_path, nargs='?',\n help=(\n 'Ansible collections path(s) to introspect. 
'\n 'This should have a folder named ansible_collections inside of it.'\n )\n )\n # Combine user requirements and collection requirements into single file\n # in the future, could look into passing multilple files to\n # python-builder scripts to be fed multiple files as opposed to this\n introspect_parser.add_argument(\n '--user-pip', dest='user_pip',\n help='An additional file to combine with collection pip requirements.'\n )\n introspect_parser.add_argument(\n '--user-bindep', dest='user_bindep',\n help='An additional file to combine with collection bindep requirements.'\n )\n introspect_parser.add_argument(\n '--write-pip', dest='write_pip',\n help='Write the combined bindep file to this location.'\n )\n introspect_parser.add_argument(\n '--write-bindep', dest='write_bindep',\n help='Write the combined bindep file to this location.'\n )\n\n for n in [create_command_parser, build_command_parser, introspect_parser]:\n\n n.add_argument('-v', '--verbosity',\n dest='verbosity',\n type=int,\n choices=[0, 1, 2, 3],\n default=constants.default_verbosity,\n help='Increase the output verbosity, for up to three levels of verbosity '\n '(invoked via \"--verbosity\" or \"-v\" followed by an integer ranging '\n 'in value from 0 to 3) (default: %(default)s)')\n\n\ndef parse_args(args=sys.argv[1:]):\n\n parser = argparse.ArgumentParser(\n prog='ansible-builder',\n description=(\n 'Tooling to help build container images for running Ansible content. '\n 'Get started by looking at the help text for one of the subcommands.'\n )\n )\n parser.add_argument(\n '--version', action='version', version=get_version(),\n help='Print ansible-builder version and exit.'\n )\n\n subparsers = parser.add_subparsers(help='The command to invoke.', dest='action')\n subparsers.required = True\n\n add_container_options(subparsers)\n\n args = parser.parse_args(args)\n\n # Tag default must be handled differently. See comment for --tag option.\n if 'tag' not in vars(args):\n args.tag = [constants.default_tag]\n\n return args\n\n\nclass BuildArgAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n key, *value = values.split('=')\n attr = getattr(namespace, self.dest)\n\n # None signifies that the build-arg will come from the environment.\n # This is currently only supported by Docker. 
Podman will treat any\n # usage of the $VALUE as a literal string.\n if value:\n attr[key] = value[0]\n else:\n attr[key] = None\n","repo_name":"antuelle78/ansible-builder-awxee","sub_path":"builder/lib/python3.10/site-packages/ansible_builder/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":10327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"503357585","text":"import glob\ndef align_Corpus(src_path,trg_path):\n\n # Read in the source and target files\n src_files = glob.glob(src_path)\n trg_files = glob.glob(trg_path)\n\n # Create a dictionary to store the sentence pairs\n sent_pairs = {}\n\n # Loop over the files and read in the sentence pairs\n for src_file, trg_file in zip(src_files, trg_files):\n with open(src_file, encoding='utf-8') as fsrc, open(trg_file, encoding='utf-8') as ftrg:\n for src_sent, trg_sent in zip(fsrc, ftrg):\n src_sent = src_sent.strip()\n trg_sent = trg_sent.strip()\n sent_pairs.setdefault(src_sent, []).append(trg_sent)\n\n # Write out the aligned sentence pairs to a new file\n with open('aligned_corpus.txt', 'a', encoding='utf-8') as fout:\n for src_sent, trg_sents in sent_pairs.items():\n for trg_sent in trg_sents:\n # print(f'{src_sent}\\t{trg_sent}\\n')\n fout.write(f'{src_sent}\\n{trg_sent}\\n\\n',)\n\n return 0\n\nalignmet=align_Corpus('corpus/am.txt','corpus/en.txt')\nalignmet=align_Corpus('corpus/Amharic_English_E-Bible/amharic.txt','corpus/Amharic_English_E-Bible/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_Ethiopic_Bible/amharic.txt','corpus/Amharic_English_Ethiopic_Bible/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_History/amharic.txt','corpus/Amharic_English_History/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_JW_Bible/amharic.txt','corpus/Amharic_English_JW_Bible/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_JW_Daily_Quote/amharic.txt','corpus/Amharic_English_JW_Daily_Quote/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_Legal/amharic.txt','corpus/Amharic_English_Legal/english.txt')\nalignmet=align_Corpus('corpus/Amharic_English_News/amharic.txt','corpus/Amharic_English_News/english.txt')\nprint(\"done making Corpus\")\n\nimport nltk\nfrom nltk.translate import AlignedSent\nfrom nltk.translate import IBMModel1\n\n# Load the corpus text file\nwith open('aligned_corpus.txt', 'r',encoding='utf-8') as f:\n corpus_text = f.read()\n\n# Preprocess the corpus\npreprocessed_corpus = []\nfor line in corpus_text.split('\\n\\n'):\n if '\\n' in line:\n am_sent, en_sent = line.split('\\n')\n am_words = nltk.word_tokenize(am_sent.lower())\n en_words = nltk.word_tokenize(en_sent.lower())\n preprocessed_corpus.append(AlignedSent(am_words, en_words))\n\n# Split the corpus into training and testing sets\ntrain_data = preprocessed_corpus\n# test_data = preprocessed_corpus[1000:]\n\n# Train an IBM Model 1 on the training data\nibm1 = IBMModel1(train_data, 5)\n\n# Use the trained model to translate a new sentence\nam_sent = [\"\"]\n# en_sent = ibm1.translate(am_sent.split())\nprint(en_sent)\n\n# Evaluate the performance of the translation model on the test data\n# scores = ibm1.evaluate(test_data)\n# print(\"IBM Model 1 BLEU score:\", scores)\nsent=''\nwhile sent!='exit':\n sent = input(\"Enter Amharic Sentence : \") \n amharic_text=sent.split() \n englishh_text=\"\" \n for english_word in amharic_text: \n # english_word = \"This\" \n print(english_word) \n translations = ibm1.translation_table.get(english_word) \n # 
print(\"-----------------------------------------\") \n # print('Transalation===',translations) \n if translations: \n max_translation = max(translations.items(), key=lambda x: x[1]) \n eng_word = max_translation[0] \n englishh_text=englishh_text+\" \"+(eng_word) \n print(eng_word) \n else: \n print(f\"No translations available for {english_word}\") \n print('English Senterce: ',englishh_text)","repo_name":"composureR3j3c/NTLK","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33935428093","text":"from transformers import pipeline\nimport time\n\nmodel_list = [\n \"bigscience/bloom-560m\",\n # \"bigscience/bloom-1b1\"\n]\n\ninput_sentense = [\n \"GPUs play an important role in\",\n # \"Currently, AI practitioners have very limited flexibility when choosing a high-performance\",\n # \"A machine learning system designed for one technology provider’s GPU must be completely reimplemented in order to work on a different provider’s hardware. This lack\"\n]\ndevice_idx = 0\ntop_k = 1\n#token_lengths = [25,50,100,200,300,400,500,600,700,800]\ntoken_lengths = [20]\ndo_sample_flag = True\ntoken_length=100\n\ndef time_eclapsed(token_length, sentense, model, device):\n # start_time = time.perf_counter()\n if device == 'CPU':\n generator = pipeline('text-generation', model=model, max_new_tokens=token_length+5, min_new_tokens=token_length, do_sample=do_sample_flag, top_k=top_k)\n start_time = time.perf_counter()\n response = generator(sentense)\n #print(response[0][\"generated_text\"])\n elif device == 'GPU':\n generator = pipeline('text-generation', model=model, max_new_tokens=token_length+5, min_new_tokens=token_length, do_sample=do_sample_flag, top_k=top_k, device=device_idx)\n start_time = time.perf_counter()\n response = generator(sentense)\n #print(response[0][\"generated_text\"])\n else:\n print(\"error: should specify CPU or GPU option before calling\")\n return -1\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n print(str(device) + \" Elapsed time: \" + \"for \" + str(model) + \" is \", elapsed_time)\n\n return elapsed_time\n\nfor s in input_sentense:\n for t in token_lengths:\n GPU_time = time_eclapsed(t, s, model_list[0], \"GPU\")\n CPU_time = time_eclapsed(t, s, model_list[0], \"CPU\")\n print(\"speedup for: token = \" + str(t) + \", model = \"+ model_list[0] + \", speedup = \" + str(CPU_time/GPU_time))\n\n","repo_name":"s0897918/xcosdaem495","sub_path":"transformers/BLOOM/bigscience_bloom.py","file_name":"bigscience_bloom.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9227740610","text":"# this script exports a selected object (mesh) and its armature and a current action\n\n# limitations:\n# exports a single mesh and a single action\n# mesh is expected to have triangular faces (polygons), each bone must have loc/rot channels and\n# each channel of each bone must have the same number of keyframe_points in the current action\n# bone scale transformations are not exported\n# probably all mesh transforms and modifiers (except armature) should be applied before running this script\n# keyframe_points are expected to be evenly spaced in a time domain\n# animation duration in seconds is not exported\n# todo: bones which don't affect skin are exported, this is not necessary\n# todo: bones with no animation are also 
exported\n\nimport bpy\nimport mathutils\n\ndef register_bone(bones, bone):\n bones.append(bone)\n for child in bone.children:\n register_bone(bones, child)\n\nobject = bpy.context.selected_objects[0]\nassert object\nassert object.type == 'MESH'\narmature_object = object.find_armature()\nassert armature_object\nmesh = object.data\narmature = armature_object.data\nvertex_groups = object.vertex_groups\nbones = []\nassert armature.bones[0].parent == None # root bone\n\nfor bone in armature.bones:\n register_bone(bones, bone)\n\naction = object.animation_data.action\nassert action\ngroup_bone_id_map = {}\n\nfor group in vertex_groups:\n bone_id = None\n \n for id, bone in enumerate(bones):\n if(bone.name == group.name):\n bone_id = id\n break\n assert bone_id != None\n group_bone_id_map[group.index] = bone_id\n\nf = open('anim_data', 'w')\n\n# positions\n\nfor vert in mesh.vertices:\n print('v ', vert.co.x, vert.co.y, vert.co.z, file=f)\n\n# normals\n\nfor vert in mesh.vertices:\n print('n ', vert.normal.x, vert.normal.y, vert.normal.z, file=f)\n \n# weights / bone ids\n\nfor vert in mesh.vertices:\n weights = []\n \n for group in vert.groups:\n weights.append( (group_bone_id_map[group.group], group.weight) )\n \n assert len(weights) > 0\n weights = sorted(weights, key=lambda tup: tup[1], reverse=True)\n num_append = 4 - len(weights)\n \n for i in range(num_append):\n weights.append( (0,0) )\n \n if len(weights) > 4:\n weights = weights[0:4]\n \n mod = 0\n \n for tup in weights:\n mod += tup[1]\n \n for i, tup in enumerate(weights):\n weights[i] = (tup[0], tup[1] / mod)\n \n print('w', end=' ', file=f)\n \n for tup in weights:\n print(tup[0], tup[1], end=' ', file=f) \n \n print(file=f)\n\n# faces\n\nfor poly in mesh.polygons:\n print('f', end=' ', file=f)\n # todo, triangulate mesh\n assert len(poly.vertices) == 3\n \n for vert_id in poly.vertices:\n print(vert_id, end=' ', file=f)\n \n print(file=f)\n\n# bones\n# note: matrices in blender api are stored row-wise and vectors are treated as column vectors\n\nmatrices_parent_from_bone = []\n\nfor bone in bones:\n print('b', end=' ', file=f)\n parent_id = 0\n parent_from_bone = bone.matrix_local # matrix_local transformation: bp_mesh_from_bone (bp - bind pose)\n \n #if not a root bone\n if(bone.parent):\n parent_id = bones.index(bone.parent)\n tmp = bone.parent.matrix_local.copy()\n tmp.invert()\n # tmp is now: parent_from_bp_model\n parent_from_bone = tmp @ bone.matrix_local\n \n matrices_parent_from_bone.append(parent_from_bone)\n print(parent_id, end=' ', file=f)\n \n for row in bone.matrix_local:\n for i in range(4):\n print(row[i], end=' ', file=f)\n \n print(file=f)\n\n# animation (blender action)\n\nsample_count = len(action.fcurves[0].keyframe_points)\n\nfor bone_id, bone in enumerate(bones):\n channels = []\n \n for fcurve in action.fcurves:\n if bone.name == fcurve.group.name:\n channels.append(fcurve.keyframe_points)\n \n assert len(channels) >= 7 # 3 location channels and 4 rotation channels\n assert len(channels[0]) == sample_count\n assert len(channels[3]) == sample_count\n parent_from_bone = matrices_parent_from_bone[bone_id]\n \n for i in range(sample_count):\n print('s', end=' ', file=f)\n loc = mathutils.Vector( (channels[0][i].co.y, channels[1][i].co.y, channels[2][i].co.y) )\n rot = mathutils.Quaternion( (channels[3][i].co.y, channels[4][i].co.y, channels[5][i].co.y, channels[6][i].co.y) )\n bone_transform = mathutils.Matrix.Translation(loc) @ mathutils.Matrix.to_4x4( rot.to_matrix() )\n assert bone_transform.row[3].w == 1 # 
test if to_4x4 returns a correct matrix\n parent_from_bone2 = parent_from_bone @ bone_transform\n loc2, rot2, sc2 = parent_from_bone2.decompose()\n print(loc2.x, loc2.y, loc2.z, rot2.w, rot2.x, rot2.y, rot2.z, file=f)\nf.close()\n","repo_name":"matiTechno/various","sub_path":"graphics/skeletal/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8505698878","text":"import logging\nimport math\nimport random\n\nclass Layer:\n\n def __init__(self):\n\n self.velocity = float(1)\n self.gain = float(0)\n self.sample = int(0)\n\n\nclass Target:\n\n def __init__(self):\n\n self.channel = int(0)\n self.gain = float(0)\n self.adsr = (float(0), float(0), float(0), float(0))\n self.layers = []\n\n\nclass Technique:\n\n def __init__(self):\n\n self.midi = []\n self.targets = []\n\n\nclass Instrument:\n\n def __init__(self):\n\n self.techniques = []\n\n\nclass Drumkit:\n\n def __init__(self):\n\n self.instruments = []\n\n\nclass Sampler:\n\n def __init__(self):\n\n self.drumkit = Drumkit()\n # MIDI note to technique mapping\n self.MIDI = {}\n # samples map\n self.samples = {}\n # logging\n self.logger = logging.getLogger(\"sampler\")\n\n # methods for loader\n\n def createInstrument(self, dummy):\n \n # relatives\n drumkit = self.drumkit\n # new instrument\n instrument = Instrument()\n drumkit.instruments += [instrument]\n # return index\n return float(len(drumkit.instruments) - 1)\n\n\n def createTechnique(self, instrument_n):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n # new technique\n technique = Technique()\n instrument.techniques += [technique]\n # return index\n return float(len(instrument.techniques) - 1)\n\n\n def createTarget(self, instrument_n, technique_n):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n # new target\n target = Target()\n technique.targets += [target]\n # return index\n return float(len(technique.targets) - 1)\n\n\n def createLayer(self, instrument_n, technique_n, target_n):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # new layer\n layer = Layer()\n target.layers += [layer]\n # return index\n return float(len(target.layers) - 1)\n\n\n def setTechniqueMIDI(self, instrument_n, technique_n, midi):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n # set MIDI\n technique.midi += [int(midi)]\n # add to MIDI map\n self.MIDI[int(midi)] = (int(instrument_n), int(technique_n))\n\n\n def setTargetChannel(self, instrument_n, technique_n, target_n, channel):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # set channel\n target.channel = int(channel)\n \n\n def setTargetGain(self, instrument_n, technique_n, target_n, gain):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # set gain\n target.gain = gain\n\n\n def setTargetADSR(self, instrument_n, technique_n, 
target_n, a, d, s, r):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # set adsr\n target.adsr = (a, d, s, r)\n \n \n def sortLayersByVel(self, instrument_n, technique_n, target_n):\n \n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n target.layers.sort(cmp=lambda x,y: cmp(x.velocity, y.velocity)) \n\n\n def setLayerVelocity(self, instrument_n, technique_n, target_n, layer_n, velocity):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # set velocity\n layer.velocity = velocity\n\n\n def setLayerGain(self, instrument_n, technique_n, target_n, layer_n, gain):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # set gain\n layer.gain = gain\n\n\n def setLayerSample(self, instrument_n, technique_n, target_n, layer_n, sample):\n\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # set sample\n layer.sample = int(sample)\n\n \n def setSampleData(self, sample, data):\n\n self.samples[int(sample)] = int(data)\n \n \n def setDebug(self, enabled):\n \n debug = bool(enabled) \n if debug:\n loggingLevel = logging.DEBUG\n else:\n loggingLevel = logging.INFO\n self.logger.setLevel(loggingLevel)\n\n \n def printDebug(self, dummy):\n \n self.logger.debug(\"MIDI map:\")\n self.logger.debug(self.MIDI)\n self.logger.debug(\"samples data:\")\n self.logger.debug(self.samples)\n self.logger.debug(\"drumkit tree:\")\n for instrument in self.drumkit.instruments:\n self.logger.debug(instrument.__dict__)\n for technique in instrument.techniques:\n self.logger.debug(technique.__dict__)\n for target in technique.targets:\n self.logger.debug(target.__dict__)\n for layer in target.layers:\n self.logger.debug(layer.__dict__)\n\n\n # methods for user\n\n def isExists(self, midi):\n\n # look at our map\n if int(midi) in self.MIDI:\n return float(1)\n else:\n return float(0)\n\n\n def getInstrument(self, midi):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # return value\n return float(instrument_n)\n\n\n def getNumTargets(self, midi):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n # return value\n num = len(technique.targets)\n return float(num)\n\n\n def getTargetChannel(self, midi, target_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # return value\n channel = target.channel\n return float(channel)\n \n\n def getTargetGain(self, 
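# The create*/set* loader methods above return and accept indices as plain
# floats, which keeps the API usable from hosts that only pass numbers
# around. A hypothetical loading sequence, just to illustrate the call
# order (all values made up):
sampler = Sampler()
inst = sampler.createInstrument(0)            # -> 0.0
tech = sampler.createTechnique(inst)          # -> 0.0
sampler.setTechniqueMIDI(inst, tech, 36)      # map MIDI note 36 to this technique
targ = sampler.createTarget(inst, tech)
layer = sampler.createLayer(inst, tech, targ)
sampler.setLayerVelocity(inst, tech, targ, layer, 0.8)
sampler.setLayerSample(inst, tech, targ, layer, 101)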
midi, target_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # return value\n gain = target.gain\n return float(gain)\n\n\n def getTargetADSR(self, midi, target_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # return value\n adsr = target.adsr\n return adsr\n\n\n def getNumLayers(self, midi, target_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n # return value\n num = len(target.layers)\n return float(num)\n\n\n def getLayerVelocity(self, midi, target_n, layer_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # return value\n velocity = layer.velocity\n return float(velocity)\n\n\n def getLayerGain(self, midi, target_n, layer_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # return value\n gain = layer.gain\n return float(gain)\n\n\n def getLayerSample(self, midi, target_n, layer_n):\n\n # restore instrument and technique\n instrument_n, technique_n = self.MIDI[int(midi)]\n # relatives\n drumkit = self.drumkit\n instrument = drumkit.instruments[int(instrument_n)]\n technique = instrument.techniques[int(technique_n)]\n target = technique.targets[int(target_n)]\n layer = target.layers[int(layer_n)]\n # return value\n sample = layer.sample\n return float(sample)\n \n \n def getSampleData(self, sample):\n\n sample = self.samples[int(sample)]\n return float(sample)\n \n # math\n \n def humanFalloff(self, vel, time, human):\n \n if human > 0:\n alpha = - math.log(1 - 0.997)/human\n falloff = 1 - math.exp(-time)/alpha\n else:\n falloff = 1\n fallVel = vel*falloff\n return float(fallVel)\n \n \n def humanVelocity(self, vel, human):\n \n humanVel = random.gauss(vel, human)\n if humanVel < 0:\n humanVel = 0\n if humanVel > 1:\n humanVel = 1\n return float(humanVel)\n \n \n def humanTime(self, human):\n \n humanTime = random.gauss(human/2, human/2)\n if (humanTime < 0):\n humanTime = 0\n if (humanTime > human):\n humanTime = human\n return float(humanTime)\n \n \n def getMorph(self, midi, target, vel):\n \n # search layers\n numLayers = int(self.getNumLayers(midi, target))\n top = numLayers - 1\n bottom = 0\n for layer in range(numLayers):\n layerVel = self.getLayerVelocity(midi, target, layer)\n topVel = self.getLayerVelocity(midi, target, top)\n bottomVel = self.getLayerVelocity(midi, target, bottom)\n if layerVel <= vel and layerVel > bottomVel:\n 
bottom = layer\n            if layerVel >= vel and layerVel < topVel:\n                top = layer\n        # calculate morph\n        topVel = self.getLayerVelocity(midi, target, top)\n        bottomVel = self.getLayerVelocity(midi, target, bottom)\n        if (topVel == bottomVel):\n            morph = 0\n        else:\n            morph = (vel - bottomVel)/(topVel - bottomVel)\n        return (float(top), float(bottom), float(morph))\n","repo_name":"ibanknatoPrad/csDrummer","sub_path":"trunk/src/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37247719325","text":"class Solution:\n    def addStrings(self, num1: str, num2: str) -> str:\n        res = []\n\n        carry = 0\n        p1 = len(num1) - 1\n        p2 = len(num2) - 1\n        while p1 >= 0 or p2 >= 0:\n            x1 = ord(num1[p1]) - ord('0') if p1 >= 0 else 0\n            x2 = ord(num2[p2]) - ord('0') if p2 >= 0 else 0\n            value = (x1 + x2 + carry) % 10\n            carry = (x1 + x2 + carry) // 10\n            res.append(value)\n            p1 -= 1\n            p2 -= 1\n        \n        if carry:\n            res.append(carry)\n        \n        return ''.join(str(x) for x in res[::-1])\n    \n# Method 1:\n# Use of dictionary\n\nclass Solution:\n    def addStrings(self, num1: str, num2: str) -> str:\n        \n        def str2int(num):\n            numDict = {'0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5,\n                      '6' : 6, '7' : 7, '8' : 8, '9' : 9}\n            output = 0\n            for d in num:\n                output = output * 10 + numDict[d]\n            return output\n        \n        return str(str2int(num1) + str2int(num2)) \n# Method 2\n# Use of unicode\n\nclass Solution:\n    def addStrings(self, num1: str, num2: str) -> str:\n        def str2int(num):\n            result = 0\n            for n in num:\n                result = result * 10 + ord(n) - ord('0')\n            return result\n        return str(str2int(num1) + str2int(num2))","repo_name":"YiruDing/LeetcodePractice","sub_path":"Neetcode305/Array,String,&Hashing/415_Add_Strings.py","file_name":"415_Add_Strings.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2310475550","text":"# !/usr/bin/env python3\n\nimport pymysql\nimport contextlib\nfrom base.logger import LOGGER\nfrom config_element import conf_load\n\n\n@contextlib.contextmanager\ndef mysql(db_conf=None):\n    \"\"\"\n    MySQL connection helper\n    examples:\n\n        with mysql() as cur:\n            cur.execute('select * from czb_message.sms_log where mobile=18515966636 group by send_time DESC limit 1;')\n            result = cur.fetchall()\n            print(result)\n    :return: cursor\n    \"\"\"\n    if not db_conf:\n        conf = conf_load('../__conf.yaml').read()['MYSQL']\n    else:\n        conf = db_conf\n    conn = pymysql.connect(**conf)\n    cur = conn.cursor(cursor=pymysql.cursors.DictCursor)\n    try:\n        yield cur\n    except Exception as e:\n        LOGGER.error(e)\n    finally:\n        conn.commit()\n        cur.close()\n        conn.close()\n","repo_name":"medivhXu/AT-interface","sub_path":"base/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"29902356821","text":"import matplotlib.pyplot as plt\nimport time, random\nplt.rcParams['font.family'] ='Malgun Gothic'\nplt.rcParams['axes.unicode_minus'] =False\n\ndef evaluate_n2(A, x):\n    sumval = 0\n    temp = 0\n    global n\n    for i in range(n):\n        temp = A[i]\n        for j in range(i):\n            temp = temp * x\n        sumval = temp + sumval\n    return sumval\n\n\ndef evaluate_n(A, x):\n    global n\n    sumval = 0\n    temp = 0\n    for i in range(n):\n        temp = A[i] * (x**i)\n        sumval += temp\n    return sumval\n\nrandom.seed()\n\nm = int(input())\nlist1 = []\nlist2 = []\nfor i in range(m):\n    n = 
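# All three addStrings variants above should agree on their results; a quick
# sanity check, assuming the most recent Solution definition is in scope:
s = Solution()
assert s.addStrings("456", "77") == "533"
assert s.addStrings("0", "0") == "0"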
int(input())\n    A = [random.randint(-1000, 1000) for _ in range(n)]\n    x = random.randint(-1000, 1000)\n\n    print(\"evaluate_n2\")\n    before1 = time.process_time()\n    print(\"before = \", before1)\n    evaluate_n2(A,x)\n    after1 = time.process_time()\n    print(\"after = \", after1)\n    result1 = after1 - before1\n    list1.append(result1)\n    print(\"after - before = \", result1) \n\n    print(\"evaluate_n\")\n    before2 = time.process_time()\n    print(\"before = \", before2)\n    evaluate_n(A,x)\n    after2 = time.process_time()\n    print(\"after = \", after2)\n    result2 = after2 - before2\n    list2.append(result2)\n    print(\"after - before = \", result2)\n    print(\"=============================================\") \n\n\nplt.plot(list1, label= \"O(n^2)\")\nplt.xticks([0, 1, 2, 3], labels=[\"1000\",\"5000\",\"10000\",\"15000\"])\nplt.plot(list2, label= \"O(n)\")\nplt.xticks([0, 1, 2, 3], labels=[\"1000\",\"5000\",\"10000\",\"15000\"])\nplt.xlabel('n value')\nplt.ylabel('time (seconds)')\n# # plt.xlim([0,100000])\n# # plt.ylim([0,3])\nplt.legend()\nplt.show()\n\n","repo_name":"parkjh96/codeTest","sub_path":"자료구조/다항식matplot.py","file_name":"다항식matplot.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21333507082","text":"import asyncio\nimport websockets\nimport youtube_dl\nimport config\nfrom player.player import toggle_status, async_skip\nfrom websocket.connection import dispatch\nfrom youtube import download\nfrom websocket.packet import Packet\nfrom youtube.utils import extract_song_metadata\n\nsocket = websockets.WebSocketCommonProtocol\n\n\nasync def on_download(websocket: socket, packet: Packet):\n    with youtube_dl.YoutubeDL(config.get_opts()) as ytdl:\n        loop = asyncio.get_event_loop()\n        file = await download.download_song(websocket, ytdl, packet.get('song'), packet.user, loop)\n\n\nasync def on_search(websocket: socket, packet: Packet):\n    with youtube_dl.YoutubeDL(config.get_opts()) as ytdl:\n        song_id = packet.get('id')\n        song = packet.get('song')\n\n        info = await download.fetch_info(websocket, ytdl, song, song_id)\n        metadata = extract_song_metadata(info)\n\n        response = Packet(\n            event=packet.event, body={\n                'metadata': metadata,\n                'id': song_id\n            }\n        )\n        await dispatch(websocket, response)\n\n\nasync def on_pause(websocket: socket, packet: Packet):\n    await toggle_status()\n\n\nasync def on_play(websocket: socket, packet: Packet):\n    await toggle_status()\n\n\nasync def on_skip(websocket: socket, packet: Packet):\n    await async_skip()\n","repo_name":"Xetera/StrawberryPlayer","sub_path":"server/websocket/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"37122534712","text":"from copy import copy\n\nclass Rotate:\n    def __init__(self):\n        pass\n\n    def rotate_extra_array(self, list_to_rotate, k):\n        \"\"\"\n        Builds rotated array within a temporary new array in memory. Time O(N), Space O(N).\n        \"\"\"\n        \n        x1 = copy(list_to_rotate)\n        length = len(x1)\n        if length == 0:\n            return x1\n        k = k % length\n        if k == 0:\n            return x1\n        x2 = copy(list_to_rotate)\n        for i in range(length):\n            x2[(i+k) % length] = x1[i]\n\n        x1 = x2\n        return x1\n    \n    def rotate_cycles(self, list_to_rotate, k):\n        \"\"\"\n        Swaps elements of array in place in a single pass. 
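# Horner's rule evaluates the same polynomial as the timing script above in
# O(n) multiplications with no pow() calls; a sketch assuming the same
# coefficient layout (A[i] is the coefficient of x**i) and no global n:
def evaluate_horner(A, x):
    sumval = 0
    for coeff in reversed(A):
        sumval = sumval * x + coeff
    return sumval

assert evaluate_horner([1, 2, 3], 2) == 1 + 2*2 + 3*4  # == 17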
Time O(N), Space O(1).\n \"\"\"\n x = copy(list_to_rotate)\n length = len(x)\n if length == 0:\n return x\n k = k % length\n if k == 0:\n return x\n\n start = 0\n count = 0\n while count < length:\n current_ind = start\n next_ind = (start + k) % length\n next_value = x[start]\n while True:\n current_ind = next_ind\n next_ind = (current_ind + k) % length\n temp = x[current_ind]\n x[current_ind] = next_value\n next_value = temp\n count += 1\n if current_ind == start:\n break\n start += 1\n return x\n","repo_name":"ofbennett/ds-and-algos","sub_path":"python/arrays/other/array_algos.py","file_name":"array_algos.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41061536287","text":"import re\nfrom collections import deque\nfrom urllib.parse import urljoin\n\nimport requests\n\nLI_A_PATTERN = re.compile(r'
<li[^>]*>.*?</li>')\nA_TEXT_PATTERN = re.compile(r'<a[^>]*?>(.*?)</a>')\nA_HREF_PATTERN = re.compile(r'<a[^>]*?href=\"(.*?)\"\s*[^>]*?>')\n\n\ndef decode_page(page_bytes, charsets):\n    \"\"\"Decode the page bytes with the given charsets\"\"\"\n    for charset in charsets:\n        try:\n            return page_bytes.decode(charset)\n        except UnicodeDecodeError:\n            pass\n\n\ndef get_matched_parts(content_string, pattern):\n    \"\"\"Extract every piece of the string that matches the regex\"\"\"\n    return pattern.findall(content_string) \\\n        if content_string else []\n\n\ndef get_matched_part(content_string, pattern, group_no=1):\n    \"\"\"Extract the first piece of the string that matches the regex\"\"\"\n    match = pattern.search(content_string)\n    if match:\n        return match.group(group_no)\n\n\ndef get_page_html(seed_url, *, charsets=('utf-8', )):\n    \"\"\"Fetch the HTML source of the page\"\"\"\n    resp = requests.get(seed_url)\n    if resp.status_code == 200:\n        return decode_page(resp.content, charsets)\n\n\ndef repair_incorrect_href(current_url):\n    \"\"\"Repair the extracted href attribute\"\"\"\n    href = current_url\n    if href.startswith('//'):\n        href = urljoin('http://', href)\n    elif href.startswith('/'):\n        href = urljoin(current_url, href)\n    return href if href.startswith('http') else ''\n\n\ndef start_crawl(seed_url, pattern, *, max_depth=-1):\n    \"\"\"Start crawling\"\"\"\n    new_urls, visited_urls = deque(), set()\n    new_urls.append((seed_url, 0))\n    while new_urls:\n        current_url, depth = new_urls.popleft()\n        visited_urls.add(current_url)\n        if depth != max_depth:\n            # fetch the decoded HTML source\n            page_html = get_page_html(current_url, charsets=('utf-8', 'gbk'))\n            # print(type(page_html))\n            # pull the wanted HTML fragments out with the regex\n            contents = get_matched_parts(page_html, pattern)\n            # print(contents)\n            for content in contents:\n                text = get_matched_part(content, A_TEXT_PATTERN)\n                href = get_matched_part(content, A_HREF_PATTERN)\n                print(text, href)\n                if href:\n                    href = repair_incorrect_href(href)\n                    print(text, href)\n                    if href and href not in visited_urls:\n                        new_urls.append((href, depth + 1))\n\n\ndef main():\n    \"\"\"Main entry point\"\"\"\n    start_crawl(\n        seed_url='http://sports.sohu.com/nba_a.shtml',\n        pattern=LI_A_PATTERN,\n        max_depth=2\n    )\n\n\nif __name__ == '__main__':\n    main()\n\n    ","repo_name":"Airfald/practice","sub_path":"Python/3-craw.py","file_name":"3-craw.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"18660066256","text":"import datetime\n\ndef read_n_bytes(fn, want):\n    bytes = bytearray(want)\n    with open(fn, \"rb\") as f:\n        written = 0\n        while written < want:\n            chunk = f.read(4096)\n            n = min(len(chunk), want - written)\n            bytes[written:written + n] = chunk[:n]\n            written += n\n\n    assert len(bytes) == want\n    return bytes\n\ndef main():\n    x = read_n_bytes(\"/dev/random\", 2**30)\n\n    for _ in range(10):\n        with open(\"out.bin\", \"wb\") as f:\n            t1 = datetime.datetime.now()\n\n            i = 0\n            while i < len(x):\n                f.write(x[i:i+4096])\n                i += 4096\n\n            t2 = datetime.datetime.now()\n            diff = (t2-t1).total_seconds()\n            print(f\"blocking,{diff},{len(x) / diff}\")\n\nmain()\n","repo_name":"eatonphil/io-playground","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"62"} +{"seq_id":"13108395749","text":"import requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef fetch_stock_data(stock_list):\n    \n    url = 'https://api.nsepy.xyz/api/quote'\n    stock_data = {}\n    for stock in stock_list:\n        params = {'symbol': stock, 'series': 'EQ'}\n        response = requests.get(url, params=params)\n        data = response.json()['data']\n        stock_data[stock] = data\n    return 
stock_data\n\n\ndef plot_stock_data(stock_data):\n    \"\"\"\n    Plots the closing values of each stock in a dictionary of stock data.\n    \n    Args:\n        stock_data (dict): A dictionary where each key is a stock symbol and the corresponding value\n                           is a list of dictionaries containing the stock data.\n    \n    Returns:\n        None\n    \"\"\"\n    fig, ax = plt.subplots()\n    for stock in stock_data:\n        data = stock_data[stock]\n        dates = [x['Date'] for x in data]\n        close_values = [x['Close'] for x in data]\n        ax.plot(dates, close_values, label=stock)\n    ax.set_xticks(ax.get_xticks()[::30])\n    ax.legend()\n    ax.set_title('Closing Values of Stocks')\n    plt.show()\n\n\ndef concat_stock_data(stock_data):\n    \"\"\"\n    Concatenates the data for all the stocks in a dictionary into a single Pandas DataFrame.\n    \n    Args:\n        stock_data (dict): A dictionary where each key is a stock symbol and the corresponding value\n                           is a list of dictionaries containing the stock data.\n    \n    Returns:\n        A Pandas DataFrame with the concatenated data, in the desired table format.\n    \"\"\"\n    stock_data_list = []\n    for stock in stock_data:\n        data = stock_data[stock]\n        df = pd.DataFrame(data)\n        df['Stock'] = stock\n        stock_data_list.append(df)\n    table_value = pd.concat(stock_data_list, axis=0, ignore_index=True)\n    table_value = table_value.pivot(index='Date', columns='Stock', values='Close')\n    return table_value\n\n\n# Fetch stock data\nstock_list = ['SBIN', 'ASIANPAINT', 'AXISBANK']\nstock_data = fetch_stock_data(stock_list)\n\n# Plot stock data\nplot_stock_data(stock_data)\n\n# Concatenate stock data\ntable_value = concat_stock_data(stock_data)\nprint(table_value)","repo_name":"pivarsha/New-folder--3-","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25999066826","text":"import glob\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\n\n\n# ******************************************************************************\n# Creating subfolders with subclasses as names\n# ******************************************************************************\n\ndf = pd.read_csv(\"./combined-data/combined_metadata.csv\", index_col=0)\ncells, labels = [], []\nsubclasses = {}\nfor _, row in df.iterrows():\n    if row[\"dataset\"] == \"gouwens\":\n        subclass = row[\"t-type\"][:3]\n        if subclass == \"Ser\":\n            continue\n\n        cells.append(row[\"cell_id\"])\n        labels.append(subclass)\n\n        if subclass in subclasses:\n            subclasses[subclass] += 1\n        else:\n            subclasses[subclass] = 1\n\n# Create directory\ntrain_dir = f\"./gouwens-data/training_images_subclass\"\nif not os.path.isdir(train_dir):\n    os.mkdir(train_dir)\n\nsubclass_cell_counts = {}\nfor cell, label in zip(cells, labels):\n    try:\n        src = f\"./gouwens-data/preprocessed_images/{cell}.png\"\n        dst = f\"./gouwens-data/training_images_subclass/{label}/{cell}.png\"\n        # make sure the per-label subfolder exists before copying\n        os.makedirs(os.path.dirname(dst), exist_ok=True)\n\n        shutil.copy(src, dst)\n\n        if label in subclass_cell_counts:\n            subclass_cell_counts[label] += 1\n        else:\n            subclass_cell_counts[label] = 1\n    except:\n        print(f\"File not found: {cell}.png\")\n\nprint(subclass_cell_counts)\n","repo_name":"youngseok-seo/MorphologyCNN","sub_path":"create_subclass_subfolders.py","file_name":"create_subclass_subfolders.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25534850744","text":"#!/usr/bin/env python3\n\nimport sys\nREQUIRED_PYTHON = (3, 4, 0)\nif sys.version_info < 
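# concat_stock_data above reshapes long-form records into a wide Date x Stock
# table via DataFrame.pivot; a tiny self-contained illustration of that reshape:
import pandas as pd

long_df = pd.DataFrame({
    'Date': ['d1', 'd1', 'd2', 'd2'],
    'Stock': ['A', 'B', 'A', 'B'],
    'Close': [10, 20, 11, 21],
})
wide = long_df.pivot(index='Date', columns='Stock', values='Close')
# wide now has one row per Date and one 'Close' column per stock symbol.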
REQUIRED_PYTHON:\n print(\"Please upgrade your version of python to at least v{}.{}.{}\".format(*REQUIRED_PYTHON))\n exit(1)\n\n# Set the scripts working directory to it's own location so relative\n# paths work as expected\nimport os\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\nimport argparse\nimport subprocess\nimport tempfile\nimport json\n\ndef execute_no_fail(command, *args, **kwargs):\n result = execute(command, *args, **kwargs)\n if result[0] != 0:\n raise Exception(\"The command {} returned {}\".format(command, result[0]))\n return result\n\ndef execute(command, cwd=None, shell=False, stdout=None, stderr=None):\n print(\"EXECUTING:\", \" \".join(command))\n # return 0, \"\".encode(\"utf-8\"), \"\".encode(\"utf-8\")\n proc = subprocess.Popen(command, cwd=cwd, shell=shell, stdout=stdout, stderr=stderr)\n (out, err) = proc.communicate()\n return proc.wait(), out, err\n\nclass Git(object):\n\n @staticmethod\n def ensure_updated_head(repopath=\".\"):\n print(\"Ensuring there are no un-pushed changes\")\n execute_no_fail([\"git\", \"diff\", \"--exit-code\", \"--stat\", \"origin/master\"], cwd=repopath)\n\n @staticmethod\n def get_latest_commit_hash(repopath=\".\"):\n binary = execute_no_fail([\"git\", \"rev-parse\", \"HEAD\"], cwd=repopath, stdout=subprocess.PIPE)[1]\n return binary.decode(\"utf-8\").strip(\"\\n\")\n\n\nclass Start(object):\n @staticmethod\n def start_all():\n for name in [\"db\", \"api\", \"ntfy\"]:\n print(\"Starting\", name, \"container\")\n execute_no_fail([\"docker\", \"start\", name])\n print(\"\\nThe api is accessible from port 8000 and socket.io from 8060\")\n\n @staticmethod\n def setup_all(pull):\n volume_overrides = { \"apisource\": os.getcwd() }\n with open(\"./docker/api/Dockerrun.aws.json\", \"r\") as f:\n Create.setup_with_dockerrun(json.loads(f.read()), True, volume_overrides, \"host\", pull)\n with open(\"./docker/notify/Dockerrun.aws.json\", \"r\") as f:\n Create.setup_with_dockerrun(json.loads(f.read()), True, volume_overrides, \"host\", pull)\n with open(\"./docker/db/Dockerrun.aws.json\", \"r\") as f:\n Create.setup_with_dockerrun(json.loads(f.read()), True, volume_overrides, \"host\", pull)\n\n @staticmethod\n def parse_args():\n parser = argparse.ArgumentParser(description=\"Starts the api environment\")\n parser.add_argument(\"-l\", \"--local\", default=False, action=\"store_true\",\n help=\"Don't pull any images, just run the latest local tag\")\n args = parser.parse_args()\n Start.setup_all(not args.local)\n Start.start_all()\n\nclass Stop(object):\n @staticmethod\n def stop_container(name, time_till_kill=3):\n print(\"Stopping {} container\".format(name))\n return execute_no_fail([\"docker\", \"stop\", \"-t\", str(time_till_kill), name])\n\n @staticmethod\n def stop_api_env():\n Stop.stop_container(\"ntfy\")\n Stop.stop_container(\"api\")\n Stop.stop_container(\"db\")\n\n @staticmethod\n def parse_args():\n parser = argparse.ArgumentParser(description=\"Stops the api environment\")\n parser.parse_args()\n Stop.stop_api_env()\n\n\nclass Create(object):\n @staticmethod\n def kill_and_delete(name):\n execute([\"docker\", \"rm\", \"-f\", name])\n\n @staticmethod\n def create_image(name, source, no_cache):\n args = [\"docker\", \"build\", \"-t\", name]\n if no_cache:\n args.append(\"--no-cache\")\n args.append(source)\n execute_no_fail(args)\n\n @staticmethod\n def pull_image(name):\n execute_no_fail([\"docker\", \"pull\", name])\n\n @staticmethod\n def setup_with_dockerrun(dockerrun, port_mirror=False, 
volume_overrides={}, net=\"bridge\", pull=True):\n containers = dockerrun[\"containerDefinitions\"]\n volume_mounts = {\n v[\"name\"]: volume_overrides.get(v[\"name\"], v[\"host\"][\"sourcePath\"])\n for v in dockerrun[\"volumes\"] }\n for c in containers:\n ports = [\n [ p[\"containerPort\"] if port_mirror else p[\"hostPort\"], p[\"containerPort\"] ]\n for p in c[\"portMappings\"] ]\n volumes = [\n [ volume_mounts[v[\"sourceVolume\"]], v[\"containerPath\"], v[\"readOnly\"] ]\n for v in c[\"mountPoints\"] ]\n if pull:\n Create.pull_image(c[\"image\"])\n Create.kill_and_delete(c[\"name\"])\n Create.create_container(c[\"name\"], c[\"image\"], ports=ports, volumes=volumes, net=net)\n\n @staticmethod\n def create_container(name, image, ports=None, volumes=None, links=None, tty=False, net=\"bridge\"):\n command = [\"docker\", \"create\", \"--name\", name]\n if ports:\n for p in ports:\n command.extend([\"-p\", str(p[0]) + \":\" + str(p[1])])\n if volumes:\n for v in volumes:\n ro = \":ro\" if v[2] else \"\"\n command.extend([\"-v\", v[0] + \":\" + v[1] + ro])\n if links:\n for link in links:\n command.extend([\"--link\", link])\n if tty:\n command.append(\"-t\")\n command.append(\"--net=\" + net)\n command.append(image)\n execute(command)\n\n @staticmethod\n def setup_api_container(volume, no_cache):\n Create.kill_and_delete(\"api\")\n Create.create_image(\"delegateit/gatapi\", \"./docker/api\", no_cache)\n\n @staticmethod\n def setup_ntfy_container(volume, no_cache):\n Create.kill_and_delete(\"ntfy\")\n Create.create_image(\"delegateit/gatntfy\", \"./docker/notify\", no_cache)\n\n @staticmethod\n def setup_db_container(volume, no_cache):\n Create.kill_and_delete(\"db\")\n Create.create_image(\"delegateit/gatdb\", \"./docker/db\", no_cache)\n\n @staticmethod\n def parse_args():\n containers = [\"api\", \"db\", \"ntfy\", \"fullapi\"]\n parser = argparse.ArgumentParser(description=\"docker container and image creation for DelegateIt\")\n parser.add_argument(\"name\", choices=containers,\n help=\"the name of the container to create.\")\n parser.add_argument(\"--no-cache\", default=False, action=\"store_true\", dest=\"no_cache\",\n help=\"Do not use docker's cache when building images.\")\n args = parser.parse_args()\n\n abs_source = os.getcwd()\n Create.create_image(\"delegateit/gatbase\", \"./docker/base\", args.no_cache)\n if args.name == \"api\" or args.name == \"fullapi\":\n Create.setup_api_container(abs_source, False)\n if args.name == \"db\" or args.name == \"fullapi\":\n Create.setup_db_container(abs_source, False)\n if args.name == \"ntfy\" or args.name == \"fullapi\":\n Create.setup_ntfy_container(abs_source, False)\n\nclass Package(object):\n excludes = [\n \"*/.git/*\",\n \"*/__pycache__/*\",\n \"*/.elasticbeanstalk/*\",\n \"*.swp\",\n \"*/.noseids\",\n \"apisource/testlib/*\"\n ]\n\n @staticmethod\n def package_lambda(apisource, apiconfig, outdir, tempdir):\n print(\"Packaging lambda\")\n execute([\"rm\", os.path.join(outdir, \"gator-lambda.zip\")])\n execute_no_fail([\"cp\", \"-R\", os.path.join(apisource, \"notify\"), tempdir])\n execute_no_fail([\"cp\", apiconfig, os.path.join(tempdir, \"notify\", \"config.json\")])\n execute_no_fail([\"zip\", \"-r\", os.path.join(os.getcwd(), outdir, \"gator-lambda.zip\"),\n \"lambda.js\",\n \"gator.js\",\n \"push_notifications.py\",\n \"config.json\"],\n cwd=os.path.join(tempdir, \"notify\"))\n\n @staticmethod\n def package_api(apisource, apiconfig, outdir, tempdir):\n print(\"Packaging api\")\n execute([\"rm\", os.path.join(outdir, 
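# setup_with_dockerrun above turns an ECS-style Dockerrun definition into
# `docker create` flags; a toy walk-through of the same comprehensions on a
# made-up container definition (paths and names are hypothetical):
c = {"portMappings": [{"hostPort": 8000, "containerPort": 5000}],
     "mountPoints": [{"sourceVolume": "apisource",
                      "containerPath": "/var/src", "readOnly": True}]}
volume_mounts = {"apisource": "/home/me/OrderAPI"}
ports = [[p["hostPort"], p["containerPort"]] for p in c["portMappings"]]
volumes = [[volume_mounts[v["sourceVolume"]], v["containerPath"], v["readOnly"]]
           for v in c["mountPoints"]]
# -> docker create -p 8000:5000 -v /home/me/OrderAPI:/var/src:ro ...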
\"gator-api.zip\")])\n execute_no_fail([\"cp\", \"-R\", apisource, os.path.join(tempdir, \"apisource\")])\n execute_no_fail([\"cp\", apiconfig,\n os.path.join(tempdir, \"apisource\", \"local-config.json\")])\n execute_no_fail([\"cp\", os.path.join(\"docker\", \"api\", \"Dockerrun.aws.json\"), tempdir])\n execute_no_fail([\"cp\", os.path.join(\"docker\", \"api\", \"env.yaml\"), tempdir])\n zip_args = [\"zip\", \"-r\", os.path.join(os.getcwd(), outdir, \"gator-api.zip\"),\n \"apisource\",\n \"Dockerrun.aws.json\",\n \"env.yaml\",\n \"-x\"]\n zip_args.extend(Package.excludes)\n execute_no_fail(zip_args, cwd=tempdir)\n\n @staticmethod\n def package_notify(apisource, apiconfig, outdir, tempdir):\n print(\"Packaging notify\")\n execute([\"rm\", os.path.join(outdir, \"gator-notify.zip\")])\n execute_no_fail([\"cp\", \"-R\", apisource, os.path.join(tempdir, \"apisource\")])\n execute_no_fail([\"cp\", apiconfig,\n os.path.join(tempdir, \"apisource\", \"local-config.json\")])\n execute_no_fail([\"cp\", os.path.join(\"docker\", \"notify\", \"Dockerrun.aws.json\"), tempdir])\n execute_no_fail([\"cp\", os.path.join(\"docker\", \"notify\", \"env.yaml\"), tempdir])\n zip_args = [\"zip\", \"-r\", os.path.join(os.getcwd(), outdir, \"gator-notify.zip\"),\n \"apisource\",\n \"Dockerrun.aws.json\",\n \"env.yaml\",\n \"-x\"]\n zip_args.extend(Package.excludes)\n execute_no_fail(zip_args, cwd=tempdir)\n\n @staticmethod\n def package_all(apisource, apiconfig, outdir):\n with tempfile.TemporaryDirectory() as tempdir:\n api_temp = os.path.join(tempdir, \"api\")\n notify_temp = os.path.join(tempdir, \"notify\")\n lambda_temp = os.path.join(tempdir, \"lambda\")\n execute_no_fail([\"mkdir\", api_temp])\n execute_no_fail([\"mkdir\", lambda_temp])\n execute_no_fail([\"mkdir\", notify_temp])\n Package.package_api(apisource, apiconfig, outdir, api_temp)\n Package.package_notify(apisource, apiconfig, outdir, notify_temp)\n Package.package_lambda(apisource, apiconfig, outdir, lambda_temp)\n\n @staticmethod\n def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Packages the environment for elastic beanstalk in a zip\")\n parser.add_argument(\"config\",\n help=\"the config file to use\")\n parser.add_argument(\"-o\", \"--outdir\", default=\".\",\n help=\"The folder to store the zip files\")\n args = parser.parse_args()\n Package.package_all(\".\", args.config, args.outdir)\n\nclass DockerPush(object):\n\n @staticmethod\n def docker_push_list(image_list, tag=None, force=False):\n for image in image_list:\n if tag is not None:\n args = [\"docker\", \"tag\", image + \":latest\", image + \":\" + tag]\n if force:\n args.append(\"-f\")\n execute_no_fail(args)\n execute_no_fail([\"docker\", \"push\", image])\n\n @staticmethod\n def update_dockerrun_image(filename, image):\n with open(filename, \"r\") as f:\n dockerrun = json.loads(f.read())\n dockerrun[\"containerDefinitions\"][0][\"image\"] = image\n with open(filename, \"w\") as f:\n f.write(json.dumps(dockerrun, indent=4, sort_keys=True))\n\n @staticmethod\n def docker_deploy(force=False):\n Git.ensure_updated_head()\n tag = Git.get_latest_commit_hash()[:7]\n DockerPush.docker_push_list([\n \"delegateit/gatdb\",\n \"delegateit/gatapi\",\n \"delegateit/gatntfy\"], tag, force)\n DockerPush.update_dockerrun_image(\"./docker/api/Dockerrun.aws.json\", \"delegateit/gatapi:\" + tag)\n DockerPush.update_dockerrun_image(\"./docker/notify/Dockerrun.aws.json\", \"delegateit/gatntfy:\" + tag)\n\n\n @staticmethod\n def parse_args():\n parser = argparse.ArgumentParser(\n 
description=\"Pushes the images to docker hub and updates the Dockerrun.aws.json files\")\n parser.add_argument(\"-f\", \"--force\",\n help=\"Image tagging is forced. Will overwrite previous tags\")\n args = parser.parse_args()\n DockerPush.docker_deploy(args.force)\n\nclass Health(object):\n def display(eb_group):\n cmd = \"tmux new-session -d -s eb-health 'cd docker/api && eb health gator-api-\" + eb_group + \" --refresh';\"\n cmd += \"tmux split-window -v 'cd docker/notify && eb health gator-notify-\" + eb_group + \" --refresh';\"\n cmd += \"tmux -2 attach-session -d;\"\n execute(cmd, shell=True)\n\n\n @staticmethod\n def parse_args():\n env_types = [\"live\", \"test\"]\n parser = argparse.ArgumentParser(\n description=\"Displays the health of the elastic beanstalk environments\")\n parser.add_argument(\"eb_group\", choices=env_types,\n help=\"The type of environment to monitor\")\n args = parser.parse_args()\n Health.display(args.eb_group)\n\n\nclass Deploy(object):\n @staticmethod\n def get_commit_hash(apipath):\n print(\"Making sure api directory has a committed HEAD\")\n\n @staticmethod\n def eb_deploy(modules, eb_group, commit_hash):\n for m in modules:\n env_name = \"gator-\" + m + \"-\" + eb_group\n args = [\"eb\", \"deploy\", env_name, \"-nh\"]\n args.extend([\"--label\", env_name + \"-\" + commit_hash[:7]])\n args.extend([\"--message\", \"https://github.com/DelegateIt/OrderAPI/commit/\" + commit_hash])\n execute(args, cwd=os.path.join(\".\", \"docker\", m))\n\n @staticmethod\n def lambda_deploy(lambda_name, lambda_path):\n execute_no_fail([\"aws\", \"lambda\", \"update-function-code\", \"--function-name\", lambda_name, \"--zip-file\", \"fileb://\" + lambda_path, \"--publish\"])\n\n\n @staticmethod\n def deploy(apipath, apiconfig, eb_group, notify_lambda_name, push_lambda_name):\n Git.ensure_updated_head(apipath)\n commit_hash = Git.get_latest_commit_hash(apipath)\n Package.package_all(apipath, apiconfig, \".\")\n print(\"Deploying commit hash\", commit_hash)\n Deploy.eb_deploy([\"api\", \"notify\"], eb_group, commit_hash)\n Deploy.lambda_deploy(notify_lambda_name, \"./gator-lambda.zip\")\n Deploy.lambda_deploy(push_lambda_name, \"./gator-lambda.zip\")\n\n @staticmethod\n def parse_args():\n types = {\n \"test\": {\n \"config\": \"aws-test-config.json\",\n \"eb-group\": \"test\",\n \"notify_lambda\": \"TestTransactionChange\",\n \"push_lambda\": \"TestPushNotifications\"\n },\n \"live\": {\n \"config\": \"aws-prod-config.json\",\n \"eb-group\": \"live\",\n \"notify_lambda\": \"TransactionUpdate\",\n \"push_lambda\": \"PushNotifications\"\n }\n }\n parser = argparse.ArgumentParser(\n description=\"Deploys the code to elastic beanstalk\")\n parser.add_argument(\"deploy_type\", choices=types.keys(),\n help=\"The type of deployment\")\n args = parser.parse_args()\n\n apipath = \".\"\n deploy_type = types[args.deploy_type]\n\n # Prompt the user if they are deploying into production\n if args.deploy_type == \"live\":\n resp = input(\"Are you sure you want to deploy to PRODUCTION [Y/N]: \")\n if resp != \"Y\":\n sys.exit(0)\n\n apiconfig = os.path.join(apipath, deploy_type[\"config\"])\n Deploy.deploy(apipath, apiconfig, deploy_type[\"eb-group\"], deploy_type[\"notify_lambda\"], deploy_type[\"push_lambda\"])\n\n\nif __name__ == \"__main__\":\n actions = {\n \"create\": {\n \"parse\": Create.parse_args,\n \"description\": \"create the docker containers and images\"\n },\n \"start\": {\n \"parse\": Start.parse_args,\n \"description\": \"Starts the api environment\"\n },\n \"stop\": {\n 
\"parse\": Stop.parse_args,\n \"description\": \"Stops the api environment\"\n },\n \"package\": {\n \"parse\": Package.parse_args,\n \"description\": \"Packages the environment for elastic beanstalk in a zip\"\n },\n \"docker-push\": {\n \"parse\": DockerPush.parse_args,\n \"description\": \"Pushes the images to docker hub and updates the Dockerrun.aws.json files\"\n },\n \"deploy\": {\n \"parse\": Deploy.parse_args,\n \"description\": \"Deploys the code to elastic beanstalk\"\n },\n \"health\": {\n \"parse\": Health.parse_args,\n \"description\": \"Displays the health for the elastic beanstalk environment\"\n }\n }\n parser = argparse.ArgumentParser(\n description=\"Helps setup and control the environents for DelegateIt. Possible actions include: \" +\n \". \".join([k + \" - \" + v[\"description\"] for k,v in actions.items()]))\n parser.add_argument(\"action\", choices=actions.keys(), help=\"The action to perform.\")\n parser.add_argument('args', nargs=argparse.REMAINDER,\n help=\"A list of arguments to pass to the action\")\n\n\n args = parser.parse_args()\n action_name = sys.argv[1]\n del sys.argv[1]\n sys.argv[0] += \" \" + args.action\n actions[action_name][\"parse\"]()\n","repo_name":"DelegateIt/OrderAPI","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":17180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"23878531759","text":"from flask import Flask, request\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/upload\", methods=[\"POST\"])\ndef upload():\n \"\"\"接收前段传来的图片\"\"\"\n file_obj = request.files.get(\"pic\")\n if file_obj is None:\n return \"未上传文件\"\n\n # 将接收到的文件保存到本地\n # 1 创建一个新文件\n # f = open(\"./demo.jpg\", \"wb\")\n # 2 向文件中写内容\n # data = file_obj.read()\n # f.write(data)\n # 3 关闭文件\n # f.close()\n # return \"上传成功\"\n\n # 直接使用上传的文件对象保存\n file_obj.save(\"./demo1.jpg\")\n return \"上传成功\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Panda0229/flasky","sub_path":"05_upload.py","file_name":"05_upload.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31530279870","text":"import csv\nfrom pymongo import MongoClient\nfrom settings import *\n\nclient = MongoClient()\n\ndb = client[MONGO_DB]\ncollection = db[MONGO_NEWS_SOURCE_COLLECTION]\n\nwith open('news_source.csv') as csv_file:\n\n csv_reader = csv.reader(csv_file)\n\n for row in csv_reader:\n source = {\"twitter_handle\": row[0], \"link_pattern\": row[1]}\n\n if collection.count(source) == 0:\n\n print(source)\n\n collection.insert_one(source)\n","repo_name":"petakajaib/building-a-news-crawler","sub_path":"add_news_source.py","file_name":"add_news_source.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"42717696658","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \"Open\n\n# Disclaimer: Nothing herein is financial advice, and NOT a recommendation to trade real money. Many platforms exist for simulated trading (paper trading) which can be used for building and developing the methods discussed. 
Please use common sense and always first consult a professional before trading or investing.\n\n# # Part 1: Install FinRL\n\n# In[ ]:\n\n\n## install finrl library\n#get_ipython().system('pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git')\n\n\n# ## Import related modules\n\n# In[13]:\n\n\nfrom finrl.train import train\nfrom finrl.test import test\nfrom finrl.config_tickers import DOW_30_TICKER\nfrom finrl.config import INDICATORS\nfrom finrl.meta.env_stock_trading.env_stocktrading_np import StockTradingEnv\nfrom finrl.meta.env_stock_trading.env_stock_papertrading import AlpacaPaperTrading\nfrom finrl.meta.data_processor import DataProcessor\nfrom finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline\n\nimport numpy as np\nimport pandas as pd\n\n\n# ## Import Dow Jones 30 Symbols\n\n# In[14]:\n\n\nticker_list = DOW_30_TICKER\naction_dim = len(DOW_30_TICKER)\ncandle_time_interval = '15Min'\n\n\n# In[15]:\n\n\nprint(ticker_list)\n\n\n# In[16]:\n\n\nprint(INDICATORS)\n\n\n# ## Calculate the DRL state dimension manually for paper trading\n\n# In[17]:\n\n\n# amount + (turbulence, turbulence_bool) + (price, shares, cd (holding time)) * stock_dim + tech_dim\nstate_dim = 1 + 2 + 3 * action_dim + len(INDICATORS) * action_dim\n\n\n# In[18]:\n\n\nstate_dim\n\n\n# ## Get the API Keys Ready\n\n# In[19]:\n\n\nAPI_KEY = \"YOUR_ALPACA_API_KEY\"\nAPI_SECRET = \"YOUR_ALPACA_API_SECRET\"\nAPI_BASE_URL = 'https://paper-api.alpaca.markets'\ndata_url = 'wss://data.alpaca.markets'\nenv = StockTradingEnv\n\n\n# ## Show the data\n\n# ### Step 1. Pick a data source\n\n# In[20]:\n\n\nDP = DataProcessor(data_source = 'alpaca',\n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n API_BASE_URL = API_BASE_URL\n )\n\n\n# ### Step 2. Get ticker list, Set start date and end date, specify the data frequency\n\n# In[27]:\n\n\ndata = DP.download_data(start_date = '2021-10-01', \n end_date = '2021-10-05',\n ticker_list = ticker_list, \n time_interval= candle_time_interval)\n\n\n# ### Step 3. Data Cleaning & Feature Engineering\n\n# In[28]:\n\n\ndata = DP.clean_data(data)\ndata = DP.add_technical_indicator(data, INDICATORS)\ndata = DP.add_vix(data)\n\n\n# In[29]:\n\n\ndata.tail(20)\n\n\n# ### Step 4. Transform to numpy array\n\n# In[30]:\n\n\nprice_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix='True')\n\n\n# In[31]:\n\n\nprice_array\n\n\n# # Part 2: Train the agent\n\n# ## Train\n\n# In[32]:\n\n\nERL_PARAMS = {\"learning_rate\": 3e-6,\"batch_size\": 2048,\"gamma\": 0.985,\n \"seed\":312,\"net_dimension\":512, \"target_step\":5000, \"eval_gap\":30,\n \"eval_times\":1} \n#if you want to use larger datasets (change to longer period), and it raises error, \n#please try to increase \"target_step\". It should be larger than the episode steps. 
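# Worked example of the state_dim formula above, assuming the default
# INDICATORS list has 8 technical indicators and action_dim is the 30 Dow
# tickers: amount (1) + turbulence pair (2) + 3*30 + 8*30 = 333.
assert 1 + 2 + 3 * 30 + 8 * 30 == 333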
\n\n\n# In[33]:\n\n\ntrain(start_date = '2021-10-11', \n end_date = '2021-10-15',\n ticker_list = ticker_list, \n data_source = 'alpaca',\n time_interval= candle_time_interval, \n technical_indicator_list= INDICATORS,\n drl_lib='elegantrl', \n env=env,\n model_name='ppo', \n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n API_BASE_URL = API_BASE_URL,\n erl_params=ERL_PARAMS,\n cwd='./papertrading_erl', #current_working_dir\n break_step=1e5)\n\n\n# ## Test\n\n# In[34]:\n\n\naccount_value_erl=test(start_date = '2021-10-18', \n end_date = '2021-10-19',\n ticker_list = ticker_list, \n data_source = 'alpaca',\n time_interval= candle_time_interval, \n technical_indicator_list= INDICATORS,\n drl_lib='elegantrl', \n env=env, \n model_name='ppo', \n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n API_BASE_URL = API_BASE_URL,\n cwd='./papertrading_erl',\n net_dimension = 512)\n\n\n# ## Use full data to train \n\n# After tuning well, retrain on the training and testing sets\n\n# In[35]:\n\n\ntrain(start_date = '2021-10-11', \n end_date = '2021-10-19',\n ticker_list = ticker_list, \n data_source = 'alpaca',\n time_interval= candle_time_interval, \n technical_indicator_list= INDICATORS,\n drl_lib='elegantrl', \n env=env, \n model_name='ppo', \n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n API_BASE_URL = API_BASE_URL,\n erl_params=ERL_PARAMS,\n cwd='./papertrading_erl_retrain',\n break_step=5e4)\n\n\n# # Part 3: Deploy the agent\n\n# ## Setup Alpaca Paper trading environment\n\n# In[42]:\n\n\nimport datetime\nimport threading\nfrom finrl.meta.data_processors.processor_alpaca import AlpacaProcessor\nimport alpaca_trade_api as tradeapi\nimport time\nimport pandas as pd\nimport numpy as np\nimport torch\nimport gym\n\nclass AlpacaPaperTrading():\n\n def __init__(self,ticker_list, time_interval, drl_lib, agent, cwd, net_dim, \n state_dim, action_dim, API_KEY, API_SECRET, \n API_BASE_URL, tech_indicator_list, turbulence_thresh=30, \n max_stock=1e2, latency = None):\n #load agent\n self.drl_lib = drl_lib\n if agent =='ppo':\n if drl_lib == 'elegantrl': \n from elegantrl.agents import AgentPPO\n from elegantrl.train.run import init_agent\n from elegantrl.train.config import Arguments\n #load agent\n config = {'state_dim':state_dim,\n 'action_dim':action_dim,}\n args = Arguments(agent_class=AgentPPO, env=StockEnvEmpty(config))\n args.cwd = cwd\n args.net_dim = net_dim\n # load agent\n try:\n agent = init_agent(args, gpu_id = 0)\n self.act = agent.act\n self.device = agent.device\n except BaseException:\n raise ValueError(\"Fail to load agent!\")\n \n elif drl_lib == 'rllib':\n from ray.rllib.agents import ppo\n from ray.rllib.agents.ppo.ppo import PPOTrainer\n \n config = ppo.DEFAULT_CONFIG.copy()\n config['env'] = StockEnvEmpty\n config[\"log_level\"] = \"WARN\"\n config['env_config'] = {'state_dim':state_dim,\n 'action_dim':action_dim,}\n trainer = PPOTrainer(env=StockEnvEmpty, config=config)\n trainer.restore(cwd)\n try:\n trainer.restore(cwd)\n self.agent = trainer\n print(\"Restoring from checkpoint path\", cwd)\n except:\n raise ValueError('Fail to load agent!')\n \n elif drl_lib == 'stable_baselines3':\n from stable_baselines3 import PPO\n \n try:\n #load agent\n self.model = PPO.load(cwd)\n print(\"Successfully load model\", cwd)\n except:\n raise ValueError('Fail to load agent!')\n \n else:\n raise ValueError('The DRL library input is NOT supported yet. 
Please check your input.')\n \n else:\n raise ValueError('Agent input is NOT supported yet.')\n \n \n \n #connect to Alpaca trading API\n try:\n self.alpaca = tradeapi.REST(API_KEY,API_SECRET,API_BASE_URL, 'v2')\n except:\n raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')\n \n #read trading time interval\n if time_interval == '1s':\n self.time_interval = 1\n elif time_interval == '5s':\n self.time_interval = 5\n elif time_interval == candle_time_interval:\n self.time_interval = 60\n elif time_interval == '5Min':\n self.time_interval = 60 * 5\n elif time_interval == '15Min':\n self.time_interval = 60 * 15\n else:\n raise ValueError('Time interval input is NOT supported yet.')\n \n #read trading settings\n self.tech_indicator_list = tech_indicator_list\n self.turbulence_thresh = turbulence_thresh\n self.max_stock = max_stock \n \n #initialize account\n self.stocks = np.asarray([0] * len(ticker_list)) #stocks holding\n self.stocks_cd = np.zeros_like(self.stocks) \n self.cash = None #cash record \n self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = ticker_list)\n self.asset_list = []\n self.price = np.asarray([0] * len(ticker_list))\n self.stockUniverse = ticker_list\n self.turbulence_bool = 0\n self.equities = []\n \n def test_latency(self, test_times = 10): \n total_time = 0\n for i in range(0, test_times):\n time0 = time.time()\n self.get_state()\n time1 = time.time()\n temp_time = time1 - time0\n total_time += temp_time\n latency = total_time/test_times\n print('latency for data processing: ', latency)\n return latency\n \n def run(self):\n orders = self.alpaca.list_orders(status=\"open\")\n for order in orders:\n self.alpaca.cancel_order(order.id)\n \n # Wait for market to open.\n print(\"Waiting for market to open...\")\n tAMO = threading.Thread(target=self.awaitMarketOpen)\n tAMO.start()\n tAMO.join()\n print(\"Market opened.\")\n while True:\n\n # Figure out when the market will close so we can prepare to sell beforehand.\n clock = self.alpaca.get_clock()\n closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\n self.timeToClose = closingTime - currTime\n \n if(self.timeToClose < (60)):\n # Close all positions when 1 minutes til market close.\n print(\"Market closing soon. Stop trading.\")\n break\n \n '''# Close all positions when 1 minutes til market close.\n print(\"Market closing soon. 
Closing positions.\")\n \n positions = self.alpaca.list_positions()\n for position in positions:\n if(position.side == 'long'):\n orderSide = 'sell'\n else:\n orderSide = 'buy'\n qty = abs(int(float(position.qty)))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n \n # Run script again after market close for next trading day.\n print(\"Sleeping until market close (15 minutes).\")\n time.sleep(60 * 15)'''\n \n else:\n trade = threading.Thread(target=self.trade)\n trade.start()\n trade.join()\n last_equity = float(self.alpaca.get_account().last_equity)\n cur_time = time.time()\n self.equities.append([cur_time,last_equity])\n time.sleep(self.time_interval)\n \n def awaitMarketOpen(self):\n isOpen = self.alpaca.get_clock().is_open\n while(not isOpen):\n clock = self.alpaca.get_clock()\n openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\n timeToOpen = int((openingTime - currTime) / 60)\n print(str(timeToOpen) + \" minutes til market open.\")\n time.sleep(60)\n isOpen = self.alpaca.get_clock().is_open\n \n def trade(self):\n state = self.get_state()\n \n if self.drl_lib == 'elegantrl':\n with torch.no_grad():\n s_tensor = torch.as_tensor((state,), device=self.device)\n a_tensor = self.act(s_tensor) \n action = a_tensor.detach().cpu().numpy()[0] \n \n action = (action * self.max_stock).astype(int)\n \n elif self.drl_lib == 'rllib':\n action = self.agent.compute_single_action(state)\n \n elif self.drl_lib == 'stable_baselines3':\n action = self.model.predict(state)[0]\n \n else:\n raise ValueError('The DRL library input is NOT supported yet. 
Please check your input.')\n \n self.stocks_cd += 1\n if self.turbulence_bool == 0:\n min_action = 10 # stock_cd\n for index in np.where(action < -min_action)[0]: # sell_index:\n sell_num_shares = min(self.stocks[index], -action[index])\n qty = abs(int(sell_num_shares))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n self.cash = float(self.alpaca.get_account().cash)\n self.stocks_cd[index] = 0\n\n for index in np.where(action > min_action)[0]: # buy_index:\n if self.cash < 0:\n tmp_cash = 0\n else:\n tmp_cash = self.cash\n buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))\n qty = abs(int(buy_num_shares))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n self.cash = float(self.alpaca.get_account().cash)\n self.stocks_cd[index] = 0\n \n else: # sell all when turbulence\n positions = self.alpaca.list_positions()\n for position in positions:\n if(position.side == 'long'):\n orderSide = 'sell'\n else:\n orderSide = 'buy'\n qty = abs(int(float(position.qty)))\n respSO = []\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\n tSubmitOrder.start()\n tSubmitOrder.join()\n \n self.stocks_cd[:] = 0\n \n \n def get_state(self):\n alpaca = AlpacaProcessor(api=self.alpaca)\n price, tech, turbulence = alpaca.fetch_latest_data(ticker_list = self.stockUniverse, time_interval=candle_time_interval,\n tech_indicator_list=self.tech_indicator_list)\n turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0\n \n turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)\n \n tech = tech * 2 ** -7\n positions = self.alpaca.list_positions()\n stocks = [0] * len(self.stockUniverse)\n for position in positions:\n ind = self.stockUniverse.index(position.symbol)\n stocks[ind] = ( abs(int(float(position.qty))))\n \n stocks = np.asarray(stocks, dtype = float)\n cash = float(self.alpaca.get_account().cash)\n self.cash = cash\n self.stocks = stocks\n self.turbulence_bool = turbulence_bool \n self.price = price\n \n \n \n amount = np.array(self.cash * (2 ** -12), dtype=np.float32)\n scale = np.array(2 ** -6, dtype=np.float32)\n state = np.hstack((amount,\n turbulence,\n self.turbulence_bool,\n price * scale,\n self.stocks * scale,\n self.stocks_cd,\n tech,\n )).astype(np.float32)\n print(len(self.stockUniverse))\n return state\n \n def submitOrder(self, qty, stock, side, resp):\n if(qty > 0):\n try:\n self.alpaca.submit_order(stock, qty, side, \"market\", \"day\")\n print(\"Market order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | completed.\")\n resp.append(True)\n except:\n print(\"Order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | did not go through.\")\n resp.append(False)\n else:\n print(\"Quantity is 0, order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | not completed.\")\n resp.append(True)\n\n @staticmethod\n def sigmoid_sign(ary, thresh):\n def sigmoid(x):\n return 1 / (1 + np.exp(-x * np.e)) - 0.5\n\n return sigmoid(ary / thresh) * thresh\n \nclass StockEnvEmpty(gym.Env):\n #Empty Env used for loading rllib agent\n def __init__(self,config):\n state_dim = config['state_dim']\n action_dim = config['action_dim']\n self.env_num = 1\n self.max_step = 10000\n self.env_name = 'StockEnvEmpty'\n self.state_dim = state_dim \n 
self.action_dim = action_dim\n self.if_discrete = False \n self.target_return = 9999\n self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)\n self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)\n \n def reset(self):\n return \n\n def step(self, actions):\n return\n\n\n# ## Run Paper trading\n\n# In[43]:\n\n\nprint(DOW_30_TICKER)\n\n\n# In[44]:\n\n\nstate_dim\n\n\n# In[45]:\n\n\naction_dim\n\n\n# In[46]:\n\n\npaper_trading_erl = AlpacaPaperTrading(ticker_list = DOW_30_TICKER, \n time_interval = candle_time_interval, \n drl_lib = 'elegantrl', \n agent = 'ppo', \n cwd = './papertrading_erl_retrain', \n net_dim = 512, \n state_dim = state_dim, \n action_dim= action_dim, \n API_KEY = API_KEY, \n API_SECRET = API_SECRET, \n API_BASE_URL = API_BASE_URL, \n tech_indicator_list = INDICATORS, \n turbulence_thresh=30, \n max_stock=1e2)\npaper_trading_erl.run()\n\n\n# # Part 4: Check Portfolio Performance\n\n# In[47]:\n\n\nimport alpaca_trade_api as tradeapi\nimport exchange_calendars as tc\nimport numpy as np\nimport pandas as pd\nimport pytz\nimport yfinance as yf\nimport matplotlib.ticker as ticker\nimport matplotlib.dates as mdates\nfrom datetime import datetime as dt\nfrom finrl.plot import backtest_stats\nimport matplotlib.pyplot as plt\n\n\n# In[48]:\n\n\ndef get_trading_days(start, end):\n nyse = tc.get_calendar('NYSE')\n df = nyse.sessions_in_range(pd.Timestamp(start,tz=pytz.UTC),\n pd.Timestamp(end,tz=pytz.UTC))\n trading_days = []\n for day in df:\n trading_days.append(str(day)[:10])\n\n return trading_days\n\ndef alpaca_history(key, secret, url, start, end):\n api = tradeapi.REST(key, secret, url, 'v2')\n trading_days = get_trading_days(start, end)\n df = pd.DataFrame()\n for day in trading_days:\n df = df.append(api.get_portfolio_history(date_start = day,timeframe='5Min').df.iloc[:78])\n equities = df.equity.values\n cumu_returns = equities/equities[0]\n cumu_returns = cumu_returns[~np.isnan(cumu_returns)]\n \n return df, cumu_returns\n\ndef DIA_history(start):\n data_df = yf.download(['^DJI'],start=start, interval=\"5m\")\n data_df = data_df.iloc[48:]\n baseline_returns = data_df['Adj Close'].values/data_df['Adj Close'].values[0]\n return data_df, baseline_returns\n\n\n# ## Get cumulative return\n\n# In[50]:\n\n\nhistory_start_date='2022-04-15'\nhistory_end_date='2022-05-10'\n\ndf_erl, cumu_erl = alpaca_history(key=API_KEY, \n secret=API_SECRET, \n url=API_BASE_URL, \n start=history_start_date, #must be within 1 month\n end='2021-10-22') #change the date if error occurs\n\n\n# In[ ]:\n\n\ndf_djia, cumu_djia = DIA_history(start=history_start_date)\n\n\n# In[ ]:\n\n\nprint(df_erl)\n\n\n# In[ ]:\n\n\nprint(df_djia)\n\n\n# In[ ]:\n\n\ndf_erl.tail()\n\n\n# In[ ]:\n\n\nreturns_erl = cumu_erl -1 \nreturns_dia = cumu_djia - 1\nreturns_dia = returns_dia[:returns_erl.shape[0]]\nprint('len of erl return: ', returns_erl.shape[0])\nprint('len of dia return: ', returns_dia.shape[0])\n\n\n# ## plot and save\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\nplt.figure(dpi=1000)\nplt.grid()\nplt.grid(which='minor', axis='y')\nplt.title('Stock Trading (Paper trading)', fontsize=20)\nplt.plot(returns_erl, label = 'ElegantRL Agent', color = 'red')\n#plt.plot(returns_sb3, label = 'Stable-Baselines3 Agent', color = 'blue' )\n#plt.plot(returns_rllib, label = 'RLlib Agent', color = 'green')\nplt.plot(returns_dia, label = 'DJIA', color = 'grey')\nplt.ylabel('Return', fontsize=16)\nplt.xlabel('Year 2021', 
fontsize=16)\nplt.xticks(size = 14)\nplt.yticks(size = 14)\nax = plt.gca()\nax.xaxis.set_major_locator(ticker.MultipleLocator(78))\nax.xaxis.set_minor_locator(ticker.MultipleLocator(6))\nax.yaxis.set_minor_locator(ticker.MultipleLocator(0.005))\nax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=2))\nax.xaxis.set_major_formatter(ticker.FixedFormatter(['','10-19','','10-20',\n '','10-21','','10-22']))\nplt.legend(fontsize=10.5)\nplt.savefig('papertrading_stock.png')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"highwayns/FinRL","sub_path":"tutorials/3-Practical/FinRL_PaperTrading_Demo.py","file_name":"FinRL_PaperTrading_Demo.py","file_ext":"py","file_size_in_byte":22474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"12605244791","text":"#first 127 character of utf-8 has ascii\n\n\ndef test():\n try:\n filein = open('UTF8.txt', 'r', encoding='utf_8')\n fileout = open('utf8.html','w')\n outbytes = bytearray() #mutable list of bytes.\n\n for line in filein: # gets the whole line\n # print(line,end='')\n for c in line: # get each character in the line\n if ord(c)> 127:\n #print('String','this is what goes to {:04d}'.format(ord(c)))\n #s = '{:04d}'.format(ord(c))\n #print('bytes',s,'to',bytes(s,encoding='utf8'))\n outbytes += bytes('&#{:04d};'.format(ord(c)),encoding='utf_8') #bytes is immutable object. bytes of HTML format string Ť\n else: outbytes.append(ord(c))\n convertedstring = str(outbytes,encoding='utf_8')\n\n print(convertedstring,file=fileout)\n print(convertedstring)\n\n\n finally:\n filein.close()\n\n\ndef all_chars():\n fo = open('char.txt', 'w')\n\n for i in range(5000):\n print(i, chr(i))\n fo.write(chr(i) + '\\n')\n\n fo.close()\n\ndef write_chars():\n filein = open('UTF8.txt', 'a', encoding='utf_8')\n outbytes = bytearray()\n strng = None\n for i in range(3077,3200):\n outbytes += bytes(get_html_break(str(i)+'--Character of - {} \\n'.format(chr(i))),encoding='utf_8')\n print('bytes',outbytes)\n bytesstr = str(outbytes,encoding='utf_8')\n print('bytest string',bytesstr)\n filein.write(bytesstr)\n filein.close()\n\ndef add_break_decorator(func):\n def add_decorator_inner(name):\n return '
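# The per-character loop in test() above hand-rolls HTML numeric entities; the
# stdlib codec error handler does the same conversion in one pass (it emits
# unpadded references such as &#356;, which browsers accept just the same).
# A minimal sketch, assuming the same UTF8.txt input file:
with open('UTF8.txt', encoding='utf_8') as fin, open('utf8.html', 'w') as fout:
    for line in fin:
        fout.write(line.encode('ascii', errors='xmlcharrefreplace').decode('ascii'))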
    {}'.format(func(name))\n return add_decorator_inner\n\n@add_break_decorator\ndef get_html_break(text):\n return text\n\n\n\n\nif __name__ == '__main__':\n test()\n #all_chars()\n #write_chars()\n","repo_name":"venkunikku/exercises_learning","sub_path":"PycharmProjects/HelloWorldProject/charcter-byte-bytearray.py","file_name":"charcter-byte-bytearray.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"72800996037","text":"\"\"\"\n문제\n\n자율주행팀 SW 엔지니어인 당신에게 장애물과 도로를 인식할 수 있는 프로그램을 만들라는 업무가 주어졌다.\n\n\n[그림 1] 지도 예시\n\n\n우선 [그림 1]과 같이 정사각형 모양의 지도가 있다. 1은 장애물이 있는 곳을, 0은 도로가 있는 곳을 나타낸다.\n\n\n당신은 이 지도를 가지고 연결된 장애물들의 모임인 블록을 정의하고, \n불록에 번호를 붙이려 한다. 여기서 연결되었다는 것은 어떤 장애물이 좌우, 혹은 아래위로 붙어 있는 경우를 말한다. \n대각선 상에 장애물이 있는 경우는 연결된 것이 아니다.\n\n\n\n[그림 2] 블록 별 번호 부여\n\n\n[그림 2]는 [그림 1]을 블록 별로 번호를 붙인 것이다. \n\n\n지도를 입력하여 장애물 블록수를 출력하고, 각 블록에 속하는 장애물의 수를 오름차순으로 정렬하여 출력하는 프로그램을 작성하시오.\n\n\n제약조건\nN은 정사각형임으로 가로와 세로의 크기는 같으며 5 ≤ N ≤ 25\n\n입력형식\n입력 값의 첫 번째 줄에는 지도의 크기 N이 입력되고, 그 다음 N줄에는 각각 N개의 자료(0혹은 1)가 입력된다.\n\n출력형식\n첫 번째 줄에는 총 블록 수를 출력 하시오.\n\n그리고 각 블록 내 장애물의 수를 오름차순으로 정렬하여 한 줄에 하나씩 출력하시오.\n\n입력예제1복사하기\n7\n1110111\n0110101\n0110101\n0000100\n0110000\n0111110\n0110000\n\n출력예제1\n3\n7\n8\n9\n\"\"\"\n\nimport sys\nfrom collections import deque\n\nn = int(input())\n\n#입력 예시 잘보기 (이거때문에 30분이상 잡아먹음 찾느라....)\ngrid = [\n list(map(int, str(input())))\n for _ in range(n)\n]\n\nvisited = [\n [False] * n\n for _ in range(n)\n]\n\nresult = [\n [0] * n\n for _ in range(n)\n]\n\ndef block(row, col):\n \"\"\"\n 1. grid 내부에 위치\n 1. grid 값이 1이여야지 block로 인식\n 2. visited = false\n \"\"\"\n if (0 <= row < n and 0 <= col < n) and grid[row][col] == 1 and visited[row][col] == False:\n return True\n else:\n return False\n\n\ndef bfs():\n global q\n global cnt\n\n dxs = [0, 1, 0, -1]\n dys = [1, 0, -1, 0]\n\n while q:\n row, col = q.popleft()\n for dx, dy in zip(dxs, dys):\n curr_row, curr_col = row + dx, col + dy\n if block(curr_row, curr_col):\n visited[curr_row][curr_col] = True\n result[curr_row][curr_col] = cnt\n q.append((curr_row, curr_col))\n\nq = deque()\ncnt = 0\nfor i in range(n):\n for j in range(n):\n if block(i, j):\n cnt += 1\n visited[i][j] = True\n result[i][j] = cnt\n q.append((i, j)) \n bfs()\n\nprint(cnt)\nanswer = []\nfor i in range(1, cnt + 1):\n ans = 0\n for elem in result:\n for el in elem:\n if el == i:\n ans += 1 \n\n answer.append(ans)\n\nanswer.sort()\nfor elem in answer:\n print(elem)","repo_name":"bumheeleee/problemSolvingAbility","sub_path":"level2/장애인식프로그램.py","file_name":"장애인식프로그램.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"74530228357","text":"__author__ = 'bogdan'\n\nimport numpy as np\nimport pandas as pd\n\nTRAINING_IN = \"../resources/training.csv\"\nVALIDATION_IN = \"../resources/validation.csv\"\nTESTING_IN = \"../resources/testing.csv\"\nTESTING_OUT = './out/testing_y.out'\nVALIDATION_OUT = './out/validation_y.out'\n\n#some relevant column names\nheaders = [\"feature\" + str(x) for x in range(27)]\nheaders.append(\"cls\")\n\n\ndef read_x(filename=None, header_names=None):\n if header_names is None:\n header_names = headers[:27]\n\n return pd.read_csv(filename, header=None, names=header_names)\n\n\ndef read_x_y(filename=None, header_names=None):\n if header_names is None:\n header_names = headers\n\n x_y = pd.read_csv(filename, header=None, names=header_names)\n\n return x_y\n\n\ndef 
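# The tail of the flood-fill solution above rescans the whole result grid once per
# block, which is O(blocks * n^2). A single Counter pass over the same `result`
# grid yields the identical ascending block sizes:
from collections import Counter

counts = Counter(v for row in result for v in row if v != 0)
print(len(counts))                    # number of blocks
for size in sorted(counts.values()):  # block sizes, ascending
    print(size)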
write_prediction(filename, prediction_list):\n prediction_list = np.array(prediction_list)\n prediction_list.tofile(file=filename, sep='\\n')","repo_name":"bvancea/ml-project-13","sub_path":"classification/src/python/io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19163706801","text":"#!/usr/bin/env python3\n\n# no support for ssl/wss\n# not scalable\n# uses ineff. thread to handle\n# no limit on file service/danger\n\n# Other - SSL:\n# openssl genrsa -des3 -out server.orig.key 2048\n# openssl rsa -in server.orig.key -out server.key\n# openssl req -new -key server.key -out server.csr\n# openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt\n# openssl req -new -x509 -days 365 -nodes -out cert.pem -keyout cert.pem\n\n# Other - Links:\n# https://github.com/enthought/Python-2.7.3/blob/master/Lib/SimpleHTTPServer.py\n# https://github.com/enthought/Python-2.7.3/blob/master/Lib/SimpleHTTPServer.py\n# https://blog.anvileight.com/posts/simple-python-http-server/\n# https://gist.github.com/bradmontgomery/2219997\n# https://github.com/enthought/Python-2.7.3/blob/master/Lib/BaseHTTPServer.py\n# https://www.afternerd.com/blog/python-http-server/\n# https://www.acmesystems.it/python_http\n# https://github.com/pikhovkin/simple-websocket-server/blob/master/simple_websocket_server/__init__.py\n\nimport socket\nimport hashlib\nimport base64\nimport time\nimport logging\nimport sys\nimport ssl\nimport json\nfrom threading import Thread\nimport string\nimport random\nimport os\n\nif sys.version_info[0] < 3:\n raise Exception(\"Must be using Python 3\")\n\n__responses = {\n 100: ('Continue', 'Request received, please continue'),\n 101: ('Switching Protocols',\n 'Switching to new protocol; obey Upgrade header'),\n\n 200: ('OK', 'Request fulfilled, document follows'),\n 201: ('Created', 'Document created, URL follows'),\n 202: ('Accepted',\n 'Request accepted, processing continues off-line'),\n 203: ('Non-Authoritative Information', 'Request fulfilled from cache'),\n 204: ('No Content', 'Request fulfilled, nothing follows'),\n 205: ('Reset Content', 'Clear input form for further input.'),\n 206: ('Partial Content', 'Partial content follows.'),\n\n 300: ('Multiple Choices',\n 'Object has several resources -- see URI list'),\n 301: ('Moved Permanently', 'Object moved permanently -- see URI list'),\n 302: ('Found', 'Object moved temporarily -- see URI list'),\n 303: ('See Other', 'Object moved -- see Method and URL list'),\n 304: ('Not Modified',\n 'Document has not changed since given time'),\n 305: ('Use Proxy',\n 'You must use proxy specified in Location to access this '\n 'resource.'),\n 307: ('Temporary Redirect',\n 'Object moved temporarily -- see URI list'),\n\n 400: ('Bad Request',\n 'Bad request syntax or unsupported method'),\n 401: ('Unauthorized',\n 'No permission -- see authorization schemes'),\n 402: ('Payment Required',\n 'No payment -- see charging schemes'),\n 403: ('Forbidden',\n 'Request forbidden -- authorization will not help'),\n 404: ('Not Found', 'Nothing matches the given URI'),\n 405: ('Method Not Allowed',\n 'Specified method is invalid for this resource.'),\n 406: ('Not Acceptable', 'URI not available in preferred format.'),\n 407: ('Proxy Authentication Required', 'You must authenticate with '\n 'this proxy before proceeding.'),\n 408: ('Request Timeout', 'Request timed out; try again later.'),\n 409: ('Conflict', 'Request 
conflict.'),\n 410: ('Gone',\n 'URI no longer exists and has been permanently removed.'),\n 411: ('Length Required', 'Client must specify Content-Length.'),\n 412: ('Precondition Failed', 'Precondition in headers is false.'),\n 413: ('Request Entity Too Large', 'Entity is too large.'),\n 414: ('Request-URI Too Long', 'URI is too long.'),\n 415: ('Unsupported Media Type', 'Entity body in unsupported format.'),\n 416: ('Requested Range Not Satisfiable',\n 'Cannot satisfy request range.'),\n 417: ('Expectation Failed',\n 'Expect condition could not be satisfied.'),\n\n 500: ('Internal Server Error', 'Server got itself in trouble'),\n 501: ('Not Implemented',\n 'Server does not support this operation'),\n 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),\n 503: ('Service Unavailable',\n 'The server cannot process the request due to a high load'),\n 504: ('Gateway Timeout',\n 'The gateway server did not receive a timely response'),\n 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),\n}\n\n# every connection is handled with a thread, may not scale well, but very easy to deal with\nclass SocketHanlderThread(Thread):\n def __init__(self, socket, address, server):\n Thread.__init__(self)\n self.socket = socket\n self.address = address\n self.path = None\n self.server = server\n self.daemon = True\n\n def parse_ContentDisposition(self, header):\n extract = {}\n subname = None\n subvalue = None\n i = 0\n j = 0\n lx = len(header)\n for c in header:\n j = j + 1\n\n if c == ':' and \"identifier\" not in extract:\n extract[\"identifier\"] = header[i:j-1]\n i = j + 1\n continue\n\n if c == \"=\" and not subname:\n subname = header[i:j-1]\n i = j\n continue\n\n if c == ';' and subname and not subvalue:\n subvalue = header[i:j-1]\n extract[subname] = subvalue\n subname = None\n subvalue = None\n i = j + 1\n continue\n\n if lx == j and subname and not subvalue:\n subvalue = header[i:j]\n extract[subname] = subvalue\n subname = None\n subvalue = None\n i = j + 1\n continue\n\n if c == ';':\n extract[header[i:j-1]] = header[i:j-1]\n i = j + 1\n continue\n\n if lx == j:\n extract[header[i:j-1]] = header[i:j-1]\n continue\n return extract\n \n def run(self):\n try:\n self.requestlines = []\n self.headers = {}\n\n # just about enough 4KiB to understand the request \n request = self.socket.recv(4 * 1024)\n\n if len(request) < 5:\n return\n\n # primary request with headers extraction\n lend = 0\n lbegin = 0\n while 1:\n lbegin = lend\n lend = request.index(b'\\r\\n', lend)\n if lbegin == lend:\n lend = lend + len(b'\\r\\n')\n break\n else:\n header = (request[lbegin:lend]).decode('ascii')\n \n # todo prper header extraction\n if header.startswith('Content-Length:'):\n self.headers[\"Content-Length\"] = int(header.split()[1])\n\n elif header.startswith('Range: bytes'):\n range = header[len('Range: bytes='):]\n self.headers[\"Range-Start\"], self.headers[\"Range-End\"] = range.split('-')\n\n elif header.startswith('Content-Type:'):\n for t in header.split():\n if t.startswith('application/x-www-form-urlencoded'):\n self.headers[\"Content-Type\"] = 'application/x-www-form-urlencoded'\n\n elif t.startswith('multipart/form-data'):\n self.headers[\"Content-Type\"] = 'multipart/form-data' \n\n elif t.startswith('application/json'):\n self.headers[\"Content-Type\"] = 'application/json' \n\n elif t.startswith('charset'): # todo exctract charset\n self.headers[\"Content-Type-charset\"] = 'utf-8'\n \n elif t.startswith('boundary'): \n self.headers[\"Content-Type-boundary\"] = 
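# The 'Range: bytes' branch above shadows the built-in name `range` and leaves
# Range-End as '' for open-ended requests; a small regex keeps both forms explicit.
# A hedged sketch of an equivalent parser:
import re

def parse_range(header_value):
    m = re.match(r'bytes=(\d*)-(\d*)$', header_value)  # either endpoint may be empty
    if not m:
        return None
    start = int(m.group(1)) if m.group(1) else 0
    end = int(m.group(2)) if m.group(2) else None
    return start, end

print(parse_range('bytes=100-'))   # (100, None)
print(parse_range('bytes=0-499'))  # (0, 499)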
t.split('=')[1]\n \n elif header.startswith('Sec-WebSocket-Key:'):\n self.headers[\"Sec-WebSocket-Key\"] = header.split()[1]\n\n elif header.startswith('Authorization:'):\n self.headers[\"Authorization\"] = header.split()[2]\n\n elif header.startswith('Cookie:'):\n self.headers[\"Cookie\"] = header[len(\"Cookie: \"):]\n\n self.requestlines.append(header)\n lend = lend + len(b'\\r\\n')\n\n self.sessionid = None\n \n # additional parsing\n if \"Cookie\" in self.headers:\n q = [h.split('=') for h in self.headers['Cookie'].split()]\n cookies = dict((x,y.rstrip(';')) for x,y in q)\n if 'sessionid' in cookies:\n self.sessionid = cookies['sessionid']\n\n if \"Content-Type\" in self.headers:\n if self.headers[\"Content-Type\"] == 'application/x-www-form-urlencoded':\n self.data = request[lend:]\n\n # receive rest of expected data, once match\n while len(self.data) < self.headers[\"Content-Length\"] :\n self.socket.settimeout(30)\n self.data = self.data + self.socket.recv(4 * 1024)\n self.socket.settimeout(None)\n\n t = self.data.decode('utf-8')\n self.data = dict(item.split(\"=\") for item in t.split(\"&\"))\n\n if self.headers[\"Content-Type\"] == 'application/json':\n self.data = request[lend:]\n\n # receive rest of expected data, once match\n while len(self.data) < self.headers[\"Content-Length\"] :\n self.socket.settimeout(30)\n self.data = self.data + self.socket.recv(4 * 1024)\n self.socket.settimeout(None)\n\n t = self.data.decode('utf-8')\n self.data = json.loads(t)\n\n # https://www.w3.org/TR/html401/interact/forms.html#h-17.13.4\n # yea, not easy, boundry might change, making things more complicated\n if self.headers[\"Content-Type\"] == 'multipart/form-data':\n self.data = []\n self.content = request[lend:]\n\n while len(self.content) < self.headers[\"Content-Length\"]:\n self.socket.settimeout(30)\n self.content = self.content + self.socket.recv(4 * 1024)\n self.socket.settimeout(None)\n\n lend = 0\n lbegin = 0\n \n formdata = {}\n while 1:\n lbegin = lend\n lend = self.content.index(b'\\r\\n', lend)\n if lbegin == lend:\n lend = lend + len(b'\\r\\n')\n lbegin = lend \n lend = self.content.index(b'\\r\\n' + bytes(formdata[\"Content-Disposition-boundary\"], \"ascii\"), lend)\n formdata[\"Content\"] = self.content[lbegin:lend]\n lend = lend + len(b'\\r\\n')\n self.data.append(formdata)\n formdata = {}\n else:\n header = (self.content[lbegin:lend]).decode('ascii')\n\n if header.endswith(self.headers[\"Content-Type-boundary\"] + '--'):\n break\n \n elif header.startswith('--' + self.headers[\"Content-Type-boundary\"]):\n formdata[\"Content-Disposition-boundary\"] = '--' + self.headers[\"Content-Type-boundary\"]\n\n elif header.startswith('Content-Disposition:'):\n t = self.parse_ContentDisposition(header)\n \n if 'form-data' in t:\n formdata[\"Content-Disposition\"] = 'form-data'\n\n if 'name' in t:\n formdata[\"Content-Disposition-name\"] = t['name'].strip('\"')\n \n if 'filename' in t:\n formdata[\"Content-Disposition-filename\"] = t['filename'].strip('\"')\n\n elif header.startswith('Content-Type:'):\n formdata[\"Content-Type\"] = header.split()[1]\n\n lend = lend + len(b'\\r\\n')\n\n self.command, self.route, self.version = None, None, None\n\n # extracting verb, route, version\n if len(self.requestlines) > 0:\n logging.info(self.requestlines[0])\n words = self.requestlines[0].split()\n\n if len(words) == 3:\n self.command, self.route, self.version = words\n elif len(words) == 2:\n self.command, self.route = words\n\n self.server.endpoint.Respond(self)\n \n except BrokenPipeError as 
ex:\n logging.error(ex)\n \n except Exception as ex:\n logging.critical(ex)\n\n if self.server.exceptionEndpoint:\n self.server.ExceptionEndpoint.Respond(self)\n\n finally:\n self.socket.close()\n\nclass Endpoint(object):\n def __init__(self):\n self.Owner = None\n\n def randomString(self, stringLength=64):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_lowercase + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n \n def CanRespond(self, sockth):\n return True\n\n def Respond(self, sockth):\n return True\n\n\nclass RouteEndpoint(Endpoint):\n def __init__(self, route):\n super().__init__()\n self.route = route\n\n def CanRespond(self, sockth):\n t = super().CanRespond(sockth)\n t &= sockth.route == self.route\n return t\n\n def Respond(self, sockth):\n return super().Respond(sockth)\n\nclass MultiRouteEndpoint(Endpoint):\n def __init__(self, endpoints):\n super().__init__()\n \n self.endpoints = endpoints\n \n for e in self.endpoints:\n e.Owner = self\n\n def Respond(self, sockth):\n t = super().Respond(sockth)\n\n for e in self.endpoints:\n if e.CanRespond(sockth):\n e.Respond(sockth)\n break\n\n return True\n\nclass WWWAuthenticateBasicEndpoint(Endpoint):\n def __init__(self, username, password , passEndpoint):\n super().__init__()\n self.username = username\n self.password = password\n self.sessions = {}\n self.passEndpoint = passEndpoint \n self.passEndpoint.Owner = self\n\n def Authenticate(self, sockth):\n if self.sessions[sockth.sessionid] == False:\n if 'Authorization' in sockth.headers:\n token = base64.b64decode(sockth.headers['Authorization']).decode(\"ascii\")\n \n if token == self.username + ':' + self.password:\n self.sessions[sockth.sessionid] = True\n \n return self.sessions[sockth.sessionid]\n \n def Respond(self, sockth):\n t = super().Respond(sockth)\n\n if sockth.sessionid == None or sockth.sessionid != None and sockth.sessionid not in self.sessions:\n sockth.sessionid = self.randomString()\n logging.debug('new session ' + sockth.sessionid)\n self.sessions[sockth.sessionid] = False\n\n sockth.socket.send(b'HTTP/1.1 401 Unauthorized\\r\\n')\n sockth.socket.send(b'WWW-Authenticate: Basic realm=\"User Visible Realm\", charset=\"UTF-8\"\\r\\n')\n sockth.socket.send(b'Set-Cookie: sessionid=' + bytes(str(sockth.sessionid), \"ascii\") + b'\\r\\n')\n sockth.socket.send(b'Connection: Closed\\r\\n')\n sockth.socket.send(b'\\r\\n')\n sockth.socket.send(b'')\n return False\n\n if self.Authenticate(sockth):\n self.passEndpoint.Respond(sockth)\n else:\n # if self.failEndpoint:\n # self.failEndpoint.Respond(sockth)\n # self.sessions.pop(sockth.sessionid)\n # sockth.socket.send(b'Set-Cookie: sessionid=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\r\\n')\n # else:\n sockth.socket.send(b'HTTP/1.1 401 Unauthorized\\r\\n')\n sockth.socket.send(b'WWW-Authenticate: Basic realm=\"User Visible Realm\", charset=\"UTF-8\"\\r\\n')\n sockth.socket.send(b'Connection: Closed\\r\\n')\n sockth.socket.send(b'\\r\\n')\n sockth.socket.send(b'Login failed!')\n\n return True\n\nclass StaticEndpoint(Endpoint):\n def __init__(self, content):\n super().__init__()\n self.content = content\n\n def Respond(self, sockth):\n super().Respond(sockth)\n sockth.socket.send(b'HTTP/1.1 200 OK\\r\\n')\n sockth.socket.send(b'Content-Length: ' + bytes(str(len(self.content)), \"ascii\") + b'\\r\\n')\n sockth.socket.send(b'Connection: Closed\\r\\n')\n sockth.socket.send(b'\\r\\n')\n sockth.socket.send(bytes(self.content, \"ascii\"))\n\n\nclass 
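# WWWAuthenticateBasicEndpoint.Authenticate above compares the decoded Authorization
# header to "username:password" -- that is all HTTP Basic auth is. The round trip
# for the admin/admin demo credentials:
import base64

token = base64.b64encode(b'admin:admin').decode('ascii')
print('Authorization: Basic ' + token)          # Authorization: Basic YWRtaW46YWRtaW4=
print(base64.b64decode(token).decode('ascii'))  # admin:admin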
StaticFileEndpoint(Endpoint):\n def __init__(self, path, cache = False):\n super().__init__()\n self.path = path\n\n def CanRespond(self, sockth):\n t = super().CanRespond(sockth)\n t &= os.path.exists(self.path + sockth.route)\n return t\n\n def Respond(self, sockth):\n lenFile = 0\n partial = False\n \n start = 0\n end = 0\n lenBuff = 16*1024*1024\n\n if 'Range-Start' in sockth.headers:\n start = int(sockth.headers['Range-Start'])\n \n if 'Range-End' in sockth.headers:\n if sockth.headers['Range-End'] != '':\n end = int(sockth.headers['Range-End'])\n lenBuff = end - start + 1\n\n try:\n lenFile = os.path.getsize(self.path + sockth.route)\n\n # todo implement larger file streaming\n if lenFile > lenBuff:\n partial = True\n \n with open(self.path + sockth.route, 'rb') as f:\n f.seek(start)\n content = f.read(lenBuff)\n lenBuff = len(content)\n\n if partial and end == 0:\n end = start + lenBuff - 1\n\n except FileNotFoundError as ex:\n logging.critical(ex)\n sockth.exception = ex\n sockth.socket.send(b'HTTP/1.1 404 Not Found\\r\\n')\n return False\n\n mime = 'application/octet-stream'\n \n # use import mimetypes if gets complicated\n if sockth.route.endswith(\".html\"):\n mime = 'text/html'\n elif sockth.route.endswith(\".ico\"):\n mime = 'image/x-icon'\n elif sockth.route.endswith(\".css\"):\n mime = 'text/css'\n elif sockth.route.endswith(\".jpg\"):\n mime = 'image/jpeg'\n elif sockth.route.endswith(\".js\"):\n mime = 'application/javascript'\n elif sockth.route.endswith(\".mp4\"):\n mime = 'video/mp4'\n\n if partial:\n sockth.socket.send(b'HTTP/1.1 206 Partial Content\\r\\n')\n else:\n sockth.socket.send(b'HTTP/1.1 200 OK\\r\\n')\n \n if partial:\n sockth.socket.send(b'Accept-Ranges: bytes\\r\\n')\n \n sockth.socket.send(b'Content-Length: ' + bytes(str(lenBuff), \"ascii\") + b'\\r\\n')\n sockth.socket.send(b'Content-Type: ' + bytes(mime, \"ascii\") + b'\\r\\n')\n \n if partial:\n sockth.socket.send(b'Content-Range: bytes ' + bytes(str(start), \"ascii\") + b'-' + bytes(str(end), \"ascii\") + b'/' + bytes(str(lenFile), \"ascii\") + b'\\r\\n')\n\n \n sockth.socket.send(b'Connection: Closed\\r\\n')\n sockth.socket.send(b'\\r\\n')\n sockth.socket.send(content)\n\n return True\n\nclass WebServiceEndpoint(RouteEndpoint):\n def __init__(self, route, callback):\n super().__init__(route)\n self.callback = callback\n\n def OnReady(self, sockth):\n # urlparse.parse_qs(\"Name1=Value1;Name2=Value2;Name3=Value3\")\n method = self.callback\n content = method(**sockth.data)\n lenContent = bytes(str(len(content)), \"ascii\")\n sockth.socket.send(b'HTTP/1.1 200 OK\\r\\n')\n sockth.socket.send(b'Content-Length: ' + lenContent + b'\\r\\n')\n sockth.socket.send(b'Connection: Closed\\r\\n')\n sockth.socket.send(b'\\r\\n')\n \n if content:\n sockth.socket.send(content)\n \n def Respond(self, sockth):\n if not super().Respond(sockth):\n return False \n self.OnReady(sockth)\n\nclass ServerSentEventEndpoint(RouteEndpoint):\n # https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events\n # * IE, Edge Not supported\n # event: A string identifying the type of event described. If this is specified, an event will be dispatched on the browser to the listener for the specified event name;\n # the website source code should use addEventListener() to listen for named events. The onmessage handler is called if no event name is specified for a message.\n # data: The data field for the message. 
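# StaticFileEndpoint.Respond above already notes "use import mimetypes if gets
# complicated"; the stdlib guess replaces the whole extension ladder:
import mimetypes

mime = mimetypes.guess_type('/contents/clip.mp4')[0] or 'application/octet-stream'
print(mime)  # video/mp4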
When the EventSource receives multiple consecutive lines that begin with data:,\n # it will concatenate them, inserting a newline character between each one. Trailing newlines are removed.\n # id: The event ID to set the EventSource object's last event ID value.\n # retry: The reconnection time to use when attempting to send the event. This must be an integer, specifying the reconnection time in milliseconds.\n # If a non-integer value is specified, the field is ignored.\n def __init__(self, route):\n super().__init__(route)\n\n def Send(self, sockth, id, event, data, retry):\n if id:\n sockth.socket.send(b'id: ' + id + b'\\r\\n')\n\n if event:\n sockth.socket.send(b'event: ' + event + b'\\r\\n')\n\n if data:\n sockth.socket.send(b'data: ' + data + b'\\r\\n')\n \n if retry:\n sockth.socket.send(b'retry: ' + retry + b'\\r\\n')\n\n sockth.socket.send(b'\\r\\n')\n\n def OnReady(self, sockth):\n while 1:\n self.Send(sockth, None, None, b'Server Side Event - default message', None)\n time.sleep(1)\n self.Send(sockth, None, b'customevent', b'Server Side Event - customevent', None)\n time.sleep(1)\n\n def Respond(self, sockth):\n if not super().Respond(sockth):\n return False\n \n sockth.socket.send(b'HTTP/1.1 200 OK\\r\\n')\n sockth.socket.send(b'Content-Type: text/event-stream\\r\\n')\n sockth.socket.send(b'Cache-Control: no-cache\\r\\n')\n sockth.socket.send(b'\\r\\n')\n\n self.OnReady(sockth)\n\nclass WebSocketEndpoint(RouteEndpoint):\n def __init__(self, route, onReady):\n super().__init__(route)\n self.onReady = onReady\n\n def Receive(self, sockth):\n # todo implement big packets recev\n # partial recv looks like wont happen, how ever multiple packets may need to be received and combined\n request = sockth.socket.recv(4096)\n fin = request[0] & 0x80 == 128\n opcode = request[0] & (0xF)\n Mask = request[1] & 0x80 == 128\n\n payload = bytearray()\n plMask = None\n plFlag = request[1] & 0x7F\n plLen = 0\n plStart = 0\n\n if plFlag < 126:\n plLen = plFlag\n plStart = 2\n if Mask:\n plMask = [request[2], request[3], request[4], request[5]]\n plStart = 2 + 4\n\n elif plFlag == 126:\n plLen = (request[2] << 8) + request[3]\n plStart = 4\n if Mask:\n plMask = [request[4], request[5], request[6], request[7]]\n plStart = 4 + 4\n\n elif plFlag == 127:\n plLen = (request[2] << 24) + (request[3] <<\n 16) + (request[4] << 8) + request[5]\n plStart = 6\n if Mask:\n plMask = [request[6], request[7], request[8], request[9]]\n plStart = 6 + 4\n\n i = 0\n for b in request[plStart:]:\n if Mask:\n payload.append(b ^ plMask[i % 4])\n else:\n payload.append(b)\n i = i + 1\n plLen = plLen - 1\n return payload\n\n def Send(self, sockth, payload):\n pllen = len(payload)\n plbytes = bytearray()\n b1, b2 = 0, 0\n opcode = 0x1\n fin = 1\n b1 = opcode | fin << 7\n\n if pllen < 125:\n b2 |= pllen\n\n elif pllen > 124:\n raise NotImplementedError\n\n plbytes.append(b1)\n plbytes.append(b2)\n plbytes.extend(payload)\n sockth.socket.send(plbytes)\n\n def OnReady(self, sockth):\n while 1:\n response = self.Receive(sockth)\n if response:\n self.Send(sockth, response)\n\n def Respond(self, sockth):\n if not super().Respond(sockth):\n return False\n\n if \"Sec-WebSocket-Key\" in sockth.headers:\n skey = sockth.headers[\"Sec-WebSocket-Key\"]\n stoken = skey + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' # MAGICSTRING\n stokensha1 = hashlib.sha1(stoken.encode('utf-8'))\n secWebSocketAccept = base64.b64encode(stokensha1.digest())\n sockth.socket.send(b'HTTP/1.1 101 Switching Protocols\\r\\n')\n sockth.socket.send(b'Upgrade: 
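# The Sec-WebSocket-Accept value computed in WebSocketEndpoint.Respond follows
# RFC 6455: base64(SHA-1(client key + fixed GUID)). The RFC's own worked example:
import base64, hashlib

key = 'dGhlIHNhbXBsZSBub25jZQ=='  # sample client key from RFC 6455
magic = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
accept = base64.b64encode(hashlib.sha1((key + magic).encode('ascii')).digest())
print(accept.decode('ascii'))     # s3pPLMBiTxaQ9kYGzzhZRbK+xOo=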
websocket\\r\\n')\n sockth.socket.send(b'Connection: Upgrade\\r\\n')\n sockth.socket.send(b'Sec-WebSocket-Accept: ' + secWebSocketAccept + b'\\r\\n')\n sockth.socket.send(b'\\r\\n')\n self.OnReady(sockth)\n else:\n sockth.exception = Exception(\"No Sec-WebSocket-Key in headers\")\n\nclass PythonWebCore(Thread):\n def __init__(self, hostname, port = 80, cert = None):\n Thread.__init__(self)\n self.endpoint = None\n self.hostname = hostname\n self.port = port\n self.daemon = True\n self.listening = False\n self.cert = cert\n self.exceptionEndpoint = None\n\n def RegisterEndpoint(self, endpoint):\n logging.debug('registering endpoint ' + type(endpoint).__name__)\n self.endpoint = endpoint\n return endpoint\n\n def stop(self):\n \"\"\"\n properly kills the process: https://stackoverflow.com/a/16736227/4225229\n \"\"\"\n self.listening = False\n time.sleep(1)\n\n # connect again to release the listener for terminating the connection\n t = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n t.connect((self.hostname, self.port))\n t.close()\n\n def run(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socksrvr:\n socksrvr.bind((self.hostname, self.port))\n socksrvr.listen(5) # max backlog of connections\n self.listening = True\n\n logging.info('listening on ' + self.hostname + ':' + str(self.port))\n\n while self.listening:\n try:\n sockclient, addr = socksrvr.accept()\n \n if self.cert:\n try:\n sockclient = ssl.wrap_socket(sockclient, certfile = self.cert, server_side = True)\n\n except Exception as ex:\n logging.critical(ex)\n\n # client rejects the certifcate?\n if ex.args[1].startswith('[SSL: SSLV3_ALERT_CERTIFICATE_UNKNOWN]'):\n continue\n\n # a regular socket was connect to secure endpoint\n if ex.args[1].startswith('[SSL: HTTP_REQUEST]'):\n continue\n\n logging.debug('connect ' + ':'.join(str(x) for x in addr))\n SocketHanlderThread(sockclient, addr, self).start()\n \n except Exception as ex:\n logging.critical(ex)\n\n socksrvr.close()\n break\n\nif __name__ == '__main__':\n def TestSocket(payload):\n p = payload.decode(\"ascii\")\n logging.debug(p)\n return bytes(p, \"utf-8\")\n\n def TestAjax(param1, param2, param3):\n logging.debug('%s %s %s ', param1, param2, param3)\n return b'OK'\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - [%(levelname)-5.5s] - %(message)s')\n\n pwc = PythonWebCore('', 65125)\n\n # pwc.RegisterEndpoint()\n\n #\n # # ws.OnReady = TestCustomOnReady\n\n from os import path as ospath\n ep_staticfile = StaticFileEndpoint(ospath.dirname(ospath.abspath(__file__)) + '/contents')\n ep_content = StaticEndpoint('Login Failed!')\n ep_webservice = WebServiceEndpoint('/TestAjax', TestAjax)\n ep_serverevent = ServerSentEventEndpoint('/TestSSE')\n ep_websocket = WebSocketEndpoint('/TestSocket', TestSocket)\n ep_multiroute = MultiRouteEndpoint([ep_webservice, ep_serverevent, ep_websocket, ep_staticfile])\n ep_authenticate = WWWAuthenticateBasicEndpoint(\"admin\", \"admin\", ep_multiroute)\n \n pwc.RegisterEndpoint(ep_authenticate)\n\n pwc.start()\n\n logging.debug('http://localhost:' + str(pwc.port) + '/PythonWebCore.html' + ' login using admin/admin')\n logging.info('Press Ctrl+C to terminate.')\n\n try:\n pwc.join()\n \n except KeyboardInterrupt as ex:\n logging.critical(ex)\n pwc.stop()\n pwc.join()\n\n except Exception as ex:\n logging.critical(ex)\n pwc.stop()\n 
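# PythonWebCore.run above relies on ssl.wrap_socket, which was deprecated in
# Python 3.7 and removed in 3.12. A hedged SSLContext equivalent for the same
# server-side wrap, assuming the cert.pem bundle from the openssl notes up top:
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(certfile='cert.pem')
# inside run(): sockclient = ctx.wrap_socket(sockclient, server_side=True)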
pwc.join()","repo_name":"aaramians/SimplePyWebServer","sub_path":"PythonWebCore.py","file_name":"PythonWebCore.py","file_ext":"py","file_size_in_byte":30119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4015515174","text":"from scipy.stats import norm\n\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport scipy as sp\nimport pdb\nfrom sklearn.cluster import KMeans\n\n\n\n\n# [0] #means line 0 of your matrix\n# [(0,0)] #means cell at 0,0 of your matrix\n# [0:1] #means lines 0 to 1 excluded of your matrix\n# [:1] #excluding the first value means all lines until line 1 excluded\n# [1:] #excluding the last param mean all lines starting form line 1 included\n# [:] #excluding both means all lines\n# [::2] #the addition of a second ':' is the sampling. (1 item every 2)\n# [::] #exluding it means a sampling of 1\n# [:,:] #simply uses a tuple (a single , represents an empty tuple) instead of an index.\n\n\nCHART_DIR = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"charts\")\nif not os.path.exists(CHART_DIR):\n os.mkdir(CHART_DIR)\n\nnum_clusters = 3\nseed = 2\n\n\ndef createNormalizedDataSets():\n xw1 = norm(loc=0.3, scale=.15).rvs(20)\n yw1 = norm(loc=0.3, scale=.15).rvs(20)\n\n xw2 = norm(loc=0.7, scale=.15).rvs(20)\n yw2 = norm(loc=0.7, scale=.15).rvs(20)\n\n xw3 = norm(loc=0.2, scale=.15).rvs(20)\n yw3 = norm(loc=0.8, scale=.15).rvs(20)\n\n x = sp.append(sp.append(xw1, xw2), xw3)\n y = sp.append(sp.append(yw1, yw2), yw3)\n\n return x, y\n\n# define the scope\nrangeScope = np.arange(0, 1, 0.001)\nXscope, Yscope = np.meshgrid(rangeScope, rangeScope)\n\n# create random dataset of 2 vectors, one for each word\nxDataset, yDataset = createNormalizedDataSets()\ntraining = np.array(list(zip(xDataset, yDataset)))\n\n# create a testing dataset from meshgrid\ntesting = np.vstack([Xscope.ravel(), Yscope.ravel()]).T\n\n\nclass Plotter():\n def main(self):\n\n # plot the vectors\n self.plot_chart(\"Vectors\")\n\n # plot the clusters iteration\n for i in [1, 2, 10]:\n prediction, centers = self.predictKMeans(i)\n predictionReshaped = prediction.reshape(Xscope.shape)\n self.plot_chart(\"Iteration_\" + str(i), predictionReshaped, centers)\n\n def predictKMeans(self, iteration):\n km = KMeans(init=\"random\", n_clusters=num_clusters, verbose=1, \\\n n_init=1, max_iter=iteration, random_state=seed)\n km.fit(training)\n\n return km.predict(testing), km.cluster_centers_\n\n def plot_chart(self, title, C=None, centers=None):\n plt.figure(num=None, figsize=(8, 6))\n plt.clf()\n\n if C != None:\n plt.pcolormesh(Xscope, Yscope, C, cmap=plt.cm.Blues)\n for i in range(len(centers)):\n plt.scatter(centers[i][0], centers[i][1], s=10, marker='x', linewidth=2, color='g')\n\n plt.scatter(xDataset, yDataset, s=3, color='r')\n plt.title(title)\n plt.xlabel(\"word1\")\n plt.ylabel(\"word2\")\n plt.xlim()\n plt.xlim(Xscope.min(), Xscope.max())\n plt.ylim(Yscope.min(), Yscope.max())\n plt.autoscale(tight=True)\n plt.grid(True, linestyle='-', color='0.75')\n plt.savefig(os.path.join(CHART_DIR, title))\n\n\nif __name__ == \"__main__\":\n Plotter().main()\n","repo_name":"clementlefevre/Matching-her-lines","sub_path":"main/ch03/plot_cluster_clement.py","file_name":"plot_cluster_clement.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5159720293","text":"\r\n#폴더를 만들거나 삭제 또는 이동, 파일 수정, 삭제등은 os영역이다. 
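# In plot_chart above (the KMeans demo), `if C != None:` compares a NumPy array
# elementwise, and truth-testing the resulting boolean array raises ValueError;
# the identity test `C is not None` is the safe idiom:
import numpy as np

C = np.zeros((2, 2))
print(C is not None)  # True -- unambiguous
try:
    bool(C != None)   # elementwise comparison, then ambiguous truth value
except ValueError as e:
    print(e)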
\r\n\r\n# datasets\r\n# ㄴ cats_and_dogs_small \r\n# ㄴ train \r\n# ㄴ cats 폴더 순서대로 자동으로 라벨로 인신한다 \r\n# ㄴ dogs \r\n# ㄴ test #cats와 dogs 만들어야 함 \r\n# ㄴ validation cats와 dogs 만들어야 함 \r\n\r\n\r\nimport os, shutil \r\nfrom keras import layers\r\nfrom keras import models\r\nfrom keras.models import load_model\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras import optimizers\r\nimport pickle\r\nfrom keras.preprocessing import image\r\n\r\noriginal_dataset_dir='./datasets/cats_and_dogs/train'\r\nbase_dir ='./datasets/cats_and_dogs_small'\r\ntest_dir = base_dir+ '/test' #훈련용\r\ntrain_dir = base_dir+ '/train'#훈련용\r\nvalid_dir = base_dir+ '/validation'#훈련용\r\n\r\n\r\ntrain_cats_dir = train_dir + \"/cats\"\r\ntrain_dogs_dir = train_dir + \"/dogs\"\r\n\r\ntest_cats_dir = test_dir+\"/cats\"\r\ntest_dogs_dir = test_dir+ \"/dogs\"\r\n\r\nvalid_cats_dir = valid_dir +\"/cats\"\r\nvalid_dogs_dir = valid_dir +\"/dogs\"\r\n#다운로드받은 이미지 있는곳 \r\n\r\ndef ImageDistribution():\r\n #소규모 데이터셋을 저장하는 디렉토리\r\n if( os.path.exists(base_dir)): #이미 경로가 존재하면 \r\n shutil.rmtree(base_dir )\r\n #이미지 포함 base_dir 아래의 모든 경로 삭제 \r\n os.mkdir(base_dir) #디렉토리 만들기 \r\n os.mkdir(train_dir)\r\n os.mkdir(valid_dir)\r\n os.mkdir(test_dir)\r\n os.mkdir(train_cats_dir)\r\n os.mkdir(train_dogs_dir)\r\n os.mkdir(test_cats_dir)\r\n os.mkdir(test_dogs_dir)\r\n os.mkdir(valid_cats_dir)\r\n os.mkdir(valid_dogs_dir)\r\n \r\n\r\n #파일 복사하기 \r\n #디렉토리내의 파일 개수 조사하기 \r\n totalCount = len( os.listdir(original_dataset_dir))\r\n #os.listdir(경로명 ) 해당 경로에 있는 파일 목록을 가져온다 \r\n print(\"전체 개수 :\", totalCount)\r\n ImageCopy(train_cats_dir, 0, 1000, \"cat\")\r\n ImageCopy(train_dogs_dir, 0, 1000, \"dog\")\r\n ImageCopy(test_cats_dir, 1000, 1500, \"cat\")\r\n ImageCopy(test_cats_dir, 1000, 1500, \"dog\")\r\n ImageCopy(valid_cats_dir,1500, 2000, \"cat\")\r\n ImageCopy(valid_dogs_dir,1500, 2000, \"dog\")\r\n \r\n\r\ndef ImageCopy(destdir, start, end, imagename):\r\n # 파일명들이 cat.0.jpg, cat.1.jpg... 
파일이름을 만들어내야 한다\r\n fnames = [\"{}.{}.jpg\".format(imagename,i) for i in range(start,end)]\r\n print(fnames[:5])\r\n for fname in fnames:\r\n src = original_dataset_dir+\"/\"+fname # 원본파일명\r\n dest = destdir+\"/\"+fname # 복사할 파일명\r\n shutil.copyfile(src,dest)\r\n\r\n\r\n# 모델 구성하기\r\ndef makeModel():\r\n model = models.Sequential()\r\n # 컨버넷 추가 - 컨보넷은 3차원 이미지의 특성을 추출한다.\r\n # 보통 3,3 또는 5,5 필터를 사용한다\r\n # 3d tensor를 입력으로 사용한다\r\n # 32 -> 출력할때 unit의 개수\r\n\r\n model.add(layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(150,150,3)))\r\n\r\n model.add(layers.MaxPooling2D(2,2))\r\n model.add(layers.Conv2D(64, (5,5), activation=\"relu\"))\r\n model.add(layers.MaxPooling2D(2,2))\r\n\r\n model.add(layers.Conv2D(128, (3,3), activation=\"relu\"))\r\n model.add(layers.MaxPooling2D(2,2))\r\n\r\n model.add(layers.Conv2D(128, (3,3), activation=\"relu\"))\r\n model.add(layers.MaxPooling2D(2,2))\r\n\r\n model.add(layers.Flatten())\r\n\r\n # 출력층\r\n model.add(layers.Dense(1, activation=\"sigmoid\"))\r\n \r\n model.compile(loss=\"binary_crossentropy\",\r\n optimizer=optimizers.RMSprop(lr=1e-4),\r\n metrics=[\"acc\"])\r\n\r\n # 모델 구조 출력하기\r\n model.summary()\r\n return model\r\n\r\n# 데이터 전처리\r\ndef dataPreprocessing():\r\n train_datagen = ImageDataGenerator(rescale=1./255)\r\n test_datagen = ImageDataGenerator(rescale=1./255)\r\n \r\n train_generator = train_datagen.flow_from_directory(\r\n train_dir, # target 디렉토리\r\n target_size=(150,150),\r\n batch_size=20,\r\n class_mode=\"binary\"\r\n )\r\n\r\n validation_generator = test_datagen.flow_from_directory(\r\n valid_dir,\r\n target_size=(150,150),\r\n batch_size=20,\r\n class_mode=\"binary\"\r\n )\r\n\r\n # 예측 - test_generator\r\n test_generator = test_datagen.flow_from_directory(\r\n test_dir,\r\n target_size=(150,150),\r\n batch_size=20,\r\n class_mode=\"binary\"\r\n )\r\n \r\n # 학습 - ImageGenerator 쓸때는 fit_generator 함수 사용\r\n model = makeModel()\r\n\r\n history = model.fit_generator(train_generator,\r\n steps_per_epoch=100,\r\n epochs=100,\r\n validation_data = validation_generator,\r\n validation_steps = 50)\r\n\r\n # 학습모델 저장하기\r\n model.save(\"dats_and_dogs_small_2.h5\")\r\n\r\n # history 는 pickle을 이용해서 저장해보자\r\n \r\n file = open(\"cats_and_dogs.hist\", \"wb\")\r\n pickle.dump(history, file=file)\r\n file.close()\r\n\r\ndef Predict():\r\n model = load_model(\"cats_and_dogs_small_2.h5\")\r\n file = open(\"cats_and_dogs.hist\",\"rb\")\r\n history = pickle.load(file)\r\n file.close()\r\n drawChart(history)\r\n\r\n\"\"\"\r\n print(\"개 : {} 고양이 : {}\".format(dogcount, catcount))\r\n\r\n score = model.evaluate_generator(test_generator, steps=50)\r\n print(\"Test loss : \", score[0])\r\n print(\"Test accuracy : \", score[1])\r\n\r\n\"\"\"\r\n\r\ndef drawChart(history):\r\n acc = history.history[\"acc\"]\r\n val_acc = history.history[\"val_acc\"]\r\n loss= history.history[\"loss\"]\r\n val_loss = history.history[\"val_loss\"]\r\n\r\n epochs = range(1, len(acc)+1)\r\n\r\n plt.plot(epochs, acc, \"bo\", label = \"Training acc\")\r\n plt.plot(epochs, val_acc, \"b\", label = \"Validation acc\")\r\n plt.title(\"Training and Validation Accuarcy\")\r\n plt.legend()\r\n plt.show()\r\n\r\n plt.figure() # 차트 refresh 다시그림\r\n plt.plot(epochs, loss, \"bo\", label = \"Training loss\")\r\n plt.plot(epochs, val_loss, \"b\", label = \"Training loss\")\r\n plt.title(\"Training and Validation Loss\")\r\n plt.legend()\r\n plt.show()\r\n\r\ndef ImageIncrease():\r\n #os.listdir(경로명)해당 폴더의 파일 목록을 가져온다\r\n filenamelist = os.listdir(train_cats_dir)\r\n 
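# Two pitfalls in the train/predict pair above: dataPreprocessing() saves
# "dats_and_dogs_small_2.h5" while Predict() loads "cats_and_dogs_small_2.h5"
# (one of the names is a typo), and a Keras History object holds a reference to
# its model, so pickling the whole object is fragile. Persisting only the
# .history dict avoids both, assuming the model/history returned by
# fit_generator above; drawChart can then index the dict directly:
import pickle

model.save("cats_and_dogs_small_2.h5")  # match the name Predict() loads
with open("cats_and_dogs.hist", "wb") as f:
    pickle.dump(history.history, f)     # plain dict of acc/val_acc/loss/val_loss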
print(filenamelist[:20])\r\n\r\n #경로 포함한 파이명\r\n filename = [train_cats_dir + \"/\" + fname for fname in filenamelist]\r\n print(filename[:20])\r\n\r\n increaseImage = filename[3] #0번 이미지를 증식해보자\r\n img = image.load_img(increaseImage)\r\n plt.imshow(img)\r\n plt.title=\"priginal image\"\r\n plt.show()\r\n \r\n #1.이미지를 numpy 배열로 바꿔야 한다\r\n data = image.img_to_array(img) # 150, 150, 3\r\n print(data)\r\n #2.원하는 차원으로 재가공\r\n data = data.reshape( (1,) + data.shape) # 1. 150,150,3\r\n print(data)\r\n #3.이미지 가공 객체 만들기\r\n datagen = ImageDataGenerator(\r\n rotation_range=20,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n shear_range=0.1,\r\n zoom_range=0.1,\r\n horizontal_flip=True,\r\n fill_mode=\"nearest\"\r\n )\r\n\r\n #4. ImageDataGenerator 객체로부터 새오룬 이미지 만들기\r\n #냅두면 계속 생성한다. 그래서 i를 카운트로 두고 4개 만들면\r\n #그만 만들게 하였음\r\n i = 0\r\n for batch in datagen.flow( data, batch_size=1):\r\n plt.figure(i) # i번쨰 위치에\r\n # datagen객체가 생성하는 이미지는 배열임\r\n #배열을 다시 이미지로 환원해야 한다 .array_to_image\r\n plt.imshow(image.array_to_img(batch[0]))\r\n i = i + 1\r\n if i%10 == 0:\r\n break\r\n plt.show()\r\n\r\ndef DataIncreaseFit():\r\n\r\n model = makeModel()\r\n\r\n #trainge\r\n datagen = ImageDataGenerator(\r\n rescale=.1/255,\r\n rotation_range=20,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n shear_range=0.1,\r\n zoom_range=0.1,\r\n horizontal_flip=True,\r\n fill_mode=\"nearest\"\r\n )\r\n\r\n test_datagen = ImageDataGenerator(.1/255)\r\n train_generator = datagen.flow_from_directory(\r\n train_dir,\r\n target_size=(150,150),\r\n batch_size=20, # batch_size와 steps_per_epoch 두개를 곱한개수\r\n # 1000개 넘으면 나머지 이미지는 자기가 증식\r\n class_mode=\"binary\"\r\n )\r\n\r\n validation_generator = test_datagen.flow_from_directory(\r\n valid_dir,\r\n target_size=(150,150),\r\n batch_size=10,\r\n class_mode=\"binary\"\r\n )\r\n\r\n test_generator = test_datagen.flow_from_directory(\r\n test_dir,\r\n target_size=(150,150),\r\n batch_size=10,\r\n class_mode=\"binary\"\r\n )\r\n\r\n history = model.fit_generator(\r\n train_generator,\r\n steps_per_epoch=100,\r\n epochs = 100,\r\n validation_data = validation_generator,\r\n validation_steps=50\r\n )\r\n\r\n # 학습모델 저장하기\r\n # model.save(\"dats_and_dogs_small_3.h5\")\r\n\r\n # history 는 pickle을 이용해서 저장해보자\r\n import pickle\r\n # file = open(\"cats_and_dogs2.hist\", \"wb\")\r\n pickle.dump(history, file=file)\r\n file.close()\r\n \r\n # 차트 그리기\r\n drawChart(history)\r\n\r\nif __name__ == '__main__':\r\n # ImageDistribution()\r\n dataPreprocessing()\r\n # Predict()\r\n # ImageIncrease()\r\n # DataIncreaseFit()","repo_name":"tkqksi731/AI-study","sub_path":"개고양이분류.py","file_name":"개고양이분류.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"10677835648","text":"import re\nimport os\nimport random\n\nimport requests\nfrom django.conf import settings\nfrom django.core.cache import cache\n\nfrom swiper import config\nfrom libs.qncloud import qn_upload\nfrom worker import celery_app\nfrom common import keys\n\n\ndef is_phonenum(phonenum):\n '''检查是否是一个正常的手机号'''\n if re.match(r'1[3456789]\\d{9}$', phonenum):\n return True\n else:\n return False\n\n\ndef gen_random_code(length=4):\n '''产生一个指定长度的随机码'''\n rand_num = random.randrange(0, 10 ** length)\n template = '%%0%sd' % length\n vcode = template % rand_num\n return vcode\n\n\ndef send_sms(phonenum, vcode):\n '''发送短信'''\n args = config.YZX_SMS_ARGS.copy() # 原型模式\n args['param'] = vcode\n args['mobile'] = phonenum\n\n response = 
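# gen_random_code above builds a '%%0%sd' template to zero-pad; an f-string with
# a nested width spec expresses the same thing directly:
import random

def gen_random_code(length=4):
    return f'{random.randrange(10 ** length):0{length}d}'

print(gen_random_code())  # e.g. '0042'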
requests.post(config.YZX_SMS_API, json=args)\n return response\n\n\ndef send_vcode(phonenum):\n '''发送验证码'''\n vcode = gen_random_code(4) # 产生一个随机的验证码\n print('->', vcode)\n response = send_sms(phonenum, vcode) # 发送验证码\n\n # 检查发送状态是否成功\n if response.status_code == 200:\n result = response.json()\n if result.get('code') == '000000':\n # 将验证码添加到缓存\n key = keys.VCODE_KEY % phonenum\n cache.set(key, vcode, 180)\n return True\n\n return False\n\n\ndef save_upload_file(uid, upload_file):\n '''保存上传的文件'''\n filename = 'Avatar-%s' % uid\n fullpath = os.path.join(settings.BASE_DIR, settings.MEDIA_ROOT, filename)\n with open(fullpath, 'wb') as fp:\n for chunk in upload_file.chunks():\n fp.write(chunk)\n return fullpath, filename\n\n\n@celery_app.task\ndef save_avatar(user, avatar_file):\n '''上传用户头像'''\n # 将文件保存到本地\n fullpath, filename = save_upload_file(user.id, avatar_file)\n # 将文件上传到七牛云\n _, avatar_url = qn_upload(filename, fullpath)\n # 将 URL 保存到 UserModel\n user.avatar = avatar_url\n user.save()\n","repo_name":"v118/swiper","sub_path":"user/logics.py","file_name":"logics.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7193930569","text":"class Solution:\n def minScore(self, n: int, roads: List[List[int]]) -> int:\n parent = { node: node for node in range(1, n+1)}\n size = { node: 1 for node in range(1, n+1)}\n minimum = { node: float(inf) for node in range(1, n+1)}\n \n def find(node):\n if parent[node] == node:\n return node\n \n nodeParent = find(parent[node])\n parent[node] = nodeParent\n \n return nodeParent\n \n def union(node1, node2, cost):\n node1Rep = find(node1) \n node2Rep = find(node2)\n size1 = size[node1Rep]\n size2 = size[node2Rep]\n \n if size1 >= size2:\n parent[node2Rep] = node1Rep\n size[node1Rep] += size2\n size[node2Rep] \n minimum[node1Rep] = min(cost, min(minimum[node1Rep], minimum[node2Rep]))\n\n else:\n parent[node1Rep] = node2Rep\n size[node2Rep] += size1\n size[node1Rep] = 1\n minimum[node2Rep] = min(cost, min(minimum[node1Rep], minimum[node2Rep]))\n\n \n for edge in roads:\n union(edge[0], edge[1], edge[2])\n \n sourceParent = find(1)\n destinationParent = find(n)\n \n return minimum[sourceParent]","repo_name":"NaolAklilu/A2SV","sub_path":"2492-minimum-score-of-a-path-between-two-cities/2492-minimum-score-of-a-path-between-two-cities.py","file_name":"2492-minimum-score-of-a-path-between-two-cities.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"42458929350","text":"\"\"\"\nGeneral utility code for test-time interraction with a SeqNN model.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pdb\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nfrom basenji.dna_io import hot1_augment\nfrom basenji import accuracy\n\n\nclass SeqNNModel(object):\n\n def build_grads(self, layers=[0], center=False):\n ''' Build gradient ops for predictions summed across the sequence for\n each target with respect to some set of layers.\n In\n layers: Optional layer subset list\n '''\n\n self.grad_layers = layers\n self.grad_ops = []\n\n ci = self.preds_length // 2\n\n for ti in range(self.hp.num_targets):\n if center:\n grad_ti_op = tf.gradients(self.preds_train[:,ci:ci+2,ti], [self.layer_reprs[li] for li in self.grad_layers])\n else:\n grad_ti_op = tf.gradients(self.preds_train[:,:,ti], 
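# In the minScore solution above, the bare `size[node2Rep]` line is a no-op and
# the two union branches mirror each other; a symmetric union-by-size over the
# same parent/size/minimum tables reads as:
def union(node1, node2, cost):
    r1, r2 = find(node1), find(node2)
    if size[r1] < size[r2]:
        r1, r2 = r2, r1  # always merge the smaller tree into the larger
    parent[r2] = r1
    size[r1] += size[r2]
    minimum[r1] = min(cost, minimum[r1], minimum[r2])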
[self.layer_reprs[li] for li in self.grad_layers])\n self.grad_ops.append(grad_ti_op)\n\n\n def build_grads_genes(self, gene_seqs, layers=[0]):\n ''' Build gradient ops for TSS position-specific predictions\n for each target with respect to some set of layers.\n In\n gene_seqs: GeneSeq list, from which to extract TSS positions\n layers: Layer subset list.\n '''\n\n # save layer indexes\n self.grad_layers = layers\n\n # initialize ops\n self.grad_pos_ops = []\n\n # determine TSS positions\n tss_pos = set()\n for gene_seq in gene_seqs:\n for tss in gene_seq.tss_list:\n tss_pos.add(tss.seq_bin(width=self.hp.target_pool,\n pred_buffer=self.hp.batch_buffer))\n\n # for each position\n for pi in range(self.preds_length):\n self.grad_pos_ops.append([])\n\n # if it's a TSS position\n if pi in tss_pos:\n # build position-specific, target-specific gradient ops\n for ti in range(self.hp.num_targets):\n grad_piti_op = tf.gradients(self.preds_eval[:,pi,ti],\n [self.layer_reprs[li] for li in self.grad_layers])\n self.grad_pos_ops[-1].append(grad_piti_op)\n\n\n def gradients(self,\n sess,\n batcher,\n rc=False,\n shifts=[0],\n mc_n=0,\n return_all=False):\n \"\"\" Compute predictions on a test set.\n\n In\n sess: TensorFlow session\n batcher: Batcher class with sequence(s)\n rc: Average predictions from the forward and reverse complement sequences.\n shifts:\n mc_n:\n return_all: Return all ensemble predictions.\n\n Out\n layer_grads: [S (sequences) x T (targets) x P (seq position) x U (Units layer i) array] * (L layers)\n layer_reprs: [S (sequences) x P (seq position) x U (Units layer i) array] * (L layers)\n preds:\n \"\"\"\n\n #######################################################################\n # determine ensemble iteration parameters\n\n ensemble_fwdrc = []\n ensemble_shifts = []\n for shift in shifts:\n ensemble_fwdrc.append(True)\n ensemble_shifts.append(shift)\n if rc:\n ensemble_fwdrc.append(False)\n ensemble_shifts.append(shift)\n\n if mc_n > 0:\n # setup feed dict\n fd = self.set_mode('test_mc')\n\n else:\n # setup feed dict\n fd = self.set_mode('test')\n\n # co-opt the variable to represent\n # iterations per fwdrc/shift.\n mc_n = 1\n\n # total ensemble predictions\n all_n = mc_n * len(ensemble_fwdrc)\n\n\n #######################################################################\n # initialize data structures\n\n # initialize gradients\n # (I need a list for layers because the sizes are different within)\n # (Targets up front, because I need to run their ops one by one)\n layer_reprs = []\n layer_grads = []\n layer_reprs_all = []\n layer_grads_all = []\n\n for lii in range(len(self.grad_layers)):\n li = self.grad_layers[lii]\n layer_seq_len = self.layer_reprs[li].shape[1].value\n layer_units = self.layer_reprs[li].shape[2].value\n\n lr = np.zeros((batcher.num_seqs, layer_seq_len, layer_units),\n dtype='float32')\n layer_reprs.append(lr)\n\n lg = np.zeros((self.hp.num_targets, batcher.num_seqs,\n layer_seq_len, layer_units),\n dtype='float32')\n layer_grads.append(lg)\n\n if return_all:\n lra = np.zeros((batcher.num_seqs, layer_seq_len, layer_units, all_n),\n dtype='float32')\n layer_reprs_all.append(lra)\n\n lgr = np.zeros((self.hp.num_targets, batcher.num_seqs,\n layer_seq_len, layer_units, all_n),\n dtype='float32')\n layer_grads_all.append(lgr)\n\n\n # initialize predictions\n preds = np.zeros((batcher.num_seqs, self.preds_length, self.hp.num_targets),\n dtype='float32')\n\n if return_all:\n preds_all = np.zeros((batcher.num_seqs, self.preds_length,\n self.hp.num_targets, all_n),\n 
dtype='float32')\n\n #######################################################################\n # compute\n\n # sequence index\n si = 0\n\n # get first batch\n Xb, _, _, Nb = batcher.next()\n\n while Xb is not None:\n # ensemble predict\n preds_batch, layer_reprs_batch, layer_grads_batch = self._gradients_ensemble(\n sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n, return_all=return_all)\n\n # unpack\n if return_all:\n preds_batch, preds_batch_all = preds_batch\n layer_reprs_batch, layer_reprs_batch_all = layer_reprs_batch\n layer_grads_batch, layer_grads_batch_all = layer_grads_batch\n\n # accumulate predictions\n preds[si:si+Nb,:,:] = preds_batch[:Nb,:,:]\n if return_all:\n preds_all[si:si+Nb,:,:,:] = preds_batch_all[:Nb,:,:,:]\n\n # accumulate representations\n for lii in range(len(self.grad_layers)):\n layer_reprs[lii][si:si+Nb] = layer_reprs_batch[lii][:Nb]\n if return_all:\n layer_reprs_all[lii][si:si+Nb] = layer_reprs_batch_all[lii][:Nb]\n\n # accumulate gradients\n for lii in range(len(self.grad_layers)):\n for ti in range(self.hp.num_targets):\n layer_grads[lii][ti,si:si+Nb,:,:] = layer_grads_batch[lii][ti,:Nb,:,:]\n if return_all:\n layer_grads_all[lii][ti,si:si+Nb,:,:,:] = layer_grads_batch_all[lii][ti,:Nb,:,:,:]\n\n # update sequence index\n si += Nb\n\n # next batch\n Xb, _, _, Nb = batcher.next()\n\n # reset training batcher\n batcher.reset()\n\n\n #######################################################################\n # modify and return\n\n # move sequences to front\n for lii in range(len(self.grad_layers)):\n layer_grads[lii] = np.transpose(layer_grads[lii], [1,0,2,3])\n if return_all:\n layer_grads_all[lii] = np.transpose(layer_grads_all[lii], [1,0,2,3,4])\n\n if return_all:\n return layer_grads, layer_reprs, preds, layer_grads_all, layer_reprs_all, preds_all\n else:\n return layer_grads, layer_reprs, preds\n\n\n def _gradients_ensemble(self, sess, fd, Xb,\n ensemble_fwdrc, ensemble_shifts, mc_n,\n return_var=False, return_all=False):\n \"\"\" Compute gradients over an ensemble of input augmentations.\n\n In\n sess: TensorFlow session\n fd: feed dict\n Xb: input data\n ensemble_fwdrc:\n ensemble_shifts:\n mc_n:\n return_var:\n return_all: Return all ensemble predictions.\n\n Out\n preds:\n layer_reprs:\n layer_grads\n \"\"\"\n\n # initialize batch predictions\n preds = np.zeros((Xb.shape[0], self.preds_length, self.hp.num_targets), dtype='float32')\n\n # initialize layer representations and gradients\n layer_reprs = []\n layer_grads = []\n for lii in range(len(self.grad_layers)):\n li = self.grad_layers[lii]\n layer_seq_len = self.layer_reprs[li].shape[1].value\n layer_units = self.layer_reprs[li].shape[2].value\n\n lr = np.zeros((Xb.shape[0], layer_seq_len, layer_units), dtype='float16')\n layer_reprs.append(lr)\n\n lg = np.zeros((self.hp.num_targets, Xb.shape[0], layer_seq_len, layer_units), dtype='float32')\n layer_grads.append(lg)\n\n\n # initialize variance\n if return_var:\n preds_var = np.zeros(preds.shape, dtype='float32')\n\n layer_reprs_var = []\n layer_grads_var = []\n for lii in range(len(self.grad_layers)):\n layer_reprs_var.append(np.zeros(layer_reprs.shape, dtype='float32'))\n layer_grads_var.append(np.zeros(layer_grads.shape, dtype='float32'))\n else:\n preds_var = None\n layer_grads_var = [None]*len(self.grad_layers)\n\n\n # initialize all-saving arrays\n if return_all:\n all_n = mc_n * len(ensemble_fwdrc)\n preds_all = np.zeros((Xb.shape[0], self.preds_length, self.hp.num_targets, all_n), dtype='float32')\n\n layer_reprs_all = []\n layer_grads_all = 
[]\n for lii in range(len(self.grad_layers)):\n ls = tuple(list(layer_reprs[lii].shape) + [all_n])\n layer_reprs_all.append(np.zeros(ls, dtype='float32'))\n\n ls = tuple(list(layer_grads[lii].shape) + [all_n])\n layer_grads_all.append(np.zeros(ls, dtype='float32'))\n else:\n preds_all = None\n layer_grads_all = [None]*len(self.grad_layers)\n\n\n running_i = 0\n\n for ei in range(len(ensemble_fwdrc)):\n # construct sequence\n Xb_ensemble = hot1_augment(Xb, ensemble_fwdrc[ei], ensemble_shifts[ei])\n\n # update feed dict\n fd[self.inputs_ph] = Xb_ensemble\n\n # for each monte carlo (or non-mc single) iteration\n for mi in range(mc_n):\n # print('ei=%d, mi=%d, fwdrc=%d, shifts=%d' % \\\n # (ei, mi, ensemble_fwdrc[ei], ensemble_shifts[ei]),\n # flush=True)\n\n ##################################################\n # prediction\n\n # predict\n preds_ei, layer_reprs_ei = sess.run([self.preds_train, self.layer_reprs], feed_dict=fd)\n\n # reverse\n if ensemble_fwdrc[ei] is False:\n preds_ei = preds_ei[:,::-1,:]\n\n # save previous mean\n preds1 = preds\n\n # update mean\n preds = self.running_mean(preds1, preds_ei, running_i+1)\n\n # update variance sum\n if return_var:\n preds_var = self.running_varsum(preds_var, preds_ei, preds1, preds)\n\n # save iteration\n if return_all:\n preds_all[:,:,:,running_i] = preds_ei[:,:,:]\n\n\n ##################################################\n # representations\n\n for lii in range(len(self.grad_layers)):\n li = self.grad_layers[lii]\n\n # reverse\n if ensemble_fwdrc[ei] is False:\n layer_reprs_ei[li] = layer_reprs_ei[li][:,::-1,:]\n\n # save previous mean\n layer_reprs_lii1 = layer_reprs[lii]\n\n # update mean\n layer_reprs[lii] = self.running_mean(layer_reprs_lii1, layer_reprs_ei[li], running_i+1)\n\n # update variance sum\n if return_var:\n layer_reprs_var[lii] = self.running_varsum(layer_reprs_var[lii], layer_reprs_ei[li],\n layer_reprs_lii1, layer_reprs[lii])\n\n # save iteration\n if return_all:\n layer_reprs_all[lii][:,:,:,running_i] = layer_reprs_ei[li]\n\n\n ##################################################\n # gradients\n\n # compute gradients for each target individually\n for ti in range(self.hp.num_targets):\n # compute gradients\n layer_grads_ti_ei = sess.run(self.grad_ops[ti], feed_dict=fd)\n\n for lii in range(len(self.grad_layers)):\n # reverse\n if ensemble_fwdrc[ei] is False:\n layer_grads_ti_ei[lii] = layer_grads_ti_ei[lii][:,::-1,:]\n\n # save predious mean\n layer_grads_lii_ti1 = layer_grads[lii][ti]\n\n # update mean\n layer_grads[lii][ti] = self.running_mean(layer_grads_lii_ti1, layer_grads_ti_ei[lii], running_i+1)\n\n # update variance sum\n if return_var:\n layer_grads_var[lii][ti] = self.running_varsum(layer_grads_var[lii][ti], layer_grads_ti_ei[lii],\n layer_grads_lii_ti1, layer_grads[lii][ti])\n\n # save iteration\n if return_all:\n layer_grads_all[lii][ti,:,:,:,running_i] = layer_grads_ti_ei[lii]\n\n # update running index\n running_i += 1\n\n if return_var:\n return (preds, preds_var), (layer_reprs, layer_reprs_var), (layer_grads, layer_grads_var)\n elif return_all:\n return (preds, preds_all), (layer_reprs, layer_reprs_all), (layer_grads, layer_grads_all)\n else:\n return preds, layer_reprs, layer_grads\n\n def gradients_genes(self, sess, batcher, gene_seqs):\n ''' Compute predictions on a test set.\n In\n sess: TensorFlow session\n batcher: Batcher class with sequence(s)\n gene_seqs: List of GeneSeq instances specifying gene positions in sequences.\n Out\n layer_grads: [G (TSSs) x T (targets) x P (seq position) x U (Units layer i) 
array] * (L layers)\n          layer_reprs: [S (sequences) x P (seq position) x U (Units layer i) array] * (L layers)\n         Notes\n          -Reverse complements aren't implemented yet. They're trickier here, because\n           I'd need to build more gradient ops to match the flipped positions.\n        '''\n\n        # count TSSs\n        tss_num = 0\n        for gene_seq in gene_seqs:\n            tss_num += len(gene_seq.tss_list)\n\n        # initialize gradients and representations\n        # (I need a list for layers because the sizes are different within)\n        # (TSSxTargets up front, because I need to run their ops one by one)\n        layer_grads = []\n        layer_reprs = []\n        for lii in range(len(self.grad_layers)):\n            li = self.grad_layers[lii]\n            layer_seq_len = self.layer_reprs[li].shape[1].value\n            layer_units = self.layer_reprs[li].shape[2].value\n\n            # gradients\n            lg = np.zeros((tss_num, self.hp.num_targets, layer_seq_len, layer_units), dtype='float32')\n            layer_grads.append(lg)\n\n            # representations\n            lr = np.zeros((batcher.num_seqs, layer_seq_len, layer_units), dtype='float32')\n            layer_reprs.append(lr)\n\n        # setup feed dict for dropout\n        fd = self.set_mode('test')\n\n        # TSS index\n        tss_i = 0\n\n        # sequence index\n        si = 0\n\n        # get first batch\n        Xb, _, _, Nb = batcher.next()\n\n        while Xb is not None:\n            # update feed dict\n            fd[self.inputs_ph] = Xb\n\n            # predict\n            reprs_batch, _ = sess.run([self.layer_reprs, self.preds_train], feed_dict=fd)\n\n            # save representations\n            for lii in range(len(self.grad_layers)):\n                li = self.grad_layers[lii]\n                layer_reprs[lii][si:si+Nb] = reprs_batch[li][:Nb]\n\n            # compute gradients for each TSS position individually\n            for bi in range(Nb):\n                for tss in gene_seqs[si+bi].tss_list:\n                    # get TSS prediction bin position\n                    pi = tss.seq_bin(width=self.hp.target_pool, pred_buffer=self.hp.batch_buffer)\n\n                    for ti in range(self.hp.num_targets):\n                        # compute gradients over all positions\n                        grads_batch = sess.run(self.grad_pos_ops[pi][ti], feed_dict=fd)\n\n                        # accumulate gradients\n                        for lii in range(len(self.grad_layers)):\n                            layer_grads[lii][tss_i,ti,:,:] = grads_batch[lii][bi]\n\n                    # update TSS index\n                    tss_i += 1\n\n            # update sequence index\n            si += Nb\n\n            # next batch\n            Xb, _, _, Nb = batcher.next()\n\n        # reset training batcher\n        batcher.reset()\n\n        return layer_grads, layer_reprs\n\n\n    def hidden(self, sess, batcher, layers=None, test_batches=None):\n        \"\"\" Compute hidden representations for a test set.\n\n        In\n         sess: TensorFlow session\n         batcher: Batcher class with sequences.\n         layers: Layer indexes to return representations.\n         test_batches: Number of test batches to use.\n\n        Out\n         layer_reprs: list of layer representation arrays, one per requested layer\n         preds: S (sequences) x L (unbuffered length) x T (targets) array\n        \"\"\"\n\n        if layers is None:\n            layers = list(range(self.hp.cnn_layers))\n\n        # initialize layer representation data structure\n        layer_reprs = []\n        for li in range(1 + np.max(layers)):\n            layer_reprs.append([])\n        preds = []\n\n        # setup feed dict\n        fd = self.set_mode('test')\n\n        # get first batch\n        Xb, _, _, Nb = batcher.next()\n\n        batch_num = 0\n        while Xb is not None and (test_batches is None or\n                                  batch_num < test_batches):\n            # update feed dict\n            fd[self.inputs_ph] = Xb\n\n            # compute predictions\n            layer_reprs_batch, preds_batch = sess.run(\n                [self.layer_reprs, self.preds_train], feed_dict=fd)\n\n            # accumulate representations\n            for li in layers:\n                # squeeze (conv_2d-expanded) second dimension\n                if layer_reprs_batch[li].shape[1] == 1:\n                    layer_reprs_batch[li] = layer_reprs_batch[li].squeeze(axis=1)\n\n                # append\n            
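# cast to float16 to halve the memory of the stored representations\n            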
layer_reprs[li].append(layer_reprs_batch[li][:Nb].astype('float16'))\n\n            # accumulate predictions\n            preds.append(preds_batch[:Nb])\n\n            # next batch\n            Xb, _, _, Nb = batcher.next()\n            batch_num += 1\n\n        # reset batcher\n        batcher.reset()\n\n        # accumulate representations\n        for li in layers:\n            layer_reprs[li] = np.vstack(layer_reprs[li])\n\n        preds = np.vstack(preds)\n\n        return layer_reprs, preds\n\n\n    def _predict_ensemble(self,\n                          sess,\n                          fd,\n                          Xb,\n                          ensemble_fwdrc,\n                          ensemble_shifts,\n                          mc_n,\n                          ds_indexes=None,\n                          target_indexes=None,\n                          return_var=False,\n                          return_all=False,\n                          embed_penultimate=False):\n\n        # determine predictions length\n        preds_length = self.preds_length\n        if ds_indexes is not None:\n            preds_length = len(ds_indexes)\n\n        # determine num targets\n        if embed_penultimate:\n            num_targets = self.hp.cnn_params[-1].filters\n        else:\n            num_targets = self.hp.sum_targets\n            if target_indexes is not None:\n                num_targets = len(target_indexes)\n\n        # initialize batch predictions\n        preds_batch = np.zeros(\n            (Xb.shape[0], preds_length, num_targets), dtype='float32')\n\n        if return_var:\n            preds_batch_var = np.zeros(preds_batch.shape, dtype='float32')\n        else:\n            preds_batch_var = None\n\n        if return_all:\n            all_n = mc_n * len(ensemble_fwdrc)\n            preds_all = np.zeros(\n                (Xb.shape[0], preds_length, num_targets, all_n), dtype='float32')\n        else:\n            preds_all = None\n\n        running_i = 0\n\n        for ei in range(len(ensemble_fwdrc)):\n            # construct sequence\n            Xb_ensemble = hot1_augment(Xb, ensemble_fwdrc[ei], ensemble_shifts[ei])\n\n            # update feed dict\n            fd[self.inputs_ph] = Xb_ensemble\n\n            # for each monte carlo (or non-mc single) iteration\n            for mi in range(mc_n):\n                # print('ei=%d, mi=%d, fwdrc=%d, shifts=%d' % (ei, mi, ensemble_fwdrc[ei], ensemble_shifts[ei]), flush=True)\n\n                # predict\n                preds_ei = sess.run(self.preds_eval, feed_dict=fd)\n\n                # reverse\n                if ensemble_fwdrc[ei] is False:\n                    preds_ei = preds_ei[:, ::-1, :]\n\n                # down-sample\n                if ds_indexes is not None:\n                    preds_ei = preds_ei[:, ds_indexes, :]\n                if target_indexes is not None:\n                    preds_ei = preds_ei[:, :, target_indexes]\n\n                # save previous mean\n                preds_batch1 = preds_batch\n\n                # update mean\n                preds_batch = self.running_mean(preds_batch1, preds_ei, running_i + 1)\n\n                # update variance sum\n                if return_var:\n                    preds_batch_var = self.running_varsum(preds_batch_var, preds_ei,\n                                                          preds_batch1, preds_batch)\n\n                # save iteration\n                if return_all:\n                    preds_all[:, :, :, running_i] = preds_ei[:, :, :]\n\n                # update running index\n                running_i += 1\n\n        return preds_batch, preds_batch_var, preds_all\n\n    def predict_h5_manual(self, sess, batcher,\n                          rc=False, shifts=[0], mc_n=0,\n                          target_indexes=None,\n                          return_var=False, return_all=False,\n                          down_sample=1, embed_penultimate=False,\n                          test_batches=None, dtype='float32'):\n        \"\"\" Compute predictions on a test set.\n\n        In\n         sess: TensorFlow session\n         batcher: Batcher class with sequences.\n         rc: Average predictions from the forward and reverse\n         complement sequences.\n         shifts: Average predictions from sequence shifts left/right.\n         mc_n: Monte Carlo iterations per rc/shift.\n         target_indexes: Optional target subset list\n         return_var: Return variance estimates\n         down_sample: Int specifying to consider uniformly spaced sampled\n         positions\n         embed_penultimate: Predict the embed_penultimate layer.\n         test_batches: Number of test batches to use.\n         dtype: Float resolution to return.\n\n        Out\n         preds: S (sequences) x L (unbuffered length) x T (targets) array\n        \"\"\"\n\n        # uniformly sample indexes\n        ds_indexes = None\n        preds_length = self.preds_length\n        
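# e.g. down_sample=4 on a 1024-bin prediction keeps bins 0, 4, 8, ... (256 positions)\n        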
if down_sample != 1:\n            ds_indexes = np.arange(0, self.preds_length, down_sample)\n            preds_length = len(ds_indexes)\n\n        # initialize prediction arrays\n        if embed_penultimate:\n            num_targets = self.hp.cnn_params[-1].filters\n        else:\n            num_targets = self.hp.sum_targets\n            if target_indexes is not None:\n                num_targets = len(target_indexes)\n\n        # determine ensemble iteration parameters\n        ensemble_fwdrc = []\n        ensemble_shifts = []\n        for shift in shifts:\n            ensemble_fwdrc.append(True)\n            ensemble_shifts.append(shift)\n            if rc:\n                ensemble_fwdrc.append(False)\n                ensemble_shifts.append(shift)\n\n        if mc_n > 0:\n            # setup feed dict\n            fd = self.set_mode('test_mc')\n\n        else:\n            # setup feed dict\n            fd = self.set_mode('test')\n\n            # co-opt the variable to represent\n            # iterations per fwdrc/shift.\n            mc_n = 1\n\n        # total ensemble predictions\n        all_n = mc_n * len(ensemble_fwdrc)\n\n        # initialize prediction data structures\n        if test_batches is None:\n            num_seqs = batcher.remaining()\n        else:\n            num_seqs = min(batcher.remaining(), self.hp.batch_size*test_batches)\n\n        preds = np.zeros(\n            (num_seqs, preds_length, num_targets), dtype=dtype)\n        if return_var:\n            if all_n == 1:\n                print(\n                    'Cannot return prediction variance. Add rc, shifts, or mc.',\n                    file=sys.stderr)\n                exit(1)\n            preds_var = np.zeros(\n                (num_seqs, preds_length, num_targets), dtype=dtype)\n        if return_all:\n            preds_all = np.zeros(\n                (num_seqs, preds_length, num_targets, all_n), dtype=dtype)\n\n        # indexes\n        si = 0\n        batch_num = 0\n\n        # while we want more batches\n        while test_batches is None or batch_num < test_batches:\n            # get batch\n            Xb, _, _, Nb = batcher.next()\n\n            # verify fidelity\n            if Xb is None:\n                break\n            else:\n                # make ensemble predictions\n                preds_batch, preds_batch_var, preds_batch_all = self._predict_ensemble(\n                    sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n, ds_indexes,\n                    target_indexes, return_var, return_all, embed_penultimate)\n\n                # accumulate predictions\n                preds[si:si + Nb, :, :] = preds_batch[:Nb, :, :]\n                if return_var:\n                    preds_var[si:si + Nb, :, :] = preds_batch_var[:Nb, :, :] / (all_n - 1)\n                if return_all:\n                    preds_all[si:si + Nb, :, :, :] = preds_batch_all[:Nb, :, :, :]\n\n                # update sequence index\n                si += Nb\n\n                # next batch\n                batch_num += 1\n\n        if return_var:\n            if return_all:\n                return preds, preds_var, preds_all\n            else:\n                return preds, preds_var\n        else:\n            return preds\n\n    def predict_h5(self, sess, batcher, test_batches=None,\n                   return_var=False, return_all=False):\n        \"\"\" Compute predictions on an HDF5 test set.\n\n        Args:\n          sess: TensorFlow session\n          batcher: Batcher class with sequences.\n          test_batches: Number of test batches to use.\n          return_var: Return variance estimates\n          return_all: Return all predictions.\n\n        Returns:\n          preds: S (sequences) x L (unbuffered length) x T (targets) array\n        \"\"\"\n        fd = self.set_mode('test')\n\n        # initialize prediction data structures\n        preds = []\n        if return_var:\n            preds_var = []\n        if return_all:\n            preds_all = []\n\n        # count batches\n        batch_num = 0\n\n        # while we want more batches\n        while test_batches is None or batch_num < test_batches:\n            # get batch\n            Xb, _, _, Nb = batcher.next()\n\n            # verify fidelity\n            if Xb is None:\n                break\n            else:\n                # update feed dict\n                fd[self.inputs_ph] = Xb\n\n                # make predictions\n                if return_var or return_all:\n                    preds_batch, preds_ensemble_batch = sess.run([self.preds_eval, self.preds_ensemble], feed_dict=fd)\n\n                    # move ensemble to back\n                    preds_ensemble_batch = np.moveaxis(preds_ensemble_batch, 0, -1)\n\n                else:\n                    preds_batch = sess.run(self.preds_eval, feed_dict=fd)\n\n                # accumulate 
predictions and targets\n                preds.append(preds_batch[:Nb])\n                if return_var:\n                    preds_var_batch = np.var(preds_ensemble_batch, axis=-1)\n                    preds_var.append(preds_var_batch[:Nb])\n                if return_all:\n                    preds_all.append(preds_ensemble_batch[:Nb])\n\n                # next batch\n                batch_num += 1\n\n        # construct arrays\n        preds = np.concatenate(preds, axis=0)\n        if return_var:\n            preds_var = np.concatenate(preds_var, axis=0)\n        if return_all:\n            preds_all = np.concatenate(preds_all, axis=0)\n\n        if return_var:\n            if return_all:\n                return preds, preds_var, preds_all\n            else:\n                return preds, preds_var\n        else:\n            return preds\n\n    def predict_tfr(self, sess, test_batches=None, sample=1.,\n                    return_var=False, return_all=False):\n        \"\"\" Compute predictions on a TFRecord test set.\n\n        Args:\n          sess: TensorFlow session\n          test_batches: Number of test batches to use.\n          sample: Down sample positions uniformly.\n          return_var: Return variance estimates\n          return_all: Return all predictions.\n\n        Returns:\n          preds: S (sequences) x L (unbuffered length) x T (targets) array\n        \"\"\"\n        fd = self.set_mode('test')\n\n        # down sample\n        if sample != 1:\n            assert(0 < sample < 1)\n            sample_pos_len = int(sample*self.preds_length)\n            sample_pos = np.linspace(0, self.preds_length-1,\n                                     sample_pos_len, dtype='int')\n\n        # initialize prediction data structures\n        preds = []\n        preds_var = []\n        preds_all = []\n\n        # sequence index\n        data_available = True\n        batch_num = 0\n        while data_available and (test_batches is None or batch_num < test_batches):\n            try:\n                # make predictions\n                if return_var or return_all:\n                    preds_batch, preds_ensemble_batch = sess.run([self.preds_eval, self.preds_ensemble], feed_dict=fd)\n\n                    # move ensemble to back\n                    preds_ensemble_batch = np.moveaxis(preds_ensemble_batch, 0, -1)\n\n                else:\n                    preds_batch = sess.run(self.preds_eval, feed_dict=fd)\n\n                # down sample\n                if sample != 1:\n                    preds_batch = preds_batch[:,sample_pos,:]\n                    if return_var or return_all:\n                        preds_ensemble_batch = preds_ensemble_batch[:,sample_pos,:,:]\n\n                # accumulate predictions and targets\n                preds.append(preds_batch.astype('float16'))\n                if return_var:\n                    preds_var_batch = np.var(preds_ensemble_batch, axis=-1)\n                    preds_var.append(preds_var_batch.astype('float16'))\n                if return_all:\n                    preds_all.append(preds_ensemble_batch.astype('float16'))\n\n                batch_num += 1\n\n            except tf.errors.OutOfRangeError:\n                data_available = False\n\n        if preds:\n            # concatenate into arrays\n            preds = np.concatenate(preds, axis=0)\n            if return_var and preds_var:\n                preds_var = np.concatenate(preds_var, axis=0)\n            if return_all and preds_all:\n                preds_all = np.concatenate(preds_all, axis=0)\n\n        else:\n            # return empty array objects\n            preds = np.array(preds)\n            preds_var = np.array(preds_var)\n            preds_all = np.array(preds_all)\n\n        if return_var:\n            if return_all:\n                return preds, preds_var, preds_all\n            else:\n                return preds, preds_var\n        else:\n            return preds\n\n    def predict_genes(self,\n                      sess,\n                      batcher,\n                      gene_seqs,\n                      rc=False,\n                      shifts=[0],\n                      mc_n=0,\n                      target_indexes=None,\n                      tss_radius=0,\n                      embed_penultimate=False,\n                      test_batches_per=256,\n                      dtype='float32'):\n        \"\"\" Compute predictions on a test set.\n\n        In\n         sess: TensorFlow session\n         batcher: Batcher class with transcript-covering sequences\n         gene_seqs: List of GeneSeq instances specifying gene positions in sequences.\n         rc: Average predictions from the forward and reverse\n         complement sequences.\n         shifts: Average predictions from sequence shifts left/right.\n         mc_n: Monte Carlo iterations per rc/shift.\n         target_indexes: Optional target 
subset list\n tss_radius: Radius of bins to quantify TSS.\n embed_penultimate: Predict the embed_penultimate layer.\n dtype: Float resolution to return.\n\n Out\n transcript_preds: G (gene transcripts) X T (targets) array\n \"\"\"\n\n # count TSSs\n tss_num = 0\n for gene_seq in gene_seqs:\n tss_num += len(gene_seq.tss_list)\n\n # count targets\n if embed_penultimate:\n num_targets = self.hp.cnn_params[-1].filters\n else:\n num_targets = self.hp.sum_targets\n if target_indexes is not None:\n num_targets = len(target_indexes)\n\n # initialize TSS preds\n tss_preds = np.zeros((tss_num, num_targets), dtype=dtype)\n\n # initialize indexes\n tss_i = 0\n si = 0\n\n while not batcher.empty():\n # predict gene sequences\n gseq_preds = self.predict_h5_manual(sess, batcher, rc=rc, shifts=shifts, mc_n=mc_n,\n target_indexes=target_indexes, embed_penultimate=embed_penultimate,\n test_batches=test_batches_per)\n # slice TSSs\n for bsi in range(gseq_preds.shape[0]):\n for tss in gene_seqs[si].tss_list:\n bi = tss.seq_bin(width=self.hp.target_pool, pred_buffer=self.hp.batch_buffer)\n tss_preds[tss_i,:] = gseq_preds[bsi,bi-tss_radius:bi+1+tss_radius,:].sum(axis=0)\n tss_i += 1\n si += 1\n\n batcher.reset()\n\n return tss_preds\n\n\n def test_tfr(self, sess, dataset, handle_ph=None, test_batches=None, sample=1.0):\n \"\"\" Compute model accuracy on a test set, where data is loaded from a queue.\n\n Args:\n sess: TensorFlow session\n dataset: Dataset\n handle_ph: Dataset handle placeholder\n test_batches: Number of test batches to use.\n sample: Sample sequence positions to save predictions/targets.\n\n Returns:\n acc: Accuracy object\n \"\"\"\n fd = self.set_mode('test')\n\n if handle_ph is not None:\n fd[handle_ph] = dataset.handle\n\n # initialize prediction and target arrays\n if test_batches is None:\n num_seqs = dataset.num_seqs\n else:\n num_seqs = min(dataset.num_seqs, test_batches*self.hp.batch_size)\n\n # need to wait for variable num_targets\n sample_length = int(np.round(sample*self.preds_length))\n preds = None\n targets = None\n targets_na = np.zeros((num_seqs, sample_length), dtype='bool')\n\n batch_losses = []\n batch_target_losses = []\n batch_sizes = []\n\n # sequence index\n data_available = True\n batch_num = 0\n si = 0\n while data_available and (test_batches is None or batch_num < test_batches):\n try:\n # make predictions\n # run_ops = [self.targets_eval, self.preds_eval_loss,\n # self.loss_eval, self.loss_eval_targets]\n run_ops = [self.targets_train, self.preds_train_loss,\n self.loss_train, self.loss_train_targets]\n run_returns = sess.run(run_ops, feed_dict=fd)\n targets_batch, preds_batch, loss_batch, target_losses_batch = run_returns\n batch_size, _, num_targets = preds_batch.shape\n\n # w/ target knowledge, create arrays\n if preds is None:\n preds = np.zeros((num_seqs, sample_length, num_targets), dtype='float16')\n targets = np.zeros((num_seqs, sample_length, num_targets), dtype='float16')\n\n # accumulate predictions and targets\n if sample_length < self.preds_length:\n sampled_indexes = np.random.choice(np.arange(self.preds_length),\n size=sample_length, replace=False)\n sampled_indexes.sort()\n preds[si:si+batch_size] = preds_batch[:,sampled_indexes,:]\n targets[si:si+batch_size] = targets_batch[:,sampled_indexes,:]\n else:\n preds[si:si+batch_size] = preds_batch\n targets[si:si+batch_size] = targets_batch\n # targets_na is already zero\n\n # accumulate loss\n batch_losses.append(loss_batch)\n batch_target_losses.append(target_losses_batch)\n 
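# per-batch sizes weight the loss averages computed after the loop\n            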
batch_sizes.append(preds_batch.shape[0])\n\n batch_num += 1\n si += batch_size\n\n except tf.errors.OutOfRangeError:\n data_available = False\n\n # mean across batches\n batch_losses = np.array(batch_losses, dtype='float64')\n batch_losses = np.average(batch_losses, weights=batch_sizes)\n batch_target_losses = np.array(batch_target_losses, dtype='float64')\n batch_target_losses = np.average(batch_target_losses, axis=0, weights=batch_sizes)\n\n # instantiate accuracy object\n acc = accuracy.Accuracy(targets, preds, targets_na,\n batch_losses, batch_target_losses)\n\n return acc\n\n\n def test_h5(self, sess, batcher, test_batches=None):\n \"\"\" Compute model accuracy on a test set.\n\n Args:\n sess: TensorFlow session\n batcher: Batcher object to provide data\n test_batches: Number of test batches\n\n Returns:\n acc: Accuracy object\n \"\"\"\n # setup feed dict\n fd = self.set_mode('test')\n\n # initialize prediction and target arrays\n preds = []\n targets = []\n targets_na = []\n\n batch_losses = []\n batch_target_losses = []\n batch_sizes = []\n\n # get first batch\n batch_num = 0\n Xb, Yb, NAb, Nb = batcher.next()\n\n while Xb is not None and (test_batches is None or\n batch_num < test_batches):\n # update feed dict\n fd[self.inputs_ph] = Xb\n fd[self.targets_ph] = Yb\n\n # make predictions\n run_ops = [self.targets_eval, self.preds_eval_loss,\n self.loss_eval, self.loss_eval_targets]\n run_returns = sess.run(run_ops, feed_dict=fd)\n targets_batch, preds_batch, loss_batch, target_losses_batch = run_returns\n\n # accumulate predictions and targets\n preds.append(preds_batch[:Nb,:,:].astype('float16'))\n targets.append(targets_batch[:Nb,:,:].astype('float16'))\n targets_na.append(np.zeros([Nb, self.preds_length], dtype='bool'))\n\n # accumulate loss\n batch_losses.append(loss_batch)\n batch_target_losses.append(target_losses_batch)\n batch_sizes.append(Nb)\n\n # next batch\n batch_num += 1\n Xb, Yb, NAb, Nb = batcher.next()\n\n # reset batcher\n batcher.reset()\n\n # construct arrays\n targets = np.concatenate(targets, axis=0)\n preds = np.concatenate(preds, axis=0)\n targets_na = np.concatenate(targets_na, axis=0)\n\n # mean across batches\n batch_losses = np.array(batch_losses, dtype='float64')\n batch_losses = np.average(batch_losses, weights=batch_sizes)\n batch_target_losses = np.array(batch_target_losses, dtype='float64')\n batch_target_losses = np.average(batch_target_losses, axis=0, weights=batch_sizes)\n\n # instantiate accuracy object\n acc = accuracy.Accuracy(targets, preds, targets_na,\n batch_losses, batch_target_losses)\n\n return acc\n\n def test_h5_manual(self,\n sess,\n batcher,\n rc=False,\n shifts=[0],\n mc_n=0,\n test_batches=None):\n \"\"\" Compute model accuracy on a test set.\n\n Args:\n sess: TensorFlow session\n batcher: Batcher object to provide data\n rc: Average predictions from the forward and reverse\n complement sequences.\n shifts: Average predictions from sequence shifts left/right.\n mc_n: Monte Carlo iterations per rc/shift.\n test_batches: Number of test batches\n\n Returns:\n acc: Accuracy object\n \"\"\"\n\n # determine ensemble iteration parameters\n ensemble_fwdrc = []\n ensemble_shifts = []\n for shift in shifts:\n ensemble_fwdrc.append(True)\n ensemble_shifts.append(shift)\n if rc:\n ensemble_fwdrc.append(False)\n ensemble_shifts.append(shift)\n\n if mc_n > 0:\n # setup feed dict\n fd = self.set_mode('test_mc')\n\n else:\n # setup feed dict\n fd = self.set_mode('test')\n\n # co-opt the variable to represent\n # iterations per fwdrc/shift.\n mc_n = 
1\n\n        # initialize prediction and target arrays\n        preds = []\n        targets = []\n        targets_na = []\n\n        batch_losses = []\n        batch_target_losses = []\n        batch_sizes = []\n\n        # get first batch\n        Xb, Yb, NAb, Nb = batcher.next()\n\n        batch_num = 0\n        while Xb is not None and (test_batches is None or\n                                  batch_num < test_batches):\n            # make ensemble predictions\n            preds_batch, preds_batch_var, preds_all = self._predict_ensemble(\n                sess, fd, Xb, ensemble_fwdrc, ensemble_shifts, mc_n)\n\n            # add target info\n            fd[self.targets_ph] = Yb\n            fd[self.targets_na_ph] = NAb\n\n            targets_na.append(np.zeros([Nb, self.preds_length], dtype='bool'))\n\n            # recompute loss w/ ensembled prediction\n            fd[self.preds_adhoc] = preds_batch\n            targets_batch, loss_batch, target_losses_batch = sess.run(\n                [self.targets_train, self.loss_adhoc, self.target_losses_adhoc],\n                feed_dict=fd)\n\n            # accumulate predictions and targets\n            if preds_batch.ndim == 3:\n                preds.append(preds_batch[:Nb, :, :].astype('float16'))\n                targets.append(targets_batch[:Nb, :, :].astype('float16'))\n\n            else:\n                for qi in range(preds_batch.shape[3]):\n                    # TEMP, ideally this will be in the HDF5 and set previously\n                    self.quantile_means = np.geomspace(0.1, 256, 16)\n\n                    # softmax\n                    preds_batch_norm = np.expand_dims(\n                        np.sum(np.exp(preds_batch[:Nb, :, :, :]), axis=3), axis=3)\n                    pred_probs_batch = np.exp(\n                        preds_batch[:Nb, :, :, :]) / preds_batch_norm\n\n                    # expectation over quantile medians\n                    preds.append(np.dot(pred_probs_batch, self.quantile_means))\n\n                    # compare to quantile median\n                    targets.append(self.quantile_means[targets_batch[:Nb, :, :] - 1])\n\n            # accumulate loss\n            batch_losses.append(loss_batch)\n            batch_target_losses.append(target_losses_batch)\n            batch_sizes.append(Nb)\n\n            # next batch\n            Xb, Yb, NAb, Nb = batcher.next()\n            batch_num += 1\n\n        targets = np.concatenate(targets, axis=0)\n        preds = np.concatenate(preds, axis=0)\n        targets_na = np.concatenate(targets_na, axis=0)\n\n        # reset batcher\n        batcher.reset()\n\n        # mean across batches\n        batch_losses = np.array(batch_losses, dtype='float64')\n        batch_losses = np.average(batch_losses, weights=batch_sizes)\n        batch_target_losses = np.array(batch_target_losses, dtype='float64')\n        batch_target_losses = np.average(batch_target_losses, axis=0, weights=batch_sizes)\n\n        # instantiate accuracy object\n        acc = accuracy.Accuracy(targets, preds, targets_na, batch_losses,\n                                batch_target_losses)\n\n        return acc\n\n    def running_mean(self, u_k1, x_k, k):\n        return u_k1 + (x_k - u_k1) / k\n\n    def running_varsum(self, v_k1, x_k, m_k1, m_k):\n        \"\"\" Computing the running variance numerator.\n\n        Ref: https://www.johndcook.com/blog/standard_deviation/\n        \"\"\"\n        return v_k1 + (x_k - m_k1) * (x_k - m_k)\n","repo_name":"calico/basenji","sub_path":"basenji/archive/seqnn_util.py","file_name":"seqnn_util.py","file_ext":"py","file_size_in_byte":41308,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"62"} +{"seq_id":"74520516676","text":"# Importing the library for the shift amount\r\nimport shift_lib\r\nencodeLib = shift_lib.encode_lib\r\n\r\ninp = input()\r\nshift = input()\r\n\r\n# Turning the inputted shift amount to an integer\r\nshift = int(shift)\r\n\r\n# Making sure shift is between 1 and 10\r\nif shift > 10:\r\n    shift = 10\r\nelif shift < 1:\r\n    shift = 1\r\n\r\n# Gets the character that will determine the shift amount when decoding\r\nshift_character = encodeLib.get(shift, None)\r\n\r\n# Splitting the input into a list so that each character can be shifted\r\ninput_split = list(inp)\r\n\r\n# 
Converts the characters to ASCII codes\r\nascii_codes = [ord(char) for char in input_split]\r\n\r\n# Shifts up the ascii codes by shift amount\r\n# Doesn't shift if the character is a space\r\nshifted_ascii = [code + shift if code != 32 else 32 for code in ascii_codes]\r\n\r\n# If an ASCII code is too large, reset it to 33\r\nshifted_ascii_corrected = [33 if code > 126 else code for code in shifted_ascii]\r\n\r\n# Convert back to text\r\ntext_coded_output = [chr(code) for code in shifted_ascii_corrected]\r\n\r\n# Combines the shift character with the coded text\r\nfull_coded_output = shift_character + ' ' + ''.join(text_coded_output)\r\n\r\nprint(full_coded_output)","repo_name":"cTfTs-BC/CeasarB","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6343427892","text":"import requests\ndef main():\n\n    url=\"https://en.wikipedia.org/api/rest_v1/page/pdf\"\n    askf=input('Enter your search :')\n    response = requests.get(url+\"/\"+askf)\n    with open('metadata.pdf', 'wb') as f:\n        f.write(response.content)\nif __name__=='__main__':\n    main()\n","repo_name":"mahmoodkia/f","sub_path":"wiki_pdf.py","file_name":"wiki_pdf.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39894390948","text":"from django.urls import path\n\nfrom src.core.views import LoginView, logout_view, Indexview, RegisterHitmenView\n\n\napp_name ='core'\nurlpatterns = [\n    path('login/', LoginView.as_view(), name='login'),\n    path('logout/', logout_view, name='logout'),\n    path('register/', RegisterHitmenView.as_view(), name='register'),\n    path('', Indexview.as_view(), name='home'),\n]\n","repo_name":"RoodrigoRoot/spy_platform","sub_path":"src/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19149845394","text":"from setuptools import setup, find_packages\nname=\"cardlib\"\nversion=\"1.0.0\"\ndescription=\"A library to manage learning cards\"\nauthor=\"Joshua Rohmann\"\nauthor_email=\"programmierjosh@yahoo.com\"\npackages=find_packages()\nsetup(\n    name=name,\n    version=version,\n    description=description,\n    author=author,\n    author_email=author_email,\n    packages=packages\n)","repo_name":"josh-programmieren/cardlib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18777315291","text":"# Demo in which HSR guides visitors around the Hokuto building\n# Hiroyuki Okada, 30 Dec 2018\n\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport rospy\nimport math\nimport sys\nimport datetime\nimport numpy as np\nimport tf\nfrom std_msgs.msg import *\nfrom geometry_msgs.msg import *\nfrom collections import OrderedDict\nfrom roslib import message\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import PointCloud2\nfrom geometry_msgs.msg import Twist\nfrom math import copysign\nfrom math import pi\nfrom hsrb_interface import Robot\n\n\n# Preparation for using the robot's functions\nrobot = Robot()\nbase = robot.try_get('omni_base')\ntts = robot.try_get('default_tts')\nwhole_body = robot.try_get('whole_body')\n\ndef go_and_say(pos=(0,0,0), contents=''):\n    try:\n        base.go_abs(pos[0], pos[1], pos[2], 180.0)\n    except:\n        rospy.logerr('Fail go')\n    tts.say(contents)\n    rospy.sleep(5)\n\n_SENARIO = [\n    ((2.9, 0.36, -1.57, 180,0), 
u'ここが僕のお気に入りのソファだ。くつろいでテレビが見れるよ。僕は座れないけどね。'),\n    ((5.4, 0.07, -1.57, 180,0), u'ここからお台場の海が見えるよ。綺麗だね。'),\n    ((5.7, 1.6, 3.14, 180,0), u'ここはIH式のレンジだ。何を作ろうかな。'),\n    ((5.7, 2.6, 3.14, 180,0), u'ここがシンクだ。水は出ないけど。'),\n    ((4.4, 5.8, -1.57, 180,0), u'ここでみんなで食事が出来るんだ。'),\n    ((5.2, 6.3, 0.07, 180,0), u'これは近未来テレビ。何とジェスチャーで操作できるんだ。すごいね。'),\n    ((1.5, 3.3, 1.57, 180,0), u'ここが僕の一番のおすすめスポットの気になる木。落ち着くな。'),\n    ((0.0, 0.0, 3.14, 180,0), u'今日の説明はこれでおしまい。ばいばい。')]\n\nif __name__=='__main__':\n    rospy.init_node('guide_hokuto', anonymous=True)\n\n    # Set the robot's initial pose\n    rospy.loginfo(\"Setting Initial Pose\")\n    init_pos_x = rospy.get_param('~init_pos_x')\n    init_pos_y = rospy.get_param('~init_pos_y')\n    init_pos_th = rospy.get_param('~init_pos_th') \n\n    rospy.loginfo(\"(x, y, th)=(%f, %f, %f)\",init_pos_x,init_pos_y,init_pos_th)\n\n    pub = rospy.Publisher('initialpose', PoseWithCovarianceStamped)\n    p = PoseWithCovarianceStamped();\n    msg = PoseWithCovariance();\n    q_angle = tf.transformations.quaternion_from_euler(0.0, 0.0, init_pos_th, 'sxyz')\n    q = Quaternion(*q_angle)\n    msg.pose = Pose(Point(init_pos_x ,init_pos_y, 0.0), q); # initial position\n    msg.covariance = [0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06853];\n    p.pose = msg;\n    p.header.stamp = rospy.Time.now()\n    p.header.frame_id=\"map\"\n    rospy.sleep(2.0)\n    rospy.loginfo(\"Setting Pose\")\n    pub.publish(p);\n\n\n    # Move to the initial posture\n    try:\n        whole_body.move_to_go()\n    except:\n        rospy.logerr('Fail move_to_neutral')\n\n    # First, a greeting\n    tts.say(u'こんにちはHSRだよ。僕が北斗館を案内するね。')\n\n    for unit in _SENARIO:\n        go_and_say(unit[0], unit[1])\n","repo_name":"roboworks/okd_hsr_sample","sub_path":"scripts/guide_hokuto.py","file_name":"guide_hokuto.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23219497895","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom flask import Flask, jsonify, request\n\nfrom block_chain import BlockChain\nfrom address import Address\n\nfrom utils import dict_hash\n\napp = Flask(__name__)\n\n# Simulated wallet address\naddr = Address(\"random string\")\n\n# Initialize the local blockchain\nblockchain = BlockChain()\n\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n    transaction_data = {\n        \"sender\": \"0\",\n        \"recipient\": addr.address,\n        \"amount\": 6\n    }\n\n    blockchain.add_transaction(\n        data=transaction_data,\n        signature=\"0\",\n        public_key=\"0\"\n    )\n    last_block = blockchain.chain[-1]\n    blockchain.mining_block(dict_hash(last_block['headers']))\n    block = blockchain.chain[-1]\n\n    response = {\n        'message': \"New Block Forged\",\n        'index': len(blockchain.chain),\n        'transactions': block['transactions'],\n        'proof': block['headers']['nonce'],\n        'previous_hash': block['headers']['previous_hash'],\n    }\n    return jsonify(response), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef new_transaction():\n    values = json.loads(request.get_json())\n\n    # Check the POSTed data\n    required = ['sender', 'recipient', 'amount', 'signature', 'public_key']\n    if not all(k in values for k in required):\n        return 'Missing values', 400\n\n    # Create a new Transaction\n\n    transaction_data = {\n        \"sender\": values['sender'],\n        \"recipient\": values['recipient'],\n        \"amount\": values['amount']\n    }\n    blockchain.add_transaction(\n        data=transaction_data,\n        signature=values['signature'],\n        public_key=values['public_key']\n    )\n\n    response = {'message': f'Transaction will be added to BlockChain'}\n    return jsonify(response), 
201\n\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n    response = {\n        'chain': [block for block in blockchain.chain],\n        'length': len(blockchain.chain),\n    }\n    return jsonify(response), 200\n\n\n@app.route('/nodes/register', methods=['POST'])\ndef register_nodes():\n    values = json.loads(request.get_json())\n\n    nodes = values['nodes']\n    if nodes is None:\n        return \"Error: Please supply a valid list of nodes\", 400\n\n    for node in nodes:\n        blockchain.add_neighbour(node)\n\n    response = {\n        'message': 'New nodes have been added',\n        'total_nodes': list(blockchain.neighbours),\n    }\n    return jsonify(response), 201\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef consensus():\n    replaced = blockchain.resolve_conflicts()\n\n    if replaced:\n        response = {\n            'message': 'Our chain was replaced',\n            'new_chain': blockchain.chain\n        }\n    else:\n        response = {\n            'message': 'Our chain is authoritative',\n            'chain': blockchain.chain\n        }\n\n    return jsonify(response), 200\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser()\n    parser.add_argument('-p', '--port', default=5001, type=int, help='port to listen on')\n    args = parser.parse_args()\n    port = args.port\n\n    app.run(host='127.0.0.1', port=port)\n","repo_name":"MagicianQi/blockchain-python","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36401251102","text":"import csv\n\nimport sys\n\nmaxInt = sys.maxsize\n\nwhile True:\n    # decrease the maxInt value by factor 10\n    # as long as the OverflowError occurs.\n\n    try:\n        csv.field_size_limit(maxInt)\n        break\n    except OverflowError:\n        maxInt = int(maxInt / 10)\n\n\ndef load_csv_as_dict(csv_path, fieldnames=None, delimiter=None):\n    \"\"\" Loads the csv DictReader\n\n    Parameters\n    ----------\n    csv_path : str\n        Path to csv\n    fieldnames : list of str\n        List of fieldnames, if None then fieldnames are taken from the first row\n    delimiter : str\n        Delimiter to split on, default \\t\n\n    Returns\n    -------\n    csv.DictReader\n        DictReader object of path\n    \"\"\"\n\n    delimiter = delimiter or \"\\t\"\n    f = open(csv_path, encoding='utf8')\n    c = csv.DictReader(f, fieldnames=fieldnames, delimiter=delimiter)\n    return c\n\n\ndef write_rows_to_csv(rows_to_write, csv_path, fieldnames=None, mode=None, delimiter=None, write_header=None):\n    \"\"\" Write the rows the csv at the path\n\n    Parameters\n    ----------\n    rows_to_write : list of dict\n        Rows to write to the CSV\n    csv_path : str\n        Path to csv\n    fieldnames : list of str\n        List of fieldnames for csv. 
Defaults to the keys of the first row in rows_to_write\n    mode : str\n        Mode to write to file (w for write, a for append)\n    delimiter : str\n        Delimiter to join rows on, default \\t\n    write_header : bool\n        Whether to write header before writing rows, default false\n\n    Returns\n    -------\n    csv.DictWriter\n        DictWriter object of path\n    \"\"\"\n\n    if rows_to_write:\n        mode = mode or \"w\"\n        write_header = write_header if write_header is not None else False\n        delimiter = delimiter or '\\t'\n        fieldnames = fieldnames or list(rows_to_write[0].keys())\n        f = open(csv_path, mode, encoding='utf8', newline='')\n        c = csv.DictWriter(f, fieldnames=fieldnames, delimiter=delimiter)\n        if write_header:\n            c.writeheader()\n        c.writerows(rows_to_write)\n        f.close()\n","repo_name":"KieranLitschel/Contextualised-Image-Classifiers","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43474503874","text":"from py2neo import neo4j, Node, rel\nimport glob\nimport linecache\nimport os\nimport json\n\ndef populate_graph(node):\n    previous = None\n    for i in range(len(poems[\"abyat\"])):\n        if(i == 0):\n            first_bayt = poems[\"abyat\"][0][\"bayt\"]\n            first_sadr = poems[\"abyat\"][0][\"sadr\"]\n            first_ajz = poems[\"abyat\"][0][\"ajez\"]\n            first = Node(\"Bayt\", name= first_bayt, sudr = first_sadr, ajez = first_ajz)\n            graph_db.create(first)\n            graph_db.create(rel(node,\"CONTAINS\",first))\n            print(\"see me once\")\n    \n        if(i == 1):\n            bayt = poems[\"abyat\"][1][\"bayt\"]\n            sadr = poems[\"abyat\"][1][\"sadr\"]\n            ajz = poems[\"abyat\"][1][\"ajez\"]\n            following = Node(\"Bayt\", name = bayt, sudr = sadr, ajez = ajz)\n            graph_db.create(following)\n            graph_db.create(rel(first,\"FOLLOWED_BY\", following))\n            print(\"in 1\")\n\n        if(i > 1):\n            bayt = poems[\"abyat\"][i][\"bayt\"]\n            sadr = poems[\"abyat\"][i][\"sadr\"]\n            ajz = poems[\"abyat\"][i][\"ajez\"]\n            follow = Node(\"Bayt\", name = bayt, sudr = sadr, ajez = ajz)\n            graph_db.create(follow)\n            if(i == 2):\n                graph_db.create(rel(following,\"FOLLOWED_BY\", follow))\n                print(\"2\")\n            elif(previous is not None):\n                graph_db.create(rel(previous,\"FOLLOWED_BY\",follow))\n                print(i)\n            previous = follow\n\n#unused.\ndef populate_who_wrote_what(node):\n    for node in poem_node_list:\n        graph_db.create(node)\n        graph_db.create(rel(node,\"WROTE\",poem_node))\n        print(node)\n\n#TODO: apply the same to all 4 3asrs, after applying them in organizeForGraph.py; change rootdir to 'era'_raw_graph_data, and asr variable node to specific era. end result is a massive graph containing 4 3asrs, all their poets and who's born where, and who wrote what. and the actual poem sequence!\n\nrootdir = '/home/ramez/python/capstone/app/andalsi_raw_graph_data'\n#jsondir = '/home/ramez/python/capstone/app/jahili_json'\n\ntextfiles = glob.glob(os.path.join(rootdir, '*.txt'))\njsonfiles = glob.glob(os.path.join(rootdir, '*.json'))\n\nauthors = []\npoem_list = []\npoem_node_list = []\nauthor_node_list = []\n\nneo4j.authenticate(\"localhost:7474\",\"neo4j\",\"faisal\")\n#default for 7474 is empty. 
can add url.\ngraph_db = neo4j.Graph()\n\nasr = Node(\"Era\", era = \"العصر الأندلسي\")\ngraph_db.create(asr)\nprint(asr)\n\nfor f in textfiles:\n title = linecache.getline(f,1)# filename, line_number\n titles = title[1:-1].split(',')\n\n author = linecache.getline(f,2)\n# authors.append(author[:-1])\n\n poet = Node(\"Poet\", name = author)\n# author_node_list.append(poet)\n\n graph_db.create(poet)\n graph_db.create(rel(poet,\"BORN_IN\", asr))\n\n for item in titles:\n item = item[2:-1]\n# poem_list.append(item)\n# print(item)\n item = Node(\"Title\", name = item)\n poe = str(item)\n po = poe[15:-3]\n# poem_node_list.append(item)\n graph_db.create(item)\n graph_db.create(rel(poet,\"WROTE\",item))\n\n print(po)\n\n for file in jsonfiles:\n with open(file,\"r\") as f:\n poems = json.load(f)\n if(poems[\"title\"] == po):\n populate_graph(item)\n print(\"worked\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n if(f.endswith(\".json\")):\n with open(f,\"r\") as fi:\n poems = json.load(fi)\n populate_graph(item)'''\n\n\n\n#fw.write(str(poem_node_list))\n#print(len(poem_node_list))\n#fw = open(\"ping.txt\",\"w\")\n#to put each bayt of a certain poem in database graph.\n#title = fi.readlines()[0]\n #author = fi.readlines()[1]\n #era = fi.readlines()[2]\n'''\nfor poet_node in author_node_list:\n graph_db.create(poet_node)\n graph_db.create(rel(poet_node,\"BORN_IN\",asr))\n populate_who_wrote_what(poet_node)\n print(poet_node)\n\n #for poem_node in poem_node_list:\n #graph_db.create(poem_node)\n #graph_db.create(rel(poet_node,\"WROTE\",poem_node))\n #print(poem_node)\n'''\n'''\nfor subdir, dirs, files in os.walk(jsondir):\n for file in files:\n f = os.path.join(subdir, file)\n with open(f,\"r\") as fi:\n poems = json.load(fi)\n #for poet in authors:\n for poet_node in author_node_list:\n graph_db.create(poet_node)\n graph_db.create(rel(poet_node,\"BORN_IN\",asr))\n print(poet_node)\n\n for poem_node in poem_node_list:\n graph_db.create(poem_node)\n graph_db.create(rel(poet_node,\"WROTE\",poem_node))\n print(poem_node)\n\n for poem in poem_list:\n #if(poet == poems[\"author\"] and poem == poems[\"title\"]):\n if(poem == poems[\"title\"]):\n first_bayt = poems[\"abyat\"][0][\"bayt\"]\n first_sadr = poems[\"abyat\"][0][\"sadr\"]\n first_ajz = poems[\"abyat\"][0][\"ajez\"]\n first = Node(\"Bayt\", name= first_bayt, sudr = first_sadr, ajez = first_ajz)\n graph_db.create(first)\n graph_db.create(rel(poem_node,\"CONTAINS\",first))\n\n\n #graph_db.create(first)\n #graph_db.create(rel(poem, \"CONTAINS\", first))\n #print(first)\n\n #poe = Node(\"Poet\", name = poet)\n #graph_db.create(poe)\n #graph_db.create(rel(poe,\"BORN_IN\", asr))\n #fw.write(str(poe))\n\n #tit = Node(\"Title\", title = poem)\n #fw.write(str(tit))\n\n #graph_db.create(tit)\n #graph_db.create(rel(poe,\"WROTE\",tit))\n\n #what if I create the poet node here, and his titles here, and actual poem nodes here!\n #bayt = Node(\"Bayt\", bayt= add props),\n\n #first_bayt = poems[\"abyat\"][0][\"bayt\"]\n #first_sadr = poems[\"abyat\"][0][\"sadr\"]\n #first_ajz = poems[\"abyat\"][0][\"ajez\"]\n #first = Node(\"Bayt\", name= first_bayt, sudr = first_sadr, ajez = first_ajz)\n #graph_db.create(first)\n #graph_db.create(rel(poem, \"CONTAINS\", first))\n #print(poems[\"abyat\"][0][\"sadr\"])\n #print(poet)\n #print(poem)\n #print(poem)\n\n#fw.close()\n'''\n #instead of printing, get the abyat of the specific poem and immediately write to graph db. 
check orgForGraph and test.py to make sure nothing is wrong.\n\n    #graph_db.create(rel(poet,\"BORN_IN\", asr))\n    #graph_db.create(poet)\n    #print(poet)\n","repo_name":"ramezsw/qaseeda-graph","sub_path":"capstone/app/graphMe.py","file_name":"graphMe.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"26386488605","text":"from readers import Readers as rd\nfrom writers import Writers as wr\nfrom filters import Filters as fl\nfrom judges import Judges as jd\nfrom decorators import Decorators as dc\nfrom calculators_and_converters import CalculatorsAndConverters as cc\n\nclass Master:\n\n    def __init__(self, filename_txt, filename_csv, daylight_csv_fn, current_csv_fn, proposed_csv_fn):\n        \n        # constructing file name attributes (variables) \n        self.filename_txt = filename_txt\n        self.filename_csv = filename_csv\n        self.daylight_csv_fn = daylight_csv_fn\n        self.current_csv_fn = current_csv_fn\n        self.proposed_csv_fn = proposed_csv_fn\n        \n        # constructing class suite, assigning variables where necessary\n\n        # 1 Readers\n        self.rd = rd(self.filename_txt, self.filename_csv)\n        self.rd.Twilight = self.rd.Twilight(self.filename_txt)\n        self.rd.Observations = self.rd.Observations(self.filename_csv)\n\n        # 2 Writers\n        self.wr = wr(self.daylight_csv_fn, self.current_csv_fn, self.proposed_csv_fn)\n\n        # 3 Filters \n        self.fl = fl()\n        self.fl.Twilight = self.fl.Twilight()\n        self.fl.Observations = self.fl.Observations()\n\n        # 4 Judges\n        self.jd = jd()\n\n        # 5 Decorators\n        self.dc = dc()\n\n        # 6 CalculatorsAndConverters\n        self.cc = cc()\n","repo_name":"spartaninzaghi/Mason-Jewett-Airport-Data-Analysis","sub_path":"master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7552814160","text":"import itertools\n\nsquare_digits = [(x*x//10,x*x%10) for x in range(1,10)]\n\ndef extend_die(die):\n    ext = set()\n    for x in die:\n        ext.add(x)\n        if x == 6:\n            ext.add(9)\n        elif x == 9:\n            ext.add(6)\n    return ext\n\ndef can_display_squares(a, b):\n    a_ = extend_die(a)\n    b_ = extend_die(b)\n    for x,y in square_digits:\n        if (x in a_ and y in b_) or (y in a_ and x in b_):\n            pass\n        else:\n            return False\n    return True\n\ndef p90():\n    possible_die = tuple(itertools.combinations(range(10), 6))\n    n = 0\n    for i in range(len(possible_die)):\n        for j in range(i, len(possible_die)):\n            if can_display_squares(possible_die[i], possible_die[j]):\n                n += 1\n    return n\n","repo_name":"jacksonfellows/euler","sub_path":"python/090_Cube_digit_pairs.py","file_name":"090_Cube_digit_pairs.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25530476969","text":"import random\nimport sys\n\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QPainter, QColor\nfrom PyQt5.QtWidgets import QWidget, QApplication\n\n\nclass Example(QWidget):\n    def __init__(self):\n        super().__init__()\n        uic.loadUi('UI.ui', self)\n        self.pushButton.clicked.connect(self.paint)\n        self.do_paint = False\n\n    def paintEvent(self, event):\n        # Create a QPainter object for drawing\n        qp = QPainter()\n        # Begin the painting process\n        qp.begin(self)\n        self.draw(qp)\n        qp.end()\n\n    def paint(self):\n        self.do_paint = True\n        self.repaint()\n\n    def draw(self, qp):\n        rad = random.randint(10, 150)\n        qp.setPen(QColor(255, 255, 0))\n        x = random.randint(10, 500)\n        y 
= random.randint(10, 500)\n        qp.drawEllipse(x, y, 2 * rad, 2 * rad)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    ex.show()\n    sys.exit(app.exec())\n","repo_name":"just-zen/yellow_sircles","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73206593798","text":"from ..Node import Node\nfrom .BinaryTree import BinaryTree\n\nclass BinarySearchTree(BinaryTree):\n    # `self` is not available in default argument values, so a class-level\n    # sentinel stands in for \"start from the tree root\"\n    ROOT = object()\n\n    def insert(self, value):\n        parent = None\n        x = self.root\n        while(x):\n            parent = x\n            if value < x.data:\n                x = x.left\n            else:\n                x = x.right\n        if parent is None:\n            self.root = Node(value)\n        elif value < parent.data:\n            parent.left = Node(value)\n        else:\n            parent.right = Node(value)\n\n    def search(self, value):\n        return self._search(value, self.root)\n\n    def _search(self, value, node):\n        if node is None:\n            return node\n        if node.data == value:\n            return BinarySearchTree(node)\n        if value < node.data:\n            return self._search(value, node.left)\n        return self._search(value, node.right)\n\n    def min(self, node=ROOT):\n        if node == self.ROOT:\n            node = self.root\n        while node.left:\n            node = node.left\n        return node.data\n\n    def max(self, node=ROOT):\n        if node == self.ROOT:\n            node = self.root\n        while node.right:\n            node = node.right\n        return node.data\n\n    def remove(self, value, node=ROOT):\n        if node == self.ROOT:\n            node = self.root\n        if node is None:\n            return node\n        if value < node.data:\n            node.left = self.remove(value, node.left)\n        elif value > node.data:\n            node.right = self.remove(value, node.right)\n        else:\n            return self._remove(node)\n\n        return node\n\n    def _remove(self, node):\n        # no left child or no right child: splice the node out\n        if node.left is None:\n            return node.right\n        elif node.right is None:\n            return node.left\n        # two children: replace with the in-order successor, then\n        # delete the successor from the right subtree\n        node.data = self.min(node.right)\n        node.right = self.remove(node.data, node.right)\n        return node\n","repo_name":"Ellian-aragao/IFB-IC","sub_path":"python/tree/binaryTrees/binarySearchTree.py","file_name":"binarySearchTree.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3750843389","text":"from unittest.mock import Mock\n\nimport pytest\nfrom application.actions.forticloud_poller import ForticloudPoller\nfrom application.repositories.notifications_repository import NotificationsRepository\nfrom application.repositories.redis_repository import RedisRepository\nfrom config import testconfig as config\nfrom tests.fixtures._helpers import wrap_all_methods\n\n\n@pytest.fixture(scope=\"function\")\ndef nats_client():\n    return Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef redis():\n    return Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef scheduler():\n    return Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef notifications_repository(nats_client):\n    instance = NotificationsRepository(nats_client=nats_client, config=config)\n    wrap_all_methods(instance)\n\n    return instance\n\n\n@pytest.fixture(scope=\"function\")\ndef redis_repository(redis):\n    instance = RedisRepository(\n        redis=redis,\n    )\n    wrap_all_methods(instance)\n\n    return instance\n\n\n@pytest.fixture(scope=\"function\")\ndef forticloud_poller(nats_client, scheduler, notifications_repository, redis_repository):\n    instance = ForticloudPoller(\n        nats_client=nats_client,\n        
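# the collaborators passed below are the Mock fixtures defined above\n        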
scheduler=scheduler,\n        config=config,\n        redis_repository=redis_repository,\n        notifications_repository=notifications_repository,\n    )\n    wrap_all_methods(instance)\n\n    return instance\n","repo_name":"Bruin-Dev/Intelygenz","sub_path":"services/forticloud-poller/src/tests/fixtures/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16527557994","text":"# This is the configuration file for the 0D model\n# Note that the prescribed pressure is fixed\n\n# ============================================================================= \n# Configuration file of VULCAN: \n# ============================================================================= \n\n# ====== Setting up the elements included in the network ======\natom_list = ['H', 'O', 'C', 'He'] # For checking element conservation\n\n# ====== Set up paths and filenames for the input and output files ======\n\nuse_venot_network = True # Only True while running Olivia's network\n\nnetwork = 'thermo/vulcan_venot19_reduced.txt' #ISSI_CHO_network.txt (my network used for the ISSI test) or wang_venot_network.txt \ngibbs_text = 'thermo/gibbs_text.txt' # all the nasa9 files must be placed in the folder: thermo/NASA9/\ncom_file = 'thermo/all_compose_venot.txt' # basic chemistry data (stoichiometric numbers and mass)\noutput_dir = 'output/' # output directory\nplot_dir = 'plot/' # plot directory\nout_name = 'wang-CH2-0D-800K_1e-3bar.vul' # output name \ny_time_freq = 1 # The frequency (every _ steps) to store the calculation\n\n\n\n\n# ====== Setting up the elemental abundance ======\nuse_solar = False \n# If True: using the default solar abundance from Table 10. (K.Lodders 2009)\n# if False, using the customized elemental abundance below\n# customized elemental abundance\nO_H = 6.0618E-4 \nC_H = 2.7761E-4 \nN_H = 8.1853E-5\nHe_H = 0.09691\n\nini_mix = 'const_mix' # The initial abundances\n# Options: 'EQ' -- equilibrium chemistry, 'const_mix' -- prescribed below\nconst_mix = {'H2':0.8317, 'CH2':6.653E-4, 'O':1.331E-3, 'He':1.663E-1}\n\n# ====== Reactions to be switched off ======\nremove_list = []\n\n# ====== Setting up parameters for the 0-D \"box\" ======\nnz = 1 # always 1 for 0D \nT_box = 800. # temperature (K)\np_box = 1e3 # pressure (dyne/cm^2)\n# Both T_box and p_box are always kept fixed (even when the total number density changes due to chemical reactions)\natm_base = 'H2' # The bulk gas: changes the efficiency factor in 3-body reactions in Venot's network\n# options: 'H2','O2','CO','CO2','H2O','CH4','N2','NH3'\n\n# condensation\nuse_condense = False\ncondesne_sp = [\"H2O\"] \nnon_gas_sp = ['H2O_l_s']\nstart_conden_time = 1e7\nuse_sharks = False\n\n# ====== Setting up the photochemistry (Not relevant for the 0D box wo. photochemistry) ======\nuse_photo = False\nexcit_sp = ['O_1', 'CH2_1'] # N_D to avoid in the initial abundances by fc\nscat_sp = ['N2', 'O2'] # the molecules that contribute to Rayleigh scattering\nr_star = 1. #0.752 HD209: 1.118\norbit_radius = 1. 
#0.03142 # planet-star distance in A.U.\nsl_angle = 48 /180.*3.14159 # the zenith angle of the star\nedd = 0.669 #(cos(48 deg) ) # the Eddington coefficient\ndbin = 0.2\n\n\n# ====== Setting up general parameters for the ODE solver (No need to change anything here) ====== \node_solver = 'Ros2' \nuse_print_prog = True\nuse_height = True\nprint_prog_num = 200\nuse_live_plot = False\nuse_live_flux = 0\nuse_save_movie = 0\nuse_flux_movie = True\nlive_plot_frq = 10\nuse_plot_end = False\nuse_plot_evo = False\nplot_TP = 1\noutput_humanread = False\n#plot_spec = ['H', 'H2', 'CH3', 'CH4', 'CO', 'CH3OH', 'CH2OH', 'He']\n#live_plot_spec = ['H', 'H2', 'H2O', 'CH4', 'CO', 'CO2', 'C2H2', 'C2H4', 'C2H6', 'CH3OH']\n#live_plot_spec = ['H2O', 'H2O_l_s', 'CO2', 'CH4', 'NO', 'NO2', 'HNO3', 'O3','N2O', 'NH3', 'O2']\n# frequency to update the flux and tau\n# ini_update_photo_frq = 20\n# final_update_photo_frq = 5\n# update_frq = 100 # for updating dz and dzi due to change of mu\n\n# ====== steady state check ======\nst_factor = 0.05 \ncount_min = 100\n\n# ====== Setting up numerical parameters for the ODE solver ====== \ndttry = 1.E-10 # the initial stepsize (s) \n#dt_std = 1. \ntrun_min = 1e2\nruntime = 1.E26 # max runtime\ncount_max = int(5E4) # max steps\ndt_min = 1.E-14 # min stepsize\ndt_max = runtime*1e-5 # max stepsize\ndt_var_max = 2. # max factor of varying the stepsize\ndt_var_min = 0.5 # min factor of varying the stepsize\natol = 1.e-2 # absolute tolerance\nmtol = 1.E-30 # relative tolerance\nmtol_conv = 1.E-20 \npos_cut = 0\nnega_cut = -1.\nloss_eps = 1e3 #1e-1\nyconv_cri = 0.01 # for checking steady-state\nslope_cri = 1.e-10\nyconv_min = 0.1\nslope_min = 1.e-12\n\nflux_cri = 5.e-2 #0.1\nflux_atol = 1. # the tol for actinic flux (# photons cm-2 s-1 nm-1)\n\n\n# ====== Setting up numerical parameters for Ros2 ODE solver ====== \nrtol = 0.05 # the tolerance for the numerical truncation errors. 
Larger values run faster but might be unstable\n# suggested values: 0.01 ~ 0.1 \n\n# ====== Setting up numerical parameters for SemiEu/SparSemiEU ODE solver (Not relevant) ====== \nPItol = 0.1\nuse_PIL = True","repo_name":"shami-EEG/VULCAN-0D","sub_path":"vulcan_cfg.py","file_name":"vulcan_cfg.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13251670621","text":"def row_sum_odd_numbers(n):\n    first = n * (n - 1) + 1\n    last = first + 2 * (n - 1)\n    row = (first + last) * n // 2\n    return row\n\n\nn = 3\nfirst = n * (n - 1) + 1\nprint(first)\nlast = first + 2 * (n - 1)\nprint(last)\nrow = (first + last) * n // 2\nprint(row)\n\n# Best solution:\ndef n_row_odd_triangle(n):\n    return n ** 3","repo_name":"adamFernandez/python-exercises","sub_path":"exercises/functional_programming/15_odd_triangle.py","file_name":"15_odd_triangle.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37333193529","text":"import os\nimport tempfile\n\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom header_model.model import ACCESS_SPECIFIER_PROTECTED, ACCESS_SPECIFIER_PUBLIC, ACCESS_SPECIFIER_PRIVATE\n\n\nclass CppGenerationHelper:\n    def __init__(self, model):\n        self.model = model\n        self.TYPE_INCLUDE_MAP = {\n            \"flags\": '#include \"flags/flags.h\"',\n        }\n\n    def name(self):\n        return self.model.get_class().get_name()\n\n    def gen_class_declaration(self):\n        decl = f\"class {self.name()}\"\n        base_class = self.model.get_class().get_base_class()\n        if base_class:\n            decl += f\" : public {base_class}\"\n        return decl\n\n    def gen_system_includes_from_header(self):\n        includes = [inc for inc in self.model.get_includes() if \"<\" in inc]\n        if \"#include \" not in includes:\n            includes.insert(0, \"#include \")\n        return includes\n\n    def _map_forward_declarations_to_includes(self):\n        tokens = self.model.get_class().get_forward_declarations()\n\n        includes = self._map_types_to_includes(tokens)\n        return includes\n\n    def _map_return_types_to_includes(self):\n        methods = self.model.get_class().get_methods()\n        tokens = [method.return_type for method in methods]\n\n        includes = self._map_types_to_includes(tokens)\n\n        return includes\n\n    def _map_types_to_includes(self, tokens):\n        includes = []\n        for token in tokens:\n            if token in self.TYPE_INCLUDE_MAP:\n                includes.append(self.TYPE_INCLUDE_MAP[token])\n        return includes\n\n    def _map_argument_type_to_includes(self):\n        all_includes = []\n        methods = self.model.get_class().get_methods()\n        for method in methods:\n            tokens = method.argument_list.split()\n\n            includes = self._map_types_to_includes(tokens)\n            all_includes.extend(includes)\n\n        return all_includes\n\n    def _gen_project_includes_from_header(self):\n        includes = [inc for inc in self.model.get_includes() if '\"' in inc]\n\n        includes.extend(self._map_forward_declarations_to_includes())\n        includes.extend(self._map_return_types_to_includes())\n        includes.extend(self._map_argument_type_to_includes())\n        base_class = self.model.get_class().get_base_class()\n        if base_class:\n            includes.append(f'#include \"{base_class}.h\"')\n        return includes\n\n    def gen_includes(self):\n        includes = self._gen_project_includes_from_header()\n        return includes\n\n    def _gen_forward_declarations_from_argument_types(self):\n        decls = self.model.get_class().get_forward_declarations()\n        ret = []\n        for declaration in decls:\n            ret.append(f\"class {declaration};\")\n        return ret\n\n    
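# dict.fromkeys preserves first-seen order (dicts are insertion-ordered in Python 3.7+)\n    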
@staticmethod\n def _remove_duplicates(forward_decls):\n return list(dict.fromkeys(forward_decls))\n\n def gen_forward_declarations(self):\n forward_decls = self._gen_forward_declarations_from_argument_types()\n forward_decls.extend(self.model.get_forward_declaration())\n forward_decls = self._remove_duplicates(forward_decls)\n return forward_decls\n\n def gen_typedefs(self):\n return \"\\n\".join(self.model.get_typedefs())\n\n def gen_friend_declarations(self):\n friend_decls = self.model.get_class().get_friend_decls()\n return \"\\n\".join(friend_decls)\n\n @staticmethod\n def _map_types(argument_list):\n ARGUMENT_TYPE_MAP = {\"long\": \"long int\"}\n for k, v in ARGUMENT_TYPE_MAP.items():\n argument_list = argument_list.replace(k, v)\n return argument_list\n\n def gen_get_class_name(self):\n override = \"override\" if self.model.get_class().get_base_class() else \"\"\n code = \"virtual std::string className() const \" + override + ' {return \"' + self.name() + '\";}'\n return code\n\n def gen_cpp_methods(self):\n klass = self.model.get_class()\n visibility = \"PUBLIC\"\n for method in klass.get_methods():\n const_txt = \" const \" if method.is_const else \"\"\n override_txt = \"\"\n if method.name == \"init\":\n if len(method.argument_list) > 0 and not method.has_override:\n override_txt = \"\"\n elif klass.get_base_class():\n override_txt = \"override\"\n elif method.has_override: # if in source: force 'override'\n override_txt = \"override\"\n\n static_or_virtual_txt = \"static \" if method.is_static else \"virtual\"\n return_type = self._map_types(method.return_type)\n visibility_code = \"\"\n if method.access_specifier != visibility:\n visibility_code = method.access_specifier.lower() + \":\\n\"\n visibility = method.access_specifier\n method_code = f\"{visibility_code}\\t{static_or_virtual_txt} {return_type} {method.name}({self._map_types(method.argument_list)}){const_txt} {override_txt};\\n\"\n yield method_code\n\n def has_init_method(self):\n return self.model.get_class().has_init_method()\n\n def _gen_member_variables(self, access_specifier):\n variables = self.model.get_class().get_member_variables()\n variables = [var for var in variables if var.access == access_specifier]\n code = \"\"\n for var in variables:\n static = \"static\" if var.static else \"\"\n const = \"const\" if var.const else \"\"\n type_name = self._map_types(var.type_name)\n code += f'{static} {const} {type_name} {var.variable_name} {var.default_value_assignment};'\n return code\n\n def gen_public_member_variables(self):\n return self._gen_member_variables(ACCESS_SPECIFIER_PUBLIC)\n\n def gen_protected_member_variables(self):\n return self._gen_member_variables(ACCESS_SPECIFIER_PROTECTED)\n\n def gen_private_member_variables(self):\n return self._gen_member_variables(ACCESS_SPECIFIER_PRIVATE)\n\n def gen_using_declarations(self):\n using_decls = self.model.get_using_declarations()\n return \"\\n\".join(using_decls)\n\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nTEMPLATES_FOLDER = os.path.join(DIR_PATH, \"templates\")\nMODULE_DIR = os.path.join(tempfile.gettempdir(), os.path.split(DIR_PATH)[-1])\nENCODING = 'ascii'\nenv = Environment(\n line_statement_prefix='%',\n line_comment_prefix='##',\n loader=FileSystemLoader(TEMPLATES_FOLDER, encoding=ENCODING),\n autoescape=False,\n auto_reload=True, # set to false for performance improvement in production\n keep_trailing_newline=True,\n trim_blocks=False,\n lstrip_blocks=False\n)\n\n\ndef generate_h_file(model):\n cpp_model = 
CppGenerationHelper(model)\n return _generate_output_file(\"CppHeaderTemplate.jinja2\", cpp_model)\n\n\ndef _generate_output_file(template_file, cpp_model):\n template = env.get_template(template_file)\n templated_out_f = template.render(cpp_model=cpp_model)\n return templated_out_f\n","repo_name":"LukasWoodtli/python_antlr_jinja_example","sub_path":"templated_code_generation.py","file_name":"templated_code_generation.py","file_ext":"py","file_size_in_byte":6978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6285746057","text":"import logging\nimport os\nimport re\nimport subprocess\nimport time\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\n\nfrom ambulance.models import Ambulance, \\\n AmbulanceCapability\nfrom hospital.models import Hospital\nfrom equipment.models import EquipmentType, Equipment, EquipmentItem\nfrom login.models import GroupAmbulancePermission, GroupHospitalPermission, \\\n UserAmbulancePermission, UserHospitalPermission\nfrom mqtt.client import BaseClient\nfrom mqtt.subscribe import SubscribeClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass MQTTTestCase(StaticLiveServerTestCase):\n\n def __init__(self, *args, **kwargs):\n\n # call super\n super().__init__(*args, **kwargs)\n\n @classmethod\n def run_until_success(cls, args, **kwargs):\n\n # parameters\n MAX_TRIES = kwargs.pop('MAX_TRIES', 10)\n \n # keep trying \n k = 0\n success = False\n while not success and k < MAX_TRIES:\n if k > 0:\n time.sleep(1)\n k += 1\n retval = subprocess.run(args, **kwargs)\n success = retval.returncode == 0\n\n if not success:\n raise Exception('Did not succeed!')\n\n @classmethod\n def run_until_fail(cls, args, **kwargs):\n\n # parameters\n MAX_TRIES = kwargs.pop('MAX_TRIES', 10)\n \n # keep trying \n k = 0\n success = True\n while success and k < MAX_TRIES:\n if k > 0:\n time.sleep(1)\n k += 1\n retval = subprocess.run(args, **kwargs)\n success = retval.returncode == 0\n\n if success:\n raise Exception('Did not fail!')\n \n @classmethod\n def setUpClass(cls):\n\n try:\n\n # can get user?\n User.objects.get(username=settings.MQTT['USERNAME'])\n\n except:\n\n # Add admin user\n User.objects.create_user(\n username=settings.MQTT['USERNAME'],\n email='admin@user.com',\n password=settings.MQTT['PASSWORD'],\n is_superuser=True)\n \n # call super to create server\n super().setUpClass()\n\n # determine server and port\n protocol, host, port = cls.live_server_url.split(':')\n host = host[2:]\n \n print('\\n>> Starting django server at {}'.format(cls.live_server_url))\n \n print('>> Stoping mosquitto')\n \n # stop mosquito server\n retval = subprocess.run([\"service\",\n \"mosquitto\",\n \"stop\"])\n\n # print('>> Stoping mqttclient')\n \n # # stop mqttclient\n # retval = subprocess.run([\"supervisorctl\",\n # \"stop\",\n # \"mqttclient\"])\n \n # # Wait for shutdown\n # cls.run_until_fail([\"service\",\n # \"mosquitto\",\n # \"status\"])\n\n time.sleep(2)\n \n try:\n\n # saving persistence file\n os.rename(\"/var/lib/mosquitto/mosquitto.db\",\n \"/var/lib/mosquitto/mosquitto.db.bak\")\n\n except:\n print(\"* * * CAN'T BACKUP MOSQUITTO PERSISTENCE FILE * * *\")\n\n # Does configuration exist?\n config = Path(\"/etc/mosquitto/conf.d/default.conf\")\n if not config.is_file():\n\n # Can't find configuration, can we recover from backup?\n try:\n\n # move current configuration file\n 
os.rename(\"/etc/mosquitto/conf.d/default.conf.bak\",\n \"/etc/mosquitto/conf.d/default.conf\")\n\n print('* * * MOSQUITTO/DEFAULT.CONF RECOVERED * * *')\n \n except:\n raise Exception(\"Can't find /etc/mosquitto/conf.d/default.conf.\")\n \n # create test configuration file\n with open('/etc/mosquitto/conf.d/test.conf', \"w\") as outfile:\n \n # change default host and port\n cat = subprocess.Popen([\"cat\",\n \"/etc/mosquitto/conf.d/default.conf\"],\n stdout= subprocess.PIPE)\n sed = subprocess.run([\"sed\",\n \"s/8000/{}/\".format(port)],\n stdin=cat.stdout,\n stdout=outfile)\n cat.wait()\n\n # move current configuration file\n os.rename(\"/etc/mosquitto/conf.d/default.conf\",\n \"/etc/mosquitto/conf.d/default.conf.bak\")\n\n print('>> Start mosquitto with test settings')\n\n # start mosquito server\n retval = subprocess.run([\"service\",\n \"mosquitto\",\n \"start\"])\n\n # Wait for start\n cls.run_until_success([\"service\",\n \"mosquitto\",\n \"status\"])\n\n\n time.sleep(2)\n \n cls.setUpTestData()\n\n @classmethod\n def tearDownClass(cls):\n\n # call super to shutdown server\n super().tearDownClass()\n \n print('>> Stopping mosquitto with test settings')\n \n # stop mosquito server\n retval = subprocess.run([\"service\",\n \"mosquitto\",\n \"stop\"])\n \n # # Wait for shutdown\n # cls.run_until_fail([\"service\",\n # \"mosquitto\",\n # \"status\"])\n \n time.sleep(2)\n \n # remove test configuration file\n os.rename(\"/etc/mosquitto/conf.d/test.conf\",\n \"/etc/mosquitto/conf.d/test.conf.bak\")\n \n # restore current configuration file\n os.rename(\"/etc/mosquitto/conf.d/default.conf.bak\",\n \"/etc/mosquitto/conf.d/default.conf\")\n\n try:\n \n # restore persistence file\n os.rename(\"/var/lib/mosquitto/mosquitto.db.bak\",\n \"/var/lib/mosquitto/mosquitto.db\")\n except:\n print(\"* * * CAN'T RECOVER MOSQUITTO PERSISTENCE FILE * * *\")\n \n print('>> Starting mosquitto')\n \n # start mosquito server\n retval = subprocess.run([\"service\",\n \"mosquitto\",\n \"start\"])\n \n # Wait for start\n cls.run_until_success([\"service\",\n \"mosquitto\",\n \"status\"])\n \n # print('>> Starting mqttclient')\n \n # # start mqttclient\n # retval = subprocess.run([\"supervisorctl\",\n # \"start\",\n # \"mqttclient\"])\n\n time.sleep(2)\n \n # from django.db import connections\n\n # for conn in connections.all():\n # conn.close()\n \n @classmethod\n def setUpTestData(cls):\n\n # Retrieve admin\n cls.u1 = User.objects.get(username=settings.MQTT['USERNAME'])\n\n try:\n \n # Add users\n cls.u2 = User.objects.get(username='testuser1')\n cls.u3 = User.objects.get(username='testuser2')\n cls.u4 = User.objects.get(username='testuser3')\n cls.u5 = User.objects.get(username='testuser4')\n cls.u6 = User.objects.get(username='highprioritytestuser')\n cls.u7 = User.objects.get(username='lowprioritytestuser')\n cls.u8 = User.objects.get(username='staff')\n\n # Add ambulances\n cls.a1 = Ambulance.objects.get(identifier='BC-179')\n cls.a2 = Ambulance.objects.get(identifier='BC-180')\n cls.a3 = Ambulance.objects.get(identifier='BC-181')\n\n # Add hospitals\n cls.h1 = Hospital.objects.get(name='Hospital General')\n cls.h2 = Hospital.objects.get(name='Hospital CruzRoja')\n cls.h3 = Hospital.objects.get(name='Hospital Nuevo')\n\n # Add equipment\n cls.e1 = Equipment.objects.get(name='X-ray')\n cls.e2 = Equipment.objects.get(name='Beds')\n cls.e3 = Equipment.objects.get(name='MRI - Ressonance')\n \n # add hospital equipment\n cls.he1 = EquipmentItem.objects.get(equipmentholder=cls.h1.equipmentholder,\n 
equipment=cls.e1)\n \n cls.he2 = EquipmentItem.objects.get(equipmentholder=cls.h1.equipmentholder,\n equipment=cls.e2)\n\n cls.he3 = EquipmentItem.objects.get(equipmentholder=cls.h2.equipmentholder,\n equipment=cls.e1)\n \n cls.he4 = EquipmentItem.objects.get(equipmentholder=cls.h2.equipmentholder,\n equipment=cls.e3)\n \n cls.he5 = EquipmentItem.objects.get(equipmentholder=cls.h3.equipmentholder,\n equipment=cls.e1)\n\n except:\n\n # Add users\n cls.u2 = User.objects.create_user(\n username='testuser1',\n email='test1@user.com',\n password='top_secret')\n \n cls.u3 = User.objects.create_user(\n username='testuser2',\n email='test2@user.com',\n password='very_secret')\n\n cls.u4 = User.objects.create_user(\n username='testuser3',\n email='test3@user.com',\n password='highly_secret')\n\n cls.u5 = User.objects.create_user(\n username='testuser4',\n email='test4@user.com',\n password='extremely_secret')\n\n cls.u6 = User.objects.create_user(\n username='highprioritytestuser',\n email='test6@user.com',\n password='exceptionally_secret')\n\n cls.u7 = User.objects.create_user(\n username='lowprioritytestuser',\n email='test7@user.com',\n password='exceedingly_secret')\n\n cls.u8 = User.objects.create_user(\n username='staff',\n email='staff@user.com',\n password='so_secret',\n is_staff=True)\n\n # Add ambulances\n cls.a1 = Ambulance.objects.create(\n identifier='BC-179',\n comment='Maintenance due',\n capability=AmbulanceCapability.B.name,\n updated_by=cls.u1)\n \n cls.a2 = Ambulance.objects.create(\n identifier='BC-180',\n comment='Need painting',\n capability=AmbulanceCapability.A.name,\n updated_by=cls.u1)\n \n cls.a3 = Ambulance.objects.create(\n identifier='BC-181',\n comment='Engine overhaul',\n capability=AmbulanceCapability.R.name,\n updated_by=cls.u1)\n \n # Add hospitals\n cls.h1 = Hospital.objects.create(\n name='Hospital General',\n number=\"1234\",\n street=\"don't know\",\n comment=\"no comments\",\n updated_by=cls.u1)\n \n cls.h2 = Hospital.objects.create(\n name='Hospital CruzRoja',\n number=\"4321\",\n street='Forgot',\n updated_by=cls.u1)\n \n cls.h3 = Hospital.objects.create(\n name='Hospital Nuevo',\n number=\"0000\",\n street='Not built yet',\n updated_by=cls.u1)\n \n # add equipment\n cls.e1 = Equipment.objects.create(\n name='X-ray',\n type=EquipmentType.B.name)\n \n cls.e2 = Equipment.objects.create(\n name='Beds',\n type=EquipmentType.I.name)\n \n cls.e3 = Equipment.objects.create(\n name='MRI - Ressonance', # name with space!\n type=EquipmentType.B.name)\n \n # add hospital equipment\n cls.he1 = EquipmentItem.objects.create(\n equipmentholder=cls.h1.equipmentholder,\n equipment=cls.e1,\n value='True',\n updated_by=cls.u1)\n \n cls.he2 = EquipmentItem.objects.create(\n equipmentholder=cls.h1.equipmentholder,\n equipment=cls.e2,\n value='45',\n updated_by=cls.u1)\n\n cls.he3 = EquipmentItem.objects.create(\n equipmentholder=cls.h2.equipmentholder,\n equipment=cls.e1,\n value='False',\n updated_by=cls.u1)\n \n cls.he4 = EquipmentItem.objects.create(\n equipmentholder=cls.h2.equipmentholder,\n equipment=cls.e3,\n value='True',\n updated_by=cls.u1)\n \n cls.he5 = EquipmentItem.objects.create(\n equipmentholder=cls.h3.equipmentholder,\n equipment=cls.e1,\n value='True',\n updated_by=cls.u1)\n\n # add hospitals to users\n UserHospitalPermission.objects.create(user=cls.u2,\n hospital=cls.h1,\n can_write=True)\n UserHospitalPermission.objects.create(user=cls.u2,\n hospital=cls.h3)\n\n UserHospitalPermission.objects.create(user=cls.u3,\n hospital=cls.h1)\n 
UserHospitalPermission.objects.create(user=cls.u3,\n                                                  hospital=cls.h2,\n                                                  can_write=True)\n\n            # u4 has no hospitals\n\n            # add ambulances to users\n            UserAmbulancePermission.objects.create(user=cls.u1,\n                                                   ambulance=cls.a2,\n                                                   can_write=True)\n\n            # u2 has no ambulances\n\n            UserAmbulancePermission.objects.create(user=cls.u3,\n                                                   ambulance=cls.a1,\n                                                   can_read=False)\n            UserAmbulancePermission.objects.create(user=cls.u3,\n                                                   ambulance=cls.a3,\n                                                   can_write=True)\n\n            # Create groups\n            cls.g1 = Group.objects.create(name='EMTs')\n            cls.g2 = Group.objects.create(name='Drivers')\n            cls.g3 = Group.objects.create(name='Dispatcher')\n\n            # add hospitals to groups\n            GroupHospitalPermission.objects.create(group=cls.g1,\n                                                   hospital=cls.h1,\n                                                   can_write=True)\n            GroupHospitalPermission.objects.create(group=cls.g1,\n                                                   hospital=cls.h3)\n\n            GroupHospitalPermission.objects.create(group=cls.g2,\n                                                   hospital=cls.h1)\n            GroupHospitalPermission.objects.create(group=cls.g2,\n                                                   hospital=cls.h2,\n                                                   can_write=True)\n\n            # g3 has no hospitals\n\n            # add ambulances to groups\n            GroupAmbulancePermission.objects.create(group=cls.g1,\n                                                    ambulance=cls.a2,\n                                                    can_write=True)\n\n            # g2 has no ambulances\n\n            GroupAmbulancePermission.objects.create(group=cls.g3,\n                                                    ambulance=cls.a1,\n                                                    can_read=False)\n            GroupAmbulancePermission.objects.create(group=cls.g3,\n                                                    ambulance=cls.a3,\n                                                    can_write=True)\n\n            cls.u4.groups.set([cls.g2])\n            cls.u5.groups.set([cls.g1, cls.g3])\n\n\nclass MQTTTestClientPublishSubscribeMixin:\n\n    def __init__(self, *args, **kwargs):\n\n        # call super\n        super().__init__(*args, **kwargs)\n\n        # publishing and subscribing\n        self.publishing = 0\n        self.subscribing = 0\n\n    def has_published(self):\n        return self.publishing == 0\n\n    def has_subscribed(self):\n        return self.subscribing == 0\n\n    def done(self):\n        return self.has_published() and self.has_subscribed()\n\n    def publish(self, topic, payload=None, qos=0, retain=False):\n\n        # publish\n        self.publishing += 1\n        if self.debug:\n            logger.debug(\"Publishing to '{}', publishing={}\".format(topic, self.publishing))\n\n        super().publish(topic, payload, qos, retain)\n\n    def on_publish(self, client, userdata, mid):\n\n        # did publish?\n        super().on_publish(client, userdata, mid)\n        self.publishing -= 1\n\n        if self.debug:\n            logger.debug(\"Just published mid={}, publishing={}\".format(mid, self.publishing))\n\n    def subscribe(self, topic, qos=0):\n\n        # subscribe\n        self.subscribing += 1\n        if self.debug:\n            logger.debug(\"Subscribing to '{}', subscribing={}\".format(topic, self.subscribing))\n\n        super().subscribe(topic, qos)\n\n    def on_subscribe(self, client, userdata, mid, granted_qos):\n\n        # did subscribe?\n        super().on_subscribe(client, userdata, mid, granted_qos)\n        self.subscribing -= 1\n\n        if self.debug:\n            logger.debug('Just subscribed mid={}, qos={}, subscribing={}'.format(mid, granted_qos, self.subscribing))\n\n\nclass MQTTTestSubscribeClient(MQTTTestClientPublishSubscribeMixin,\n                              SubscribeClient):\n\n    def __init__(self, *args, **kwargs):\n\n        # call super\n        super().__init__(*args, **kwargs)\n\n\n# MQTTTestClient\nclass MQTTTestClient(MQTTTestClientPublishSubscribeMixin,\n                     BaseClient):\n\n    def __init__(self, *args, **kwargs):\n\n        self.check_payload = kwargs.pop('check_payload', True)\n        \n        # call super\n        super().__init__(*args, **kwargs)\n\n        # expect\n        self.expecting_topics = {}\n        self.expecting_messages = {}\n        self.expecting_patterns = {}\n        self.expecting = 0\n\n    def is_expecting(self):\n        return self.expecting > 0\n\n    def done(self):\n        return super().done() and not self.is_expecting()\n\n    # The callback for 
when a subscribed message is received from the server.\n def on_message(self, client, userdata, msg):\n\n if msg.topic in self.expecting_topics:\n\n # regular topic\n topic = msg.topic\n\n else:\n \n # can it be a pattern?\n match = False\n for k, p in self.expecting_patterns.items():\n if p.match(msg.topic):\n # initialize topic\n topic = k\n match = True\n break\n\n if not match:\n # did not match\n raise Exception(\"Unexpected message topic '{}'\".format(msg.topic))\n\n # handle expected message\n self.expecting_topics[topic] += 1\n self.expecting -= 1\n\n # is message payload expected? remove\n try:\n \n self.expecting_messages[topic].remove(msg.payload)\n\n except ValueError:\n\n if self.check_payload:\n raise Exception('Unexpected message \"{}:{}\"'.format(msg.topic, msg.payload))\n\n if self.debug:\n logger.debug('Just received {}[count={},expecting={}]:{}'.format(msg.topic,\n self.expecting_topics[topic],\n self.expecting,\n msg.payload))\n\n def expect(self, topic, msg=None, qos=2):\n\n # not subscribed\n if topic not in self.expecting_topics:\n\n # pattern topic?\n if '+' in topic or '#' in topic:\n pattern = topic.replace('+', '[^/]+').replace('#', '[a-zA-Z0-9_/ ]+')\n self.expecting_patterns[topic] = re.compile(pattern)\n # print('pattern = {}'.format(pattern))\n\n # initialize\n self.expecting_topics[topic] = 0\n self.expecting_messages[topic] = []\n\n # and subscribe\n self.subscribe(topic, qos)\n\n else:\n\n logger.debug(\"Already subscribed to topic '{}'\".format(topic))\n\n self.expecting += 1\n self.expecting_messages[topic].append(msg)\n\n\nclass TestMQTT:\n\n DELAY = 0.1\n\n def __init__(self, *args, **kwargs):\n\n # call super\n super().__init__(*args, **kwargs)\n\n def is_connected(self, client, max_tries=10):\n\n # loop\n client.loop()\n\n # connected?\n k = 0\n while (not client.is_connected()) and k < max_tries:\n k += 1\n client.loop()\n time.sleep(TestMQTT.DELAY)\n\n self.assertEqual(client.is_connected(), True)\n\n def is_disconnected(self, client, max_tries=10):\n\n # loop\n client.loop()\n\n # disconnected?\n k = 0\n while (client.is_connected()) and k < max_tries:\n k += 1\n client.loop()\n time.sleep(TestMQTT.DELAY)\n\n self.assertEqual(client.is_connected(), False)\n\n def is_subscribed(self, client, max_tries=10):\n\n # loop\n client.loop()\n\n # client.loop_start()\n\n # connected?\n k = 0\n while (not client.has_subscribed()) and k < max_tries:\n k += 1\n client.loop()\n time.sleep(TestMQTT.DELAY)\n\n # client.loop_stop()\n\n logger.debug('has_subscribed = {}, k = {}'.format(client.has_subscribed(), k))\n\n self.assertEqual(client.has_subscribed(), True)\n\n def loop(self, *clients, max_tries=10):\n\n # logger.debug('clients = {}'.format(clients))\n # logger.debug('MAX_TRIES = {}'.format(MAX_TRIES))\n\n # starts clients\n for client in clients:\n client.loop()\n\n # connected?\n k = 0\n done = False\n while not done and k < max_tries:\n done = True\n for client in clients:\n done = done and client.done()\n k += 1\n # stop clients\n for client in clients:\n client.loop()\n time.sleep(TestMQTT.DELAY)\n\n if not done:\n # logger.debug('NOT DONE:')\n for client in clients:\n if hasattr(client, 'expecting'):\n logger.debug('expecting = {}'.format(client.expecting))\n if hasattr(client, 'publishing'):\n logger.debug('publishing = {}'.format(client.publishing))\n if hasattr(client, 'subscribing'):\n logger.debug('subscribing= {}'.format(client.subscribing))\n\n self.assertEqual(done, 
True)\n","repo_name":"EMSTrack/WebServerAndClient","sub_path":"mqtt/tests/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":23663,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"17635066040","text":"\"\"\"\nYou are given two integers, x and y, which represent your current location on a Cartesian grid: (x, y). You are also given an array points where each points[i] = [ai, bi] represents that a point exists at (ai, bi). A point is valid if it shares the same x-coordinate or the same y-coordinate as your location.\n\nReturn the index (0-indexed) of the valid point with the smallest Manhattan distance from your current location. If there are multiple, return the valid point with the smallest index. If there are no valid points, return -1.\n\nThe Manhattan distance between two points (x1, y1) and (x2, y2) is abs(x1 - x2) + abs(y1 - y2).\n\nExample 1:\n\nInput: x = 3, y = 4, points = [[1,2],[3,1],[2,4],[2,3],[4,4]]\nOutput: 2\nExplanation: Of all the points, only [3,1], [2,4] and [4,4] are valid. Of the valid points, [2,4] and [4,4] have the smallest \nManhattan distance from your current location, with a distance of 1. [2,4] has the smallest index, so return 2.\n\nExample 2:\n\nInput: x = 3, y = 4, points = [[3,4]]\nOutput: 0\nExplanation: The answer is allowed to be on the same location as your current location.\n\nExample 3:\n\nInput: x = 3, y = 4, points = [[2,3]]\nOutput: -1\nExplanation: There are no valid points.\n\"\"\"\ndef nearestValidPoint(x, y, points):\n manhDist, indexPoint = 0,0\n tempmanhDist, tempIndex = 0,0\n check = False\n for index,point in enumerate(points):\n if point[0] == x and point[1] == y:\n check = True\n return points.index(point)\n else:\n if point[0] == x or point[1] == y:\n check = True\n tempmanhDist = abs(x - point[0]) + abs(y - point[1])\n tempIndex = index\n if manhDist > tempmanhDist:\n manhDist = tempmanhDist\n indexPoint = tempIndex\n if indexPoint == 0:\n manhDist = tempmanhDist\n indexPoint = tempIndex\n if check: \n return indexPoint\n else:\n return -1\n\n\nprint(nearestValidPoint(3,4,[[1,2],[3,1],[2,4],[2,3],[4,4]]))\n ","repo_name":"Kalai-code/Coding","sub_path":"nearestValidPoint_prob1779.py","file_name":"nearestValidPoint_prob1779.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39265113943","text":"import sys\nsys.path.append('../../')\nfrom cop_e_cat.copecat import CopECat, CopECatParams\nimport numpy as np\nfrom sklearn import preprocessing\nimport pickle\nimport os\nimport pandas as pd\n\nstate_feats = ['anchor_age', 'patientweight', 'gender',\n 'cad', 'afib', 'chf', 'ckd', 'esrd', 'paralysis', 'parathyroid',\n 'rhabdo', 'sarcoid', 'sepsis', 'expired', 'bpdia', 'bpsys', 'hr', 'rr',\n 'spo2', 'temp', 'alt', 'aniongap', 'bun', 'cpk', 'ca', 'chloride',\n 'creatinine', 'glucose', 'hgb', 'k', 'ldh', 'mg', 'na', 'p', 'wbc',\n 'betablockers', 'ca-iv', 'ca-noniv', 'cablockers', 'dextrose',\n 'hours-dextrose', 'fluids', 'insulin', 'k-iv', 'hours-k-iv', 'k-noniv',\n 'hours-k-noniv', 'loopdiuretics', 'hours-loopdiuretics', 'mg-iv',\n 'hours-mg-iv', 'mg-noniv', 'hours-mg-noniv', 'p-iv', 'hours-p-iv',\n 'p-noniv', 'hours-p-noniv', 'pnutrition', 'ponutrition', 'tpnutrition',\n 'vasopressors', 'hours-betablockers', 'hours-cablockers',\n 'hours-insulin', 'hours-ca-noniv', 'hours-vasopressors']\n\ndef state_transformer(frames, params, states=None, 
\ndef state_transformer(frames, params, states=None, feats=state_feats):\n\tprint('#:', len(feats))\n\ttransformer = params.output_dir + 'transformer.pkl'\n\tif states is None:\n\t\tstates = np.vstack([np.array(frames.loc[i, feats]).astype(float) for i in range(len(frames))])\n\tif os.path.isfile(transformer):\n\t\tscaler = pickle.load(open(transformer, 'rb'))\n\telse:\n\t\tscaler = preprocessing.StandardScaler().fit(states)\n\t\tpickle.dump(scaler, open(params.output_dir + 'transformer.pkl', 'wb'))\n\ttransformed_states = scaler.transform(states)\n\treturn transformed_states\n\n\ndef state_invtransformer(tstates, params):\n\ttransformer = params.output_dir + 'transformer.pkl'\n\tscaler = pickle.load(open(transformer, 'rb'))\n\tstates = scaler.inverse_transform(tstates)\n\n\treturn states\n\n\ndef transform(state, params):\n\ttransformer = params.output_dir + 'transformer.pkl'\n\tif os.path.isfile(transformer):\n\t\tscaler = pickle.load(open(transformer, 'rb'))\n\t\treturn scaler.transform([state])[0]\n\telse:\n\t\treturn state\n\n\ndef discretize(a, el='K'):\n\tif el == 'K':\n\t\tadict = {'none': 0, 'low2-iv': 0, 'low4-iv': 0, 'low6-iv': 0, 'high1-iv': 0,\n\t\t\t\t 'high2-iv': 0, 'high3-iv': 0, 'low-po': 0, 'med-po': 0, 'high-po': 0}\n\t\t# a = [iv dose, oral dose, iv hours]; ivd / ivh gives the IV infusion rate\n\t\tivd = float(a[0])\n\t\tivh = float(a[2])\n\t\torald = float(a[1])\n\t\t# print(ivd, ivh, orald)\n\n\t\tif ivd > 0:\n\t\t\tif ivh == 0.0: ivh = 1.0\n\t\t\trate = ivd / ivh\n\n\t\t\tif rate <= 10:\n\t\t\t\tif ivh <= 2:\n\t\t\t\t\tadict['low2-iv'] = 1\n\t\t\t\telif ivh <= 4:\n\t\t\t\t\tadict['low4-iv'] = 1\n\t\t\t\telse:\n\t\t\t\t\tadict['low6-iv'] = 1\n\n\t\t\tif rate > 10:\n\t\t\t\tif ivh <= 1:\n\t\t\t\t\tadict['high1-iv'] = 1\n\t\t\t\telif ivh <= 2:\n\t\t\t\t\tadict['high2-iv'] = 1\n\t\t\t\telse:\n\t\t\t\t\tadict['high3-iv'] = 1\n\n\t\tif orald > 0:\n\t\t\tif orald <= 20:\n\t\t\t\tadict['low-po'] = 1\n\t\t\telif orald <= 40:\n\t\t\t\tadict['med-po'] = 1\n\t\t\telse:\n\t\t\t\tadict['high-po'] = 1\n\n\n\telif el == 'Mg':\n\n\t\tadict = {'none': 0, 'low4-iv': 0, 'high1-iv': 0, 'high2-iv': 0, 'high3-iv': 0, 'low-po': 0, 'med-po': 0, 'high-po': 0}\n\n\t\tivd = float(a[0])\n\t\tivh = float(a[2])\n\t\torald = float(a[1])\n\t\t# print(ivd, ivh, orald)\n\n\t\tif ivd > 0:\n\t\t\tif ivd > 4: ivd = 4\n\t\t\tif ivh == 0.0: ivh = 1.0\n\t\t\trate = ivd / ivh\n\t\t\tif rate < 1: adict['low4-iv'] = 1\n\t\t\tif rate >= 1:\n\t\t\t\tif ivh <= 1:\n\t\t\t\t\tadict['high1-iv'] = 1\n\t\t\t\telif ivh <= 2:\n\t\t\t\t\tadict['high2-iv'] = 1\n\t\t\t\telse:\n\t\t\t\t\tadict['high3-iv'] = 1\n\n\t\tif orald > 0:\n\t\t\tif orald < 400:\n\t\t\t\tadict['low-po'] = 1\n\t\t\telif orald < 800:\n\t\t\t\tadict['med-po'] = 1\n\t\t\telse:\n\t\t\t\tadict['high-po'] = 1\n\n\n\telif el == 'P':\n\t\tadict = {'none': 0, 'low2-iv': 0, 'high1-iv': 0, 'high3-iv': 0, 'low-po': 0, 'med-po': 0, 'high-po': 0}\n\n\t\tivd = float(a[0])\n\t\tivh = float(a[2])\n\t\torald = float(a[1])\n\t\t# print(ivd, ivh, orald)\n\n\t\tif ivd > 0:\n\t\t\tif ivh == 0.0: ivh = 1.0\n\t\t\trate = ivd / ivh\n\n\t\t\tif rate <= 1: adict['low2-iv'] = 1\n\t\t\tif rate > 1:\n\t\t\t\tif ivh < 6:\n\t\t\t\t\tadict['high1-iv'] = 1\n\t\t\t\telse:\n\t\t\t\t\tadict['high3-iv'] = 1\n\n\t\tif orald > 0:\n\t\t\tif orald < 250:\n\t\t\t\tadict['low-po'] = 1\n\t\t\telif orald < 500:\n\t\t\t\tadict['med-po'] = 1\n\t\t\telse:\n\t\t\t\tadict['high-po'] = 1\n\n\tda = list(adict.values())\n\tif sum(da) == 0: da[0] = 1\n\n\treturn da\n
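# Editor's example (illustrative, not in the original file): discretize([20.0, 0.0, 4.0], el='K')\n# encodes a 20-unit IV dose over 4 h (rate 5/h) as the 'low4-iv' one-hot bucket; the 'none'\n# flag is the fallback set only when no other bucket fires.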
\n\ndef reward(s, a, ns, w=np.array([1, 1, 1, 1, 1]) / 5., el='K'):\n\trdict = {'cost-iv': 0, 'cost-po': 0, 'high': 0, 'low': 0, 'other': 0}\n\n\tif a[0] > 0: rdict['cost-iv'] -= 1\n\tif a[1] > 0: rdict['cost-po'] -= 1\n\n\tif el == 'K': rdict['high'], rdict['low'] = sigmoid(ns[0], el=el)\n\tif el == 'Mg': rdict['high'], rdict['low'] = sigmoid(ns[1], el=el)\n\tif el == 'P': rdict['high'], rdict['low'] = sigmoid(ns[2], el=el)\n\t# What does this mean? And-ing a set of floats\n\t# if el == 'K':\n\t# print(str(s[30]))\n\t# print(str(s[31]))\n\t# print(str(s[32]))\n\t# rdict['other'] = -1 * (s[30] & s[31] & s[32])\n\n\tphi = np.array(list(rdict.values()))\n\tr = np.dot(phi, w)\n\n\treturn phi, r\n\n\ndef sigmoid(x, el='K'):\n\tminmax = {'K': [3.5, 4.5], 'Mg': [1.5, 2.5], 'P': [2.5, 4.5]}\n\tlmin, lmax = minmax[el]\n\n\tif x < lmin:\n\t\tz = 1 / (1 + np.exp(-3.5 * (x - (lmin - 1)))) - 1\n\t\treturn (0, z)\n\telif x > lmax:\n\t\tz = - 1 / (1 + np.exp(-3.5 * (x - (lmax + 1))))\n\t\treturn (z, 0)\n\telse:\n\t\tz = 0\n\t\treturn (z, z)\n\n\ndef generate_samples(vnum, trainFrames, params, el='K'):\n\tframe = trainFrames[trainFrames.hadm_id == vnum]\n\tall_st = []\n\tall_nst = []\n\tall_a = []\n\tall_phi = []\n\tall_r = []\n\n\tfor i in frame.index[:-1]:\n\n\t\ts = list(frame.loc[i, state_feats])\n\t\tst = transform(s, params)\n\t\tall_st.append(st)\n\t\tif el == 'K':\n\t\t\ta = list(frame.loc[i + 1, ['k-iv', 'k-noniv', 'hours-k-iv']])\n\t\t# a = list(frame.loc[i + 1, ['k-iv', 'k-noniv']])\n\t\telif el == 'Mg':\n\t\t\ta = list(frame.loc[i + 1, ['mg-iv', 'mg-noniv', 'hours-mg-iv']])\n\t\t# a = list(frame.loc[i + 1, ['mg-iv', 'mg-noniv']])\n\t\telif el == 'P':\n\t\t\ta = list(frame.loc[i + 1, ['p-iv', 'p-noniv', 'hours-p-iv']])\n\t\t# a = list(frame.loc[i + 1, ['p-iv', 'p-noniv']])\n\t\tda = discretize(a, el=el)\n\t\tall_a.append(da)\n\t\tns = list(frame.loc[i + 1, state_feats])\n\t\tnst = transform(ns, params)\n\t\tall_nst.append(nst)\n\t\tphi, r = reward(s, a, ns, el=el)\n\t\tall_phi.append(phi)\n\t\tall_r.append(r)\n\t# print('s:', s, '\\n\\na:', a, '\\n\\nns', ns, '\\n\\nr', phi, r)\n\n\treturn (all_st, all_a, all_nst, all_phi, all_r)\n\n\ndef combine(ent):\n\treturn np.concatenate(np.array(ent))\n\n\ndef get_tuples(frames, params, filename='tuples.pkl', el='K'):\n\ttransition_tuples = {'s': [], 'a': [], 'ns': [], 'phi': [], 'r': [], 'vnum': []}\n\n\tif el == 'K':\n\t\tvisits = frames[(frames['k-iv'] != 0) | (frames['k-noniv'] != 0)].hadm_id.unique()\n\telif el == 'Mg':\n\t\tvisits = frames[(frames['mg-iv'] != 0) | (frames['mg-noniv'] != 0)].hadm_id.unique()\n\telif el == 'P':\n\t\tvisits = frames[(frames['p-iv'] != 0) | (frames['p-noniv'] != 0)].hadm_id.unique()\n\telse:\n\t\tvisits = frames.visit_num.unique()\n\n\tfor vnum in visits:\n\t\tif len(frames[frames.hadm_id == vnum]) > 1:\n\t\t\ts, a, ns, phi, r = generate_samples(vnum, frames, params, el)\n\t\t\ttransition_tuples['s'].append(np.array(s))\n\t\t\ttransition_tuples['a'].append(np.array(a))\n\t\t\ttransition_tuples['ns'].append(ns)\n\t\t\ttransition_tuples['phi'].append(phi)\n\t\t\ttransition_tuples['r'].append(r)\n\t\t\ttransition_tuples['vnum'].append(np.repeat(vnum, len(r)))\n\n\tfor k in transition_tuples.keys():\n\t\ttransition_tuples[k] = combine(transition_tuples[k])\n\n\tpickle.dump(transition_tuples, open(params.output_dir + filename, 'wb'))\n\n\treturn transition_tuples\n
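# Editor's note (sketch, not in the original file): after combine(), every value in the\n# returned dict is one stacked array over all admissions, e.g. tuples['s'] has shape\n# (n_transitions, len(state_feats)) and tuples['r'] has shape (n_transitions,).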
\nif __name__ == '__main__':\n\tparams = CopECatParams('params.json')\n\tprint("Generating state spaces")\n\tcopecat = CopECat(params)\n\tcopecat.generate_state_spaces()\n\tallFrames = pd.read_csv(params.output_dir + 'allFrames.csv')\n\n\tprint('Total number of processed adms =', len(allFrames.hadm_id.unique()), '; number of transitions =', len(allFrames))\n\tallFrames = allFrames.sort_values(by=['hadm_id', 'timestamp'])\n\n\ttrainFrames = allFrames[:int(len(allFrames)*0.75)].reset_index()\n\ttestFrames = allFrames[int(len(allFrames)*0.75):].reset_index()\n\ttrainFrames.to_csv(params.output_dir+'trainFrames.csv', index=False)\n\ttestFrames.to_csv(params.output_dir+'testFrames.csv', index=False)\n\tprint('Transformer')\n\tts = state_transformer(trainFrames, params)\n\tvisits = allFrames.hadm_id.unique()\n\n\tprint('Training set:')\n\tprint('Total number of processed adms =', len(trainFrames.hadm_id.unique()), '; number of transitions =', len(trainFrames))\n\tprint('Potassium Cohort size')\n\tprint('Number administered iv K =', len(allFrames[allFrames['k-iv'] != 0].hadm_id.unique()))\n\tprint('Number administered oral K =', len(allFrames[allFrames['k-noniv'] != 0].hadm_id.unique()))\n\tprint('Get tuples')\n\tget_tuples(trainFrames, params, filename='trainKtuples.pkl', el='K')\n\tget_tuples(testFrames, params, filename='testKtuples.pkl', el='K')","repo_name":"aishwarya-rm/cop-e-cat","sub_path":"vignettes/electrolyte_repletion/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"}
{"seq_id":"17611867406","text":"# Given a list of numbers, determine how many distinct numbers occur in it.\n\nnumber = int(input('Enter how many numbers the list should contain: '))\nimport random\nlist_1 = []\nset_1 = set()\n\nfor i in range(number):\n    list_1.append(random.randint(0, 10))\n    set_1.add(list_1[i])\n\nprint(list_1)\n\nprint(set_1)\nprint(len(set_1))\n\n","repo_name":"969279/PythonHomework","sub_path":"PythonSeminars/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"29773680118","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import unicode_literals\n# django\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\n# trigger happy\nfrom django_th.models import TriggerService\nfrom django_th.publish import Pub\n\nfrom logging import getLogger\nfrom multiprocessing import Pool, TimeoutError\n# create logger\nlogger = getLogger('django_th.trigger_happy')\n\n\nclass Command(BaseCommand):\n\n    help = 'Trigger all the services and publish the data coming from the cache'\n\n    def handle(self, *args, **options):\n        \"\"\"\n            get all the triggers that need to be handled\n        \"\"\"\n        from django.db import connection\n        connection.close()\n        failed_tries = settings.DJANGO_TH.get('failed_tries', 10)\n        trigger = TriggerService.objects.filter(\n            Q(provider_failed__lte=failed_tries) |\n            Q(consumer_failed__lte=failed_tries),\n            status=True,\n            user__is_active=True,\n            provider__name__status=True,\n            consumer__name__status=True,\n        ).select_related('consumer__name', 'provider__name')\n        try:\n            with Pool(processes=settings.DJANGO_TH.get('processes')) as pool:\n                p = Pub()\n                result = pool.map_async(p.publishing, trigger)\n                result.get(timeout=60)\n        except TimeoutError as e:\n            logger.warning(e)\n","repo_name":"foxmask/django-th","sub_path":"django_th/management/commands/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":1350,"dataset":"github-code","pt":"62"}
{"seq_id":"36978907073","text":"
from article.models import Article\r\nfrom django.contrib.auth.models import User\r\nfrom django.db import models\r\nfrom django.utils import timezone\r\n\r\n\r\nclass Comment(models.Model):\r\n    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"comments\")\r\n    article = models.ForeignKey(\r\n        Article, on_delete=models.CASCADE, related_name=\"comments\"\r\n    )\r\n    parent = models.ForeignKey(\r\n        \"self\",\r\n        null=True,\r\n        blank=True,\r\n        on_delete=models.SET_NULL,\r\n        related_name=\"children\",\r\n    )\r\n    content = models.TextField(verbose_name=\"comment\")\r\n    created = models.DateTimeField(default=timezone.now)\r\n\r\n    def __str__(self):\r\n        return self.content[:20]\r\n\r\n    class Meta:\r\n        db_table = \"comment_db\"\r\n        ordering = [\"-created\"]\r\n","repo_name":"Sonder-MX/Blogs-Django-Vue","sub_path":"blog_dv/comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"62"}
{"seq_id":"6973745998","text":"from functools import partial\nfrom typing import Tuple, Callable\n\nimport numpy as np\nfrom emukit.core import ParameterSpace, ContinuousParameter\n\n\ndef _hartmann_function(\n    x: np.ndarray, alpha: np.ndarray, A: np.ndarray, P: np.ndarray\n) -> np.ndarray:\n    \"\"\"The hartmann function.\n\n    Parameters\n    ----------\n    x:\n        Numerical representation of the points for which the function should be\n        evaluated. shape = (n_points, n_features)\n    alpha:\n        The parameters of the Hartmann function. shape = (4,)\n    A:\n        The A-matrix, see function documentation. shape = (4, n_features)\n    P:\n        The P-matrix, see function documentation. shape = (4, n_features)\n\n    Returns\n    -------\n    y:\n        The function value. shape = (n_points, 1)\n\n    \"\"\"\n    exponent = np.exp(\n        -np.sum(A[:, :, None] * (x.T[None, :, :] - P[:, :, None]) ** 2, axis=1)\n    )\n    y = (-alpha[None, :] @ exponent).reshape(-1, 1)\n    return y\n\n\ndef hartmann3d_function(\n    x: np.ndarray,\n    alpha1: float = 1.0,\n    alpha2: float = 1.2,\n    alpha3: float = 3.0,\n    alpha4: float = 3.2,\n    output_noise: float = 0.0,\n) -> np.ndarray:\n    alpha = np.array([alpha1, alpha2, alpha3, alpha4])\n    A = np.array([[3.0, 10, 30], [0.1, 10, 35], [3.0, 10, 30], [0.1, 10, 35]])\n    P = 1e-4 * np.array(\n        [\n            [3689, 1170, 2673],\n            [4699, 4387, 7470],\n            [1091, 8732, 5547],\n            [381, 5743, 8828],\n        ]\n    )\n    y = _hartmann_function(x, alpha, A, P)\n    y += np.random.normal(loc=0.0, scale=output_noise, size=y.shape)\n    return y\n\n\ndef hartmann3d(\n    alpha1: float = None,\n    alpha2: float = None,\n    alpha3: float = None,\n    alpha4: float = None,\n) -> Tuple[Callable, ParameterSpace]:\n    if alpha1 is None:\n        alpha1 = np.random.uniform(low=1.0, high=1.02)\n    if alpha2 is None:\n        alpha2 = np.random.uniform(low=1.18, high=1.2)\n    if alpha3 is None:\n        alpha3 = np.random.uniform(low=2.8, high=3.0)\n    if alpha4 is None:\n        alpha4 = np.random.uniform(low=3.2, high=3.4)\n\n    return partial(\n        hartmann3d_function, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4=alpha4\n    ), ParameterSpace(\n        [\n            ContinuousParameter(\"x1\", 0.0, 1.0),\n            ContinuousParameter(\"x2\", 0.0, 1.0),\n            ContinuousParameter(\"x3\", 0.0, 1.0),\n        ]\n    )\n
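# Editor's usage sketch (assumption, not in the original file):\n#   f, space = hartmann3d()\n#   y = f(np.random.rand(5, 3))  # y.shape == (5, 1); the sampled alphas randomize the task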
\n\ndef hartmann6d_function(\n    x: np.ndarray,\n    alpha1: float = 1.0,\n    alpha2: float = 1.2,\n    alpha3: float = 3.0,\n    alpha4: float = 3.2,\n    output_noise: float = 0.0,\n):\n    alpha = np.array([alpha1, alpha2, alpha3, alpha4])\n    A = np.array(\n        [\n            [10, 3, 17, 3.5, 1.7, 8],\n            [0.05, 10, 17, 0.1, 8, 14],\n            [3, 3.5, 1.7, 10, 17, 8],\n            [17, 8, 0.05, 10, 0.1, 14],\n        ]\n    )\n    P = 1e-4 * np.array(\n        [\n            [1312, 1696, 5569, 124, 8283, 5886],\n            [2329, 4135, 8307, 3736, 1004, 9991],\n            [2348, 1451, 3522, 2883, 3047, 6650],\n            [4047, 8828, 8732, 5743, 1091, 381],\n        ]\n    )\n    y = _hartmann_function(x, alpha, A, P)\n    y += np.random.normal(loc=0.0, scale=output_noise, size=y.shape)\n    return y\n\n\ndef hartmann6d(\n    alpha1: float = None,\n    alpha2: float = None,\n    alpha3: float = None,\n    alpha4: float = None,\n) -> Tuple[Callable, ParameterSpace]:\n    if alpha1 is None:\n        alpha1 = np.random.uniform(low=1.0, high=1.02)\n    if alpha2 is None:\n        alpha2 = np.random.uniform(low=1.18, high=1.2)\n    if alpha3 is None:\n        alpha3 = np.random.uniform(low=2.8, high=3.0)\n    if alpha4 is None:\n        alpha4 = np.random.uniform(low=3.2, high=3.4)\n\n    return partial(\n        hartmann6d_function, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4=alpha4\n    ), ParameterSpace(\n        [\n            ContinuousParameter(\"x1\", 0.0, 1.0),\n            ContinuousParameter(\"x2\", 0.0, 1.0),\n            ContinuousParameter(\"x3\", 0.0, 1.0),\n            ContinuousParameter(\"x4\", 0.0, 1.0),\n            ContinuousParameter(\"x5\", 0.0, 1.0),\n            ContinuousParameter(\"x6\", 0.0, 1.0),\n        ]\n    )\n","repo_name":"boschresearch/transfergpbo","sub_path":"transfergpbo/benchmarks/hartmann.py","file_name":"hartmann.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"}
{"seq_id":"9896118889","text":"# In the source text file (hotline.txt), insert the phrase \"Министерства образования\n# Ростовской области\" after each occurrence of the phrase \"Горячая линия\" and count the\n# number of insertions made. Count how many phone numbers end in \"03\" or \"50\".\n# Print the hotline phone numbers related to the ЕГЭ/ГИА state exams.\nimport re\n\nhot = re.compile(r\"Горячая линия\")\nnum = re.compile(r\"(03)|(50)\")\nege = re.compile(r\"ЕГЭ.*(8863[0-9]{7})\")\nwith open('hotline.txt', 'r', encoding='utf-8') as file:\n    text = file.read()\n    a = hot.subn(\"Горячая линия Министерства образования Ростовской области\", text)\n    print(a[0], \"\\n\" \"Number of insertions made:\", a[1])\n    print('Count of numbers ending in \"03\" or \"50\":', len(num.findall(text)))\n    print(\"Numbers related to ЕГЭ and ГИА:\", ', '.join(ege.findall(text)))\n","repo_name":"Krevetka0/Proj_1sem_Vlasenko","sub_path":"PZ_14/PZ_14_1.py","file_name":"PZ_14_1.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"450775594","text":"# author - Logan\r\n# Time - April 16 2017\r\n# Place - CUHKSZ\r\n\r\nimport os\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport re\r\n\r\n# Load all files from a directory in a DataFrame.\r\ndef load_directory_data(directory):\r\n    data = {}\r\n    data[\"sentence\"] = []\r\n    data[\"sentiment\"] = []\r\n    for file_path in os.listdir(directory):\r\n        with tf.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\r\n            data[\"sentence\"].append(f.read())\r\n            data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\r\n    return pd.DataFrame.from_dict(data)\r\n\r\n# Merge positive and negative examples, add a polarity column and shuffle.\r\ndef load_dataset(directory):\r\n    pos_df = load_directory_data(os.path.join(directory, \"pos\"))\r\n    neg_df = load_directory_data(os.path.join(directory, \"neg\"))\r\n    pos_df[\"polarity\"] = 1\r\n    neg_df[\"polarity\"] = 0\r\n    return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\r\n
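# Editor's note (illustrative, not in the original file): the regex above pulls the rating out\r\n# of an aclImdb file name, e.g. re.match(\"\\d+_(\\d+)\\.txt\", \"204_8.txt\").group(1) == '8'.\r\n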
\r\n# Download and process the dataset files.\r\ndef download_and_load_datasets(force_download=False):\r\n\r\n    train_path = os.path.join(\"./data\", \"aclImdb\", \"train\")\r\n\r\n    test_path = os.path.join(\"./data\", \"aclImdb\", \"test\")\r\n\r\n    train_df = load_dataset(train_path)\r\n    print('finish train_df')\r\n    test_df = load_dataset(test_path)\r\n    print('finish test_df')\r\n\r\n    return train_df, test_df\r\n\r\n\r\ntrain_df, test_df = download_and_load_datasets()\r\n\r\ntrain_df = pd.concat([train_df.sentence, train_df.polarity], axis=1)\r\ntest_df = pd.concat([test_df.sentence, test_df.polarity], axis=1)\r\n\r\nprint(train_df.head())\r\nprint(test_df.head())\r\ntrain_df.to_csv('./data/imdb_train.csv')\r\ntest_df.to_csv('./data/imdb_test.csv')\r\nprint('end')\r\n","repo_name":"hszhoushen/text_classification","sub_path":"sentiment-classification/load_imdbdata.py","file_name":"load_imdbdata.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"17499850427","text":"def hanoi(n, origen, destino, auxiliar):\r\n    \"\"\"\r\n    Recursive function that solves the Tower of Hanoi problem, printing each move.\r\n\r\n    Parameters:\r\n    - n: number of disks.\r\n    - origen: peg where the disks start.\r\n    - destino: peg the disks should be moved to.\r\n    - auxiliar: auxiliary peg used for intermediate moves.\r\n\r\n    \"\"\"\r\n    if n == 1:\r\n        # With a single disk, move it directly from origen to destino.\r\n        print(\"Move disk 1 from\", origen, \"to\", destino)\r\n        return\r\n    \r\n    # Move n-1 disks from origen to auxiliar, using destino as the spare peg.\r\n    hanoi(n-1, origen, auxiliar, destino)\r\n    \r\n    # Move disk n from origen to destino.\r\n    print(\"Move disk\", n, \"from\", origen, \"to\", destino)\r\n    \r\n    # Move n-1 disks from auxiliar to destino, using origen as the spare peg.\r\n    hanoi(n-1, auxiliar, destino, origen)\r\n\r\n\r\n# Ask the user for the number of disks\r\nn = int(input(\"Enter the number of disks: \"))\r\n\r\n# Call the hanoi function\r\nhanoi(n, 'A', 'C', 'B')\r\n","repo_name":"EdwarNolasco/Edwar_Nolasco_AlgoritmosyEstructuras","sub_path":"Cap6/torredehanoi.py","file_name":"torredehanoi.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"74034733064","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport collections \nimport csv\nimport numpy as np\nimport re\nimport string\nimport time\nimport sys, os \n\nfrom feature_extractor import *\n\nfilename = 'one-hot-dataset-small.pkl'\n\n\nclass Comment: \n\tdef __init__(self, example_id, words, labels, chars):\n\t\tself.example_id = example_id\n\t\tself.words = words\n\t\tself.labels = labels\n\t\tself.chars = chars \n
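# Editor's example (illustrative, not in the original file): e.g.\n# Comment('42', ['you', 'rock'], {'toxic'}, list('you rock ')) bundles one training row;\n# labels is None for test data and chars is None unless character-level parsing is enabled.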
\nclass DataSet:\n\tCLASSES = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n\tTRAIN_CSV = \"data/train.csv\"\n\tTEST_CSV = \"data/test.csv\"\n\tUNKNOWN_WORD = \"\"\n\tMIN_WORD_COUNT = 20\n\n\t# csv_filename = CSV file to read the comment data from\n\t# feature_extractor = function that converts a list of words into a list of word embeddings\n\tdef __init__(self, csv_filename, feature_extractor, count=None, test=False, use_glove=False, character_level=False, verbose=False):\n\t\tself.test = test\n\t\tself.character_level = character_level\n\t\tstart_time = int(round(time.time() * 1000)) \n\t\tself.comments, self.vocab = self.load_data(csv_filename, count) \n\t\tself.vocab = [] if self.test else DataSet.prune_vocabulary(self.vocab, use_glove)\n\t\tend_time = int(round(time.time() * 1000))\n\n\t\tself.feature_extractor = feature_extractor\n\t\tself.verbose = verbose\n\t\tself.x = None\n\t\tself.y = None\n\t\t\n\t\tif self.verbose:\n\t\t\tprint('Loaded {0} comments from \"{1}\" in {2} seconds.'.format(\n\t\t\t\tlen(self.comments),\n\t\t\t\tcsv_filename,\n\t\t\t\t(end_time-start_time) / 1000.0\n\t\t\t))\n\t\t\tprint('Vocabulary size = {0}'.format(len(self.vocab)))\n\n\t# Splits the input |text| into a list of words.\n\t# TODO: We may want to remove stop words and/or change this parsing in some way.\n\t@staticmethod\n\tdef split_into_words(text):\n\t\treturn re.findall(r\"[\\w'-]+|[.,!?;]\", text)\n\n\t@staticmethod\n\tdef get_glove_vocab():\n\t\tglove_vocab = set()\n\t\twith open('glove.twitter.27B/glove.twitter.27B.200d.txt') as f:\n\t\t \tfor line in f:\n\t\t \t\tglove_vocab.add(line.split(' ')[0])\n\t\treturn glove_vocab\n\n\t# Processes vocabulary by removing some subset of the words\n\t@staticmethod\n\tdef prune_vocabulary(vocab, use_glove):\n\t\tif use_glove:\n\t\t\t# words in both the glove vocab and the comment vocab\n\t\t\tglove_vocab = DataSet.get_glove_vocab().intersection(set(vocab.elements()))\n\t\telse:\n\t\t\tglove_vocab = set()\n\n\t\t# Only include words that occur >= MIN_WORD_COUNT times\n\t\tcomment_vocab = set([word for word, count in vocab.items() if count >= DataSet.MIN_WORD_COUNT])\n\t\t\n\t\tvocab = sorted(list(comment_vocab.union(glove_vocab))) \n\t\tvocab.append(DataSet.UNKNOWN_WORD)\n\n\t\treturn { word:index for index, word in enumerate(vocab) }\n\t\t\n\t# Loads all of the comment data from the given |csv_filename|, only reads\n\t# the first |count| comments from the dataset (for debugging)\n\tdef load_data(self, csv_filename, count=None):\n\t\tcomments = []\n\t\tvocab = collections.Counter()\n\t\tprint(\"Started loading data\")\n\t\twith open(csv_filename) as csvfile:\n\t\t\treader = csv.DictReader(csvfile)\n\t\t\tfor i, row in enumerate(reader):\n\t\t\t\tif i == count: break\n\n\t\t\t\twords = DataSet.split_into_words(row['comment_text']) #list \n\t\t\t\tlabels = None if self.test else set([c for c in DataSet.CLASSES if row[c] == '1'])\n\t\n\t\t\t\tchars = None \n\t\t\t\tif self.character_level: \n\t\t\t\t\ttxt = ''\n\t\t\t\t\tfor word in words: \n\t\t\t\t\t\tfor c in word: \n\t\t\t\t\t\t\ttxt += c \n\t\t\t\t\t\ttxt += ' '\n\t\t\t\t\tchars = list(txt)\n\n\t\t\t\tcomments.append(Comment(row['id'], words, labels, chars))\n\t\t\t\t\n\t\t\t\tif not self.test:\n\t\t\t\t\tif self.character_level:\n\t\t\t\t\t\tvocab.update([ch.lower() for ch in chars])\n\t\t\t\t\telse:\n\t\t\t\t\t\tvocab.update([word.lower() for word in words])\n\t\tprint(\"Finished loading data\")\n\t\treturn comments, vocab\n
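\t# Editor's note (sketch, not from the original file): load_data + prune_vocabulary yield a\n\t# word -> index dict whose last slot is UNKNOWN_WORD, e.g. {'aardvark': 0, ..., UNKNOWN_WORD: V - 1},\n\t# which presumably gives the feature extractor a stable bucket for out-of-vocabulary tokens.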
\n\t# Converts a set of |labels| into the appropriate \"one-hot\" vector (i.e. there will\n\t# be ones in the indices corresponding to the input |labels|)\n\t@staticmethod\n\tdef to_label_vector(labels):\n\t\treturn np.array([ 1 if klass in labels else 0 for klass in DataSet.CLASSES ])\n\n\t# Returns the fully preprocessed input (x) and output (y):\n\t# (x) will be a list of numpy arrays with shape (comment length, embedding size)\n\t# - Each element of x is a list of word embeddings for the words in the comment.\n\t# (y) will be a list of numpy arrays with shape (# of classes, )\n\tdef get_data(self):\n\t\tif self.x is None:\n\t\t\tstart_time = int(round(time.time() * 1000))\n\t\t\tself.x = [ self.feature_extractor.parse(c.words, self.vocab, self.character_level, c.chars) for c in self.comments ]\n\t\t\tif not self.test:\n\t\t\t\tself.y = [ DataSet.to_label_vector(c.labels) for c in self.comments ]\n\t\t\tend_time = int(round(time.time() * 1000))\n\n\t\t\tif self.verbose:\n\t\t\t\tprint('Processing data (in get_data()) took {0} seconds.'.format((end_time-start_time) / 1000.0))\n\n\t\treturn self.x, self.y\n\n\t# Takes in a |model| and evaluates its performance on this dataset\n\t# TODO: Implement This (probably want loss, accuracy, precision, recall, F1, etc.). \n\tdef evaluate_model(self, model):\n\t\t#y_hat = [ model.predict(x_value) for x_value in self.x ]\n\t\tpass\n\n# Debugging / Testing code\nif __name__ == \"__main__\":\n\tfeature_extractor = OneHotFeatureExtractor(100) \n\tdata = DataSet(DataSet.TRAIN_CSV, feature_extractor, count=None, use_glove=False, character_level=True, verbose=True)\n\tx, y = data.get_data()\n\n","repo_name":"ashemag/cs224n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39426474154","text":"import urllib.request\nimport json\nimport pandas as pd\nimport datetime\n\nserviceKey = 'vTfdXtGsny7xRerwunqDq1PezLyeNmoFmRIx2YwnlYV4sA8EZfBl%2BKsVcjjugqAkuFX1KCdo4lag3L91sHFX%2BQ%3D%3D'\n\ndef getRequestUrl(url):\n    '''\n    Sends a request to the URL and returns the response body.\n    ---------------------------------\n    parameter : url -> full OPEN API request URL\n    '''\n    req = urllib.request.Request(url)\n\n    try:\n        res = urllib.request.urlopen(req)\n        if res.getcode() == 200:\n            print(f\"[{datetime.datetime.now()} ] Url Request Success\")\n            return res.read().decode('utf-8')\n    except Exception as e:\n        print(e)\n        print(f\"[{datetime.datetime.now()}] Error for URL : {url}\")\n        return None\n\n\ndef getInterSectionInfo():\n    service_url = 'http://apis.data.go.kr/6260000/CrossCartypeTrafficeVolumeService/getCrossCartypeTrafficeVolumeList'\n    params = f'?serviceKey={serviceKey}' # authentication key\n    params += f'&numOfRows=10'\n    params += f'&pageNo=1'\n    params += f'&resultType=json'\n    params += f'&CLCT_DT=201809051205'\n    url = service_url + params\n    \n    retData = getRequestUrl(url)\n\n    if retData is None:\n        return None\n    else:\n        return json.loads(retData)\n
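# Editor's note (illustrative, not in the original file): the assembled URL ends up as\n# service_url + '?serviceKey=...&numOfRows=10&pageNo=1&resultType=json&CLCT_DT=201809051205',\n# i.e. one page of ten intersection traffic-volume rows for that collection timestamp.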
\n\ndef getInterSectionService():\n    result = []\n\n    jsonData = getInterSectionInfo()\n    \n    if jsonData['getCrossCartypeTrafficVolumeList']['header']['code'] == '00':\n        if jsonData['getCrossCartypeTrafficVolumeList']['item'] == '':\n            print('Service error!!')\n        else:\n            \n            for item in jsonData['getCrossCartypeTrafficVolumeList']['item']:\n                ISTL_LCTN = item['ISTL_LCTN']\n                CLCT_DT = item['CLCT_DT']\n                SUM_LRGTFVL = item['SUM_LRGTFVL']\n                Y_CRDN = item['Y_CRDN']\n                IXR_ID = item['IXR_ID']\n                IXR_NM = item['IXR_NM']\n                SUM_MDDLTFVL = item['SUM_MDDLTFVL']\n                SUM_SMALTFVL = item['SUM_SMALTFVL']\n                X_CRDN = item['X_CRDN']\n                CMRA_ID = item['CMRA_ID']\n                \n                result.append([ISTL_LCTN,CLCT_DT,SUM_LRGTFVL,Y_CRDN,IXR_ID,IXR_NM,SUM_MDDLTFVL,SUM_SMALTFVL,X_CRDN,CMRA_ID])\n\n    return result\n\n\n\n\ndef main():\n\n    jsondata = getInterSectionService()\n\n    print(jsondata)\n\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Jeonseulah/StudyBigData","sub_path":"MINI_PROJECT/crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17216252353","text":"class Movie:\n    def __init__(self, film_id, name, genres, rating, language):\n        \"\"\"\n        Initialize movie properties\n        :param str film_id: ID of the film\n        :param str name: Name of the film\n        :param list genres: Genres of the film\n        :param float rating: Rating of the film\n        :param str language: Language of the film\n        \"\"\"\n        self.film_id = film_id\n        self.name = name\n        self.genres = genres\n        self.rating = rating\n        self.language = language\n\n    def update(self, name, genres, rating, language):\n        \"\"\"\n        Update movie properties if not None\n        \"\"\"\n        if name:\n            self.name = name\n        if genres:\n            self.genres = genres\n        if rating:\n            self.rating = rating\n        if language:\n            self.language = language\n","repo_name":"mochatek/flask_restful","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
    wokoło\")\n already_killed = True\n self.world.kill_organism(organism)\n self.world.event_listener.add_string(\" Ginie: \" + organism.draw() + \" \" + str(organism.get_location()))\n else:\n if j == -1 and i == 0:\n new_location.set(new_location.get_x() + 2, new_location.get_y() - 2)\n elif j == 1 and i == 0:\n new_location.set(new_location.get_x() - 2, new_location.get_y() + 2)\n if new_location.get_y() < 0 or new_location.get_x() < 0 or new_location.get_x() >= self.world.get_board().get_n() \\\n or new_location.get_y() >= self.world.get_board.get_m() or (i == 1 and j == 1) or (i == -1 and j == -1):\n pass\n else:\n if not already_killed:\n self.world.event_listener.add_string(\"Barszcz \" + self.get_location() + \"zabija \"\n \"wszystkich\\n wokoło\")\n already_killed = True\n self.world.kill_organism(organism)\n self.world.event_listener.add_string(\n \" Ginie: \" + organism.draw() + \" \" + organism.get_location())\n\n def _set_strength(self):\n self._strength = 9\n\n def get_color(self):\n return \"#404746\"\n","repo_name":"MichalTarnacki/_rozne","sub_path":"_python/symulacja_organizmow/Simulation/Organisms/Plants/PineBrosht.py","file_name":"PineBrosht.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27086639673","text":"import pandas as pd\nimport os\n\nsource_path = \"data/\" \n\ndef load_data(folder_name):\n arr = os.listdir(source_path+folder_name)\n print(arr)\n\n # filter out the scv files\n files = []\n for file in arr:\n file[-3:] == \".scv\"\n files.append(file)\n\n df = None\n\n for file in files:\n file_path = source_path + folder_name + \"/\" + file\n\n if(df is None):\n df = pd.read_csv(file_path)\n else:\n df = pd.concat([df, pd.read_csv(file_path)])\n\n\n return df\n\n","repo_name":"SamSweere/Covid19-News-Analysis","sub_path":"src/visualization/get_viz_data.py","file_name":"get_viz_data.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40235069327","text":"import cv2\nimport pyzbar.pyzbar as pyzbar\nfrom selenium import webdriver\n\ndriver=webdriver.Chrome()\n\ncap=cv2.VideoCapture(0)\nwhile True:\n sucess,image=cap.read()\n if not sucess:\n break\n\n gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n qrcodes=pyzbar.decode(gray)\n\n for qrcode in qrcodes:\n data=qrcode.data.decode('utf-8')\n\n driver.get(data)\n\n cv2.imshow('img',image)\n if cv2.waitKey(1) & 0XFF==ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\ndriver.quit()","repo_name":"anandhuarjuna/mediapipeprojetcs","sub_path":"qr code.py","file_name":"qr code.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32669175241","text":"# Create an empty set\ns = set()\n\n# Add elements to set\ns.add(1)\ns.add(3)\ns.add(5)\ns.add(4)\ns.add(3)\nprint(s)\n\n# Remove elemnt from set\ns.remove(5)\nprint(s)\n\n#printing size of the set \nprint(f\"The number of elements in the set are {len(s)}\")","repo_name":"divyangarora21/CS50-Web_Development","sub_path":"lecture_2/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23063949291","text":"import json\nimport numpy as np\nimport pathlib\nimport time\n\nimport nbformat\nfrom nbconvert.preprocessors import ExecutePreprocessor, 
{"seq_id":"23063949291","text":"import json\nimport numpy as np\nimport pathlib\nimport time\n\nimport nbformat\nfrom nbconvert.preprocessors import ExecutePreprocessor, CellExecutionError\n\n\nfrom ..log import logger\nfrom .ipynbname import name as ipynb_name, path as ipynb_path\nfrom .. import paths\n\n# Timing and Performance\n\ndef timing_info(method):\n    def wrapper(*args, **kw):\n        start_time = time.time()\n        result = method(*args, **kw)\n        end_time = time.time()\n        logger.info(f\"timing_info: {method.__name__}\"\n                    f\"@{round((end_time-start_time)*1000,1)} ms\")\n\n        return result\n\n    return wrapper\n\ndef record_time_interval(section, start_time, line_break=False):\n    \"\"\"Record a time interval since the last timestamp\"\"\"\n    end_time = time.time()\n    delta = end_time - start_time\n    if delta < 1:\n        delta *= 1000\n        units = \"ms\"\n    else:\n        units = \"s\"\n    if line_break:\n        logger.debug(\"PROCESS_TIME:{:>36}    {} {}\\n\".format(section, round(delta, 1), units))\n    else:\n        logger.debug(\"PROCESS_TIME:{:>36}    {} {}\".format(section, round(delta, 1), units))\n    return end_time\n\ndef normalize_numpy_dict(d):\n    ret = d.copy()\n    for k, v in ret.items():\n        if isinstance(v, np.generic):\n            ret[k] = np.asscalar(v)\n    return ret\n\ndef save_json(filename, obj, indent=2, sort_keys=True):\n    \"\"\"Dump an object to disk in json format\n\n    filename: pathname\n        Filename to dump to\n    obj: object\n        Object to dump\n    indent: integer\n        number of characters to indent\n    sort_keys: boolean\n        Whether to sort keys before writing. Should be True if you ever use revision control\n        on the resulting json file.\n    \"\"\"\n    blob = json.dumps(obj, indent=indent, sort_keys=sort_keys)\n\n    with open(filename, 'w') as fw:\n        fw.write(blob)\n\ndef load_json(filename):\n    \"\"\"Read a json file from disk\"\"\"\n    with open(filename) as f:\n        obj = json.load(f)\n    return obj\n\ndef head_file(filename, n=5):\n    \"\"\"Return the first `n` lines of a file\n    \"\"\"\n    with open(filename, 'r') as fd:\n        lines = []\n        for i, line in enumerate(fd):\n            if i >= n:\n                break\n            lines.append(line)\n    return \"\".join(lines)\n\ndef list_dir(path, fully_qualified=False, glob_pattern='*'):\n    \"\"\"do an ls on a path\n\n    fully_qualified: boolean (default: False)\n        If True, return a list of fully qualified pathlib objects.\n        if False, return just the bare filenames\n    glob_pattern: glob (default: '*')\n        File pattern to match\n\n    Returns\n    -------\n    A list of names, or fully qualified pathlib objects\"\"\"\n    if fully_qualified:\n        return list(pathlib.Path(path).glob(glob_pattern))\n\n    return [file.name for file in pathlib.Path(path).glob(glob_pattern)]\n\ndef normalize_to_list(str_or_iterable):\n    \"\"\"Convert strings to lists. Convert None to an empty list. 
Convert all other iterables to lists\n \"\"\"\n if isinstance(str_or_iterable, str):\n return [str_or_iterable]\n if str_or_iterable is None:\n return []\n return str_or_iterable\n\n\ndef run_notebook(*,\n notebook_name=None,\n notebook_path=None,\n output_notebook_name=None,\n output_notebook_path=None,\n timeout=-1,\n notebook_version=4,\n kernel='python3',\n ):\n \"\"\"Execute a jupyter notebook\n\n kernel name is an issue: https://github.com/jupyter/nbconvert/issues/515\n\n \"\"\"\n if notebook_path is None:\n notebook_path = paths['notebook_path']\n else:\n notebook_path = pathlib.Path(notebook_path)\n\n if output_notebook_path is None:\n output_notebook_path = paths['interim_data_path']\n else:\n output_notebook_path = pathlib.Path(output_notebook_path)\n\n if output_notebook_name is None:\n output_notebook_name = notebook_name\n\n output_notebook_fq = output_notebook_path / output_notebook_name\n\n with open(notebook_path / notebook_name) as f:\n nb = nbformat.read(f, as_version=notebook_version)\n\n ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel)\n try:\n out = ep.preprocess(nb, {'metadata': {'path': notebook_path}})\n except CellExecutionError:\n out = None\n msg = f\"\"\"Error executing the notebook \"{notebook_name}\".\n\n See notebook \"{str(output_notebook_fq)}\" for the traceback.'\n \"\"\"\n logger.error(msg)\n raise\n finally:\n with open(output_notebook_fq, mode='w', encoding='utf-8') as f:\n nbformat.write(nb, f)\n return output_notebook_name\n","repo_name":"hackalog/easydata","sub_path":"{{ cookiecutter.repo_name }}/{{ cookiecutter.module_name }}/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"81"} +{"seq_id":"21098239689","text":"\"\"\"Tests for storage_to_bq_importer.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom google.auth import credentials\nfrom google.cloud import bigquery\nfrom google.cloud import storage\n\nfrom jobs.workers.bigquery import storage_to_bq_importer\n\n\ndef _make_credentials():\n creds = mock.create_autospec(\n credentials.Credentials, instance=True, spec_set=True)\n return creds\n\n\nclass StorageToBQImporterTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'CSV with no overwrite',\n # Worker parameters\n 'import_json': False,\n 'rows_to_skip': 0,\n 'autodetect': True,\n 'schema': None,\n 'overwrite': False,\n 'dont_create': True,\n # Resulting job config\n 'job_config_source_format': None,\n 'job_config_skip_leading_rows': 0,\n 'job_config_write_disposition': 'WRITE_APPEND',\n 'job_config_create_disposition': 'CREATE_NEVER'\n },\n {\n 'testcase_name': 'CSV with overwrite',\n # Worker parameters\n 'import_json': False,\n 'rows_to_skip': 2,\n 'autodetect': False,\n 'schema': None,\n 'overwrite': True,\n 'dont_create': False,\n # Resulting job config\n 'job_config_source_format': None,\n 'job_config_skip_leading_rows': 2,\n 'job_config_write_disposition': 'WRITE_TRUNCATE',\n 'job_config_create_disposition': 'CREATE_IF_NEEDED'\n },\n {\n 'testcase_name': 'JSON with overwrite',\n # Worker parameters\n 'import_json': True,\n 'rows_to_skip': 0,\n 'autodetect': True,\n 'schema': None,\n 'overwrite': True,\n 'dont_create': False,\n # Resulting job config\n 'job_config_source_format': 'NEWLINE_DELIMITED_JSON',\n 'job_config_skip_leading_rows': None,\n 'job_config_write_disposition': 
'WRITE_TRUNCATE',\n 'job_config_create_disposition': 'CREATE_IF_NEEDED'\n },\n )\n def test_load_table_from_uri_with_config(self,\n import_json,\n rows_to_skip,\n autodetect,\n schema,\n overwrite,\n dont_create,\n job_config_source_format,\n job_config_skip_leading_rows,\n job_config_write_disposition,\n job_config_create_disposition):\n worker_inst = storage_to_bq_importer.StorageToBQImporter(\n {\n 'job_id': 'JOBID',\n 'import_json': import_json,\n 'rows_to_skip': rows_to_skip,\n 'autodetect': autodetect,\n 'schema': schema,\n 'overwrite': overwrite,\n 'dont_create': dont_create,\n },\n pipeline_id=1,\n job_id=1,\n logger_project='PROJECT',\n logger_credentials=_make_credentials())\n mock_job = mock.create_autospec(\n bigquery.job.QueryJob, instance=True, spec_set=True)\n mock_job.error_result = None\n mock_job.state = 'DONE'\n mock_bq_client = mock.create_autospec(\n bigquery.Client, instance=True, spec_set=True)\n mock_bq_client.load_table_from_uri.return_value = mock_job\n self.enter_context(\n mock.patch.object(\n storage, 'Client', autospec=True, spec_set=True))\n self.enter_context(\n mock.patch.object(\n worker_inst,\n '_get_client',\n return_value=mock_bq_client,\n autospec=True,\n spec_set=True))\n worker_inst._execute()\n mock_bq_client.load_table_from_uri.assert_called_once()\n load_table_call_args = mock_bq_client.load_table_from_uri.call_args\n call_job_config = load_table_call_args.kwargs['job_config']\n self.assertEqual(call_job_config.autodetect, autodetect)\n self.assertEqual(call_job_config.schema, schema)\n self.assertEqual(call_job_config.source_format,\n job_config_source_format)\n self.assertEqual(call_job_config.skip_leading_rows,\n job_config_skip_leading_rows)\n self.assertEqual(call_job_config.write_disposition,\n job_config_write_disposition)\n self.assertEqual(call_job_config.create_disposition,\n job_config_create_disposition)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"google/crmint","sub_path":"backend/tests/jobs/unit/workers/storage_to_bq_importer_tests.py","file_name":"storage_to_bq_importer_tests.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"81"} +{"seq_id":"1380410562","text":"from typing import get_origin, Union, Type, get_args, TypeVar\n\nfrom .exceptions import DependencyValidationError\n\nT = TypeVar(\"T\")\n\n\ndef _validate_type_hint(type_hint: Type, value: T) -> T:\n \"\"\"\n Validate the value against the type hint.\n :param type_hint: The type hint.\n :param value: The value to validate.\n :return: The value if it is valid.\n \"\"\"\n if get_origin(type_hint) is Union:\n # To check union type hints we need to obtain the Union args.\n return _validate_union_type_hint(type_hint, value)\n if not isinstance(value, type_hint): # type: ignore[arg-type]\n # If the value is not an instance of the type hint, we raise a\n # DependencyValidationError.\n raise DependencyValidationError(\n expected_type=type_hint, # type: ignore[arg-type]\n received_type=type(value),\n )\n # If the value is valid, we return it.\n return value\n\n\ndef _validate_union_type_hint(type_hint: Type, value: T) -> T:\n \"\"\"\n Validate the value against a union type hint.\n :param type_hint: The union type hint.\n :param value: The value to validate.\n :return: The value if it is valid.\n \"\"\"\n args = get_args(type_hint)\n # Checking if the value is an instance of the union args.\n if not any(isinstance(value, arg) for arg in args):\n raise 
DependencyValidationError(\n expected_type=args, received_type=type(value)\n )\n # If the value is valid, we return it.\n return value\n","repo_name":"floydya/declarativex","sub_path":"src/declarativex/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"}
+{"seq_id":"32185857646","text":"# Pymatgen\nfrom pymatgen.core import Structure\nfrom pymatgen.analysis.local_env import CrystalNN, CutOffDictNN\nfrom pymatgen.io.vasp.outputs import Locpot\n\n# Misc \nimport math\nimport numpy as np\nimport pandas as pd\nimport warnings\n\n# surfaxe\nfrom surfaxe.generation import oxidation_states\nfrom surfaxe.io import plot_bond_analysis, plot_electrostatic_potential, _instantiate_structure\n\ndef cart_displacements(start, end, max_disp=0.1, save_txt=True,\ntxt_fname='cart_displacements.txt'):\n \"\"\"\n Produces a text file with the magnitudes of the displacements of atoms\n in Cartesian space.\n\n Args:\n start (`str`): Filename of initial structure file in any format \n supported by pymatgen or pymatgen structure object.\n end (`str`): Filename of final structure file in any format supported\n by pymatgen or pymatgen structure object.\n max_disp (`float`, optional): Minimum displacement magnitude to report; \n atoms that moved less than this are omitted. Defaults to 0.1 Å.\n save_txt (`bool`, optional): Save the displacements to file. Defaults to \n ``True``.\n txt_fname (`str`, optional): Filename of the txt file. Defaults to \n ``'cart_displacements.txt'``.\n\n Returns:\n None (default) or DataFrame of displacements of atoms in Cartesian space \n\n \"\"\"\n # Instantiate the structures \n start_struc = _instantiate_structure(start)\n end_struc = _instantiate_structure(end)\n\n # Add the site labels to the structure\n els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')\n el_dict = {i : 1 for i in els}\n site_labels = []\n\n for site in start_struc:\n symbol = site.specie.symbol\n site_labels.append((symbol,el_dict[symbol]))\n el_dict[symbol] +=1\n start_struc.add_site_property('', site_labels)\n\n # Convert to cartesian coordinates\n start_struc = start_struc.cart_coords\n end_struc = end_struc.cart_coords\n\n # Calculate the displacements\n disp_list = []\n for n, (start_coord, end_coord) in enumerate(zip(start_struc, end_struc)):\n xdisp = math.pow(start_coord[0] - end_coord[0], 2)\n ydisp = math.pow(start_coord[1] - end_coord[1], 2)\n zdisp = math.pow(start_coord[2] - end_coord[2], 2)\n d = math.sqrt(xdisp + ydisp + zdisp)\n label = site_labels[n]\n if d >= max_disp:\n disp_list.append({\n 'site': n+1,\n 'atom': label,\n # this makes the displacements round to the same number of \n # decimal places as max displacement, for presentation \n 'displacement': round(d, int(format(max_disp, 'E')[-1])) \n })\n # Save as txt file\n df = pd.DataFrame(disp_list)\n\n if save_txt: \n df.to_csv(txt_fname, header=True, index=False, sep='\\t', mode='w')\n else: \n return df\n\ndef bond_analysis(structure, bond, nn_method=CrystalNN(), ox_states=None, \nsave_csv=True, csv_fname='bond_analysis.csv', save_plt=False, \nplt_fname='bond_analysis.png', **kwargs):\n \"\"\"\n Parses the structure looking for bonds between atoms. Check the validity of\n the nearest neighbour method on the bulk structure before using it on slabs.\n\n Args:\n structure (`str`): filename of structure, takes all pymatgen-supported \n formats, including pmg structure object\n bond (`list`): Bond to analyse e.g. 
``['Y', 'O']``\n nn_method (`class`, optional): The coordination number prediction \n algorithm used. Because the ``nn_method`` is a class, the class \n needs to be imported from ``pymatgen.analysis.local_env`` before it \n can be instantiated here. Defaults to ``CrystalNN()``.\n ox_states (``None``, `list` or `dict`, optional): Add oxidation states \n to the structure. Different types of oxidation states specified will \n result in different pymatgen functions used. The options are: \n \n * if supplied as ``list``: The oxidation states are added by site \n \n e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``\n \n * if supplied as ``dict``: The oxidation states are added by element\n \n e.g. ``{'Fe': 3, 'O':-2}``\n \n * if ``None``: The oxidation states are added by guess. \n \n Defaults to ``None``. \n save_csv (`bool`, optional): Makes a csv file with the c coordinate of \n the first atom and bond length. Defaults to ``True``.\n csv_fname (`str`, optional): Filename of the csv file. Defaults to \n ``'bond_analysis.csv'``.\n save_plt (`bool`, optional): Make and save the bond analysis plot. \n Defaults to ``False``. \n plt_fname (`str`, optional): Filename of the plot. Defaults to \n ``'bond_analysis.png'``. \n\n Returns:\n DataFrame with the c coordinate of the first atom and bond length\n \"\"\"\n struc = _instantiate_structure(structure)\n struc = oxidation_states(structure=struc, ox_states=ox_states)\n\n if len(bond) > 2: \n warnings.warn('Bond with more than two elements supplied. '\n 'Only the first two elements will be treated as a bond.')\n\n # Iterates through the structure, looking for pairs of bonded atoms. If the\n # sites match, the bond distance is calculated and passed to a dataframe\n bonds_info = []\n for n, pos in enumerate(struc):\n if pos.specie.symbol == bond[0]:\n nearest_neighbours = nn_method.get_nn_info(struc, n)\n matched_sites = []\n for d in nearest_neighbours:\n if d.get('site').specie.symbol == bond[1]:\n matched_sites.append(d)\n bond_distances = [\n struc.get_distance(n,x['site_index']) for x in matched_sites\n ]\n bonds_info.append({\n '{}_index'.format(bond[0]): n+1,\n '{}_c_coord'.format(bond[0]): pos.c,\n '{}-{}_bond_distance'.format(bond[0],bond[1]): np.mean(bond_distances)\n })\n\n df = pd.DataFrame(bonds_info)\n \n # Save plot and csv, or return the DataFrame \n if save_plt: \n plot_bond_analysis(bond, df=df, plt_fname=plt_fname, **kwargs)\n if save_csv: \n if not csv_fname.endswith('.csv'):\n csv_fname += '.csv'\n df.to_csv(csv_fname, header=True, index=False)\n else: \n return df\n\n\ndef electrostatic_potential(locpot='./LOCPOT', lattice_vector=None,\nsave_csv=True, csv_fname='potential.csv', save_plt=True, \nplt_fname='potential.png', **kwargs):\n \"\"\"\n Reads LOCPOT to get the planar and optionally macroscopic potential in \n c direction. \n\n Args:\n locpot (`str`, optional): The path to the LOCPOT file. Defaults to \n ``'./LOCPOT'``\n lattice_vector (`float`, optional): The periodicity of the slab, \n calculates macroscopic potential with that periodicity \n save_csv (`bool`, optional): Saves to csv. Defaults to ``True``.\n csv_fname (`str`, optional): Filename of the csv file. Defaults \n to ``'potential.csv'``.\n save_plt (`bool`, optional): Make and save the plot of electrostatic \n potential. Defaults to ``True``. \n plt_fname (`str`, optional): Filename of the plot. 
Defaults to \n ``'potential.png'``.\n\n Returns:\n DataFrame\n \"\"\"\n # Read potential and structure data\n lpt = Locpot.from_file(locpot)\n struc = Structure.from_file(locpot)\n\n # Planar potential\n planar = lpt.get_average_along_axis(2)\n df = pd.DataFrame(data=planar, columns=['planar']) \n \n # Calculate macroscopic potential\n if lattice_vector is not None: \n # Divide lattice parameter by no. of grid points in the direction\n resolution = struc.lattice.abc[2]/lpt.dim[2]\n\n # Get number of points over which the rolling average is evaluated\n points = int(lattice_vector/resolution)\n\n # Need extra points at the start and end of planar potential to evaluate the\n # macroscopic potential this makes use of the PBC where the end of one unit\n # cell coincides with start of the next one\n add_to_start = planar[(len(planar) - points): ]\n add_to_end = planar[0:points]\n pfm_data = np.concatenate((add_to_start,planar,add_to_end))\n pfm = pd.DataFrame(data=pfm_data, columns=['y'])\n\n # Macroscopic potential\n m_data = pfm.y.rolling(window=points, center=True).mean()\n macroscopic = m_data.iloc[points:(len(planar)+points)]\n macroscopic.reset_index(drop=True,inplace=True)\n df['macroscopic'] = macroscopic\n\n # Get gradient of the plot - this is used for convergence testing, to make \n # sure the potential is actually flat\n df['gradient'] = np.gradient(df['planar'])\n\n # Plot and save the graph, save the csv or return the dataframe\n if save_plt: \n plot_electrostatic_potential(df=df, plt_fname=plt_fname, **kwargs)\n if save_csv: \n if not csv_fname.endswith('.csv'):\n csv_fname += '.csv'\n df.to_csv(csv_fname, header=True, index=False)\n else: \n return df\n\ndef simple_nn(start, end=None, ox_states=None, nn_method=CrystalNN(), \nsave_csv=True, csv_fname='nn_data.csv'):\n \"\"\"\n Finds the nearest neighbours for simple structures. Before using on slabs\n make sure the nn_method works with the bulk structure. \n \n The ``site_index`` in the produced DataFrame or csv file is one-indexed and \n represents the atom index in the structure. \n\n Args:\n start (`str`): Filename of structure file in any format supported by \n pymatgen\n end (`str`, optional): Filename of structure file in any format \n supported by pymatgen. Use if comparing initial and final structures. \n The structures must have same constituent atoms and number of sites. \n Defaults to ``None``. \n ox_states (``None``, `list` or `dict`, optional): Add oxidation states \n to the structure. Different types of oxidation states specified will \n result in different pymatgen functions used. The options are: \n \n * if supplied as ``list``: The oxidation states are added by site \n \n e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``\n \n * if supplied as ``dict``: The oxidation states are added by element\n \n e.g. ``{'Fe': 3, 'O':-2}``\n \n * if ``None``: The oxidation states are added by guess. \n \n Defaults to ``None``. \n nn_method (`class`, optional): The coordination number prediction \n algorithm used. Because the ``nn_method`` is a class, the class \n needs to be imported from pymatgen.analysis.local_env before it \n can be instantiated here. Defaults to ``CrystalNN()``.\n save_csv (`bool`, optional): Save to a csv file. Defaults to ``True``.\n csv_fname (`str`, optional): Filename of the csv file. 
Defaults to \n ``'nn_data.csv'``\n \n Returns\n None (default) or DataFrame containing coordination data \n \"\"\"\n # Instantiate start structure object\n start_struc = _instantiate_structure(start)\n\n # Add atom site labels to the structure\n els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')\n el_dict = {i : 1 for i in els}\n site_labels = []\n for site in start_struc:\n symbol = site.specie.symbol\n site_labels.append((symbol,el_dict[symbol]))\n el_dict[symbol] +=1\n start_struc.add_site_property('', site_labels)\n \n # Add oxidation states and get bonded structure\n start_struc = oxidation_states(start_struc, ox_states)\n bonded_start = nn_method.get_bonded_structure(start_struc)\n\n if end: \n end_struc = _instantiate_structure(end)\n end_struc = oxidation_states(end_struc, ox_states)\n bonded_end = nn_method.get_bonded_structure(end_struc)\n \n # Iterate through structure, evaluate the coordination number and the \n # nearest neighbours specie for start and end structures, collects the\n # symbol and index of the site (atom) evaluated and its nearest neighbours \n df_list = []\n for n, site in enumerate(start_struc):\n cn_start = bonded_start.get_coordination_of_site(n)\n coord_start = bonded_start.get_connected_sites(n)\n specie_list = []\n for d in coord_start: \n spc = d.site.specie.symbol \n specie_list.append(spc)\n specie_list.sort()\n site_nn_start = ' '.join(specie_list)\n label = site_labels[n]\n\n if end: \n cn_end = bonded_end.get_coordination_of_site(n)\n coord_end = bonded_end.get_connected_sites(n)\n specie_list = []\n for d in coord_end: \n spc = d.site.specie.symbol \n specie_list.append(spc)\n specie_list.sort()\n site_nn_end = ' '.join(specie_list)\n df_list.append({'site': n+1, 'atom': label, 'cn_start': cn_start,\n 'nn_start': site_nn_start, 'cn_end': cn_end, 'nn_end': site_nn_end})\n\n else: \n df_list.append({'site_index': n+1, 'site': label,\n 'cn_start': cn_start, 'nn_start': site_nn_start})\n\n # Make a dataframe from df_list \n df = pd.DataFrame(df_list)\n\n # Save the csv file or return as dataframe \n if save_csv: \n if not csv_fname.endswith('.csv'):\n csv_fname += '.csv'\n df.to_csv(csv_fname, header=True, index=False)\n else: \n return df\n\n\ndef complex_nn(start, cut_off_dict, end=None, ox_states=None, \nsave_csv=True, csv_fname='nn_data.csv'):\n \"\"\"\n Finds the nearest neighbours for more complex structures. Uses CutOffDictNN()\n class as the nearest neighbour method. Check validity on bulk structure\n before applying to surface slabs. \n\n The ``site_index`` in the produced DataFrame or csv file is one-indexed and \n represents the atom index in the structure. \n\n Args:\n start (`str`): filename of structure, takes all pymatgen-supported formats.\n cut_off_dict (`dict`): Dictionary of bond lengths. The bonds should be \n specified with the oxidation states\\n\n e.g. ``{('Bi3+', 'O2-'): 2.46, ('V5+', 'O2-'): 1.73}``\n end (`str`, optional): filename of structure to analyse, use if \n comparing initial and final structures. The structures must have \n same constituent atoms and number of sites. Defaults to ``None``. \n ox_states (``None``, `list` or `dict`, optional): Add oxidation states \n to the structure. Different types of oxidation states specified will \n result in different pymatgen functions used. The options are: \n \n * if supplied as ``list``: The oxidation states are added by site \n \n e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``\n \n * if supplied as ``dict``: The oxidation states are added by element\n \n e.g. 
``{'Fe': 3, 'O':-2}``\n \n * if ``None``: The oxidation states are added by guess. \n\n Defaults to ``None`` \n save_csv (`bool`, optional): Save to a csv file. Defaults to ``True``.\n csv_fname (`str`, optional): Filename of the csv file. Defaults to \n ``'nn_data.csv'``\n \n Returns\n None (default) or DataFrame containing coordination data.\n \"\"\"\n # Instantiate start structure object\n start_struc = Structure.from_file(start)\n\n # Add atom site labels to the structure\n els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')\n el_dict = {i : 1 for i in els}\n site_labels = []\n for site in start_struc:\n symbol = site.specie.symbol\n site_labels.append((symbol,el_dict[symbol]))\n el_dict[symbol] +=1\n start_struc.add_site_property('', site_labels)\n\n # Add oxidation states \n start_struc = oxidation_states(start_struc, ox_states=ox_states)\n\n # Instantiate the nearest neighbour algorithm and get bonded structure\n codnn = CutOffDictNN(cut_off_dict=cut_off_dict)\n bonded_start = codnn.get_bonded_structure(start_struc)\n\n # Instantiate the end structure if provided\n if end: \n end_struc = Structure.from_file(end)\n end_struc = oxidation_states(end_struc, ox_states=ox_states)\n bonded_end = codnn.get_bonded_structure(end_struc)\n\n # Iterate through structure, evaluate the coordination number and the \n # nearest neighbours specie for start and end structures, collects the\n # symbol and index of the site (atom) evaluated and its nearest neighbours \n df_list = []\n for n, site in enumerate(start_struc):\n cn_start = bonded_start.get_coordination_of_site(n)\n coord_start = bonded_start.get_connected_sites(n)\n specie_list = []\n for d in coord_start: \n spc = d.site.specie.symbol \n specie_list.append(spc)\n specie_list.sort()\n site_nn_start = ' '.join(specie_list)\n label = site_labels[n]\n\n if end: \n cn_end = bonded_end.get_coordination_of_site(n)\n coord_end = bonded_end.get_connected_sites(n)\n specie_list = []\n for d in coord_end: \n spc = d.site.specie.symbol \n specie_list.append(spc)\n specie_list.sort()\n site_nn_end = ' '.join(specie_list)\n df_list.append({'site': n+1, 'atom': label, 'cn_start': cn_start,\n 'nn_start': site_nn_start, 'cn_end': cn_end, 'nn_end': site_nn_end})\n\n else: \n df_list.append({'site_index': n+1, 'site': label,\n 'cn_start': cn_start, 'nn_start': site_nn_start})\n\n # Make a dataframe from df_list \n df = pd.DataFrame(df_list)\n \n # Save the csv file or return as dataframe \n if save_csv: \n if not csv_fname.endswith('.csv'):\n csv_fname += '.csv'\n df.to_csv(csv_fname, header=True, index=False)\n else: \n return df\n","repo_name":"faradaymahe/surfaxe","sub_path":"surfaxe/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":18243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"26617164819","text":"import decimal\r\nfrom api import get_free_balance, get_market_ask_price, get_min_qty_binance, place_order_tp_sl\r\nfrom check_amount import get_investment_amount\r\nfrom config import *\r\nfrom functions.logger import logger\r\nfrom get_tp_sl import get_tp_sl\r\n\r\ndef buylogic(data):\r\n \"\"\"\r\n Decide whether a buy order should be placed and, if so, submit it with\r\n take-profit and stop-loss levels.\r\n :param data: Recent market data; the last row is used to derive the tp/sl levels.\r\n \"\"\"\r\n\r\n \r\n 
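# tp/sl come from the most recent row of data; get_investment_amount\r\n # presumably sizes the order against MAXBUYPERCENTOFCAPITAL from config.\r\n 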
logger(\"Decided to buy %\", MAXBUYPERCENTOFCAPITAL, \" of USDT balance. Checking if enough...\")\r\n\r\n side = 'buy'\r\n tp,sl = get_tp_sl(data,len(data)-1)\r\n # tp,sl = tpsl_smallest_movement()\r\n amount,market_price,buyamountinbtc,buyamountinusdt,usdtbalance,error = get_investment_amount(side)\r\n # Check if the transaction amount is greater than the minimum transaction size\r\n if error is None:\r\n\r\n logger(\"Enough USDT to cover purchase of \", \"USDT\", \"|USDT balance: \", usdtbalance,\r\n \" |BTC TSCN QTY: \", buyamountinbtc, \"USDT TSCN QTY:\", amount)\r\n \r\n btcbalance = get_free_balance(\"BTC\")\r\n\r\n\r\n response = place_order_tp_sl(TEST,\"market\", \"BTCUSDT\",\"buy\",usdtbalance,btcbalance,tp,sl, amount)\r\n\r\n print(response)\r\n else:\r\n logger(error)\r\n\r\ndef getminimumtransactionamountUSDT():\r\n return get_min_qty_binance(TRADINGPAIR)*get_market_ask_price(TRADINGPAIR)\r\ndef getminimumtransactionamountBTC():\r\n return decimal.Decimal(get_min_qty_binance(TRADINGPAIR))\r\n\r\n","repo_name":"jcianci12/PythonScikitTrader","sub_path":"logic/buylogic.py","file_name":"buylogic.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35511886624","text":"# Author: Reda Mohsen\r\n\r\n# Bisection Search Algorithm\r\nprint(\"Searching for a number from 0 to 100\")\r\nprint(\"Please think of an integer number between 0 and 100!\")\r\n# for example number = 12\r\nlow = 0\r\nhigh = 100\r\nmedium = int((low+high)/2) # medium initially = 50\r\nstate = True\r\n\r\n# Define check_guess function\r\ndef check_guess(medium):\r\n # Docstrings\r\n \"\"\"\r\n This function takes the medium and asks the user if it is the number he guesses!\r\n Input: medium, Type: Integer\r\n Output: c if the guess is correct,\r\n h if the guess is higher,\r\n l if the guess is lower than the medium\r\n \"\"\"\r\n print(\"Is your secret number = \"+ str(medium)+\" ?\")\r\n guess = input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly \\n\")\r\n return guess\r\n\r\ncounter = 0\r\nwhile state: # true, true, false\r\n guess = check_guess(medium)\r\n if (guess == 'c'):\r\n print(\"Game over. 
Your secret number is \"+ str(medium)+\" !!\")\r\n state = False\r\n elif (guess == 'l'): # true, true\r\n high = medium # high = 50, high = 25\r\n medium = (low+high)//2 # medium = 25, medium = 12\r\n counter +=1 # counter = 1, counter = 2\r\n elif (guess=='h'):\r\n low = medium\r\n medium = (low+high)//2\r\n counter +=1\r\n \r\nOutput = str(medium) + \" is found in \" + str(counter+1) + \" iteration\"\r\nprint(Output) # print with medium = 12 and counter = 2\r\n","repo_name":"reda-mohsen/Python_Programs","sub_path":"Bisection_Search_Algorithm.py","file_name":"Bisection_Search_Algorithm.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16120400067","text":"import json\nimport models.chart_data_models as c_d_m\nimport models.table_data_model as t_d_m\nimport models.analytic_form_model as a_f_m\nimport modules.static_chart_builder as s_c_b\nfrom decimal import Decimal\nfrom re import sub\nimport zlib\nimport base64\nimport copy\n\nfrom db_models.models import Users, Clients, AnalyticRules, Projects\nfrom db.db import session\nfrom flask import Flask, jsonify, request\nfrom flask_restful import Resource, fields, marshal_with, abort, reqparse\nimport json\nimport result_models.res_model as r_m\nfrom sqlalchemy import and_\nimport datetime\ndef to_bytes(bytes_or_str):\n if isinstance(bytes_or_str, str):\n value = bytes_or_str.encode() # uses 'utf-8' for encoding\n else:\n value = bytes_or_str\n return value # Instance of bytes\n\n\ndef check_utf8(bytes_or_str):\n try:\n value = bytes_or_str.decode(encoding='utf-8')\n print(\"Convert to UTF-8 - OK\")\n return True\n except Exception as e:\n return False\n\ndef check_utf16(bytes_or_str):\n try:\n value = bytes_or_str.decode(encoding='utf-16')\n print(\"Convert to UTF-16 - OK\")\n return True\n except Exception as e:\n return False\n\n\ndef to_str(bytes_or_str):\n try:\n if isinstance(bytes_or_str, bytes):\n if (check_utf8(bytes_or_str)):\n print(\"Encode to utf-8\")\n value = bytes_or_str.decode(encoding='utf-8') # uses 'utf-8' for encoding\n else:\n print(\"Encode to utf-16\")\n value = bytes_or_str.decode(encoding='utf-16')\n\n print('bytes or string OK')\n # value = bytes_or_str.decode() # uses 'utf-8' for encoding\n else:\n value = bytes_or_str\n return value\n except Exception as e:\n print('TO STR ERROR '+str(e))\n\n\ndef check_if_formula(type_id):\n try:\n f_letter = str(type_id)[0]\n if (f_letter == '2'):\n n = int(type_id)\n if (n >= 250):\n return True\n t = 0\n\n if (f_letter == '3'):\n n = int(type_id)\n if (n >= 350):\n return True\n t = 0\n return False\n pass\n except:\n return False\n pass\n\n\ndef get_formula_elements(project_id, type_id, analysis_type):\n try:\n types = []\n project = session.query(Projects).filter(Projects.id == int(project_id)).first()\n if (project == None):\n return []\n\n user_id = project.user_id\n\n user_client = session.query(Users).filter(Users.id == user_id).first()\n\n if not user_client:\n return []\n client_id = user_client.client_id\n\n analytic_rules = session.query(AnalyticRules).filter(and_(\n AnalyticRules.client_id == client_id),\n AnalyticRules.is_default == True\n ).first()\n model = json.loads(analytic_rules.data)\n\n if (str(type_id).startswith('2') and str(analysis_type) == '1'):\n # build to OSV\n pass\n else:\n if (str(type_id).startswith('2')):\n formulas = model[\"opiu_rules\"][\"card_rules\"][\"cards_formulas\"][\"opiu_cards_formulas\"]\n for f in formulas:\n if (f[\"code\"] == 
type_id):\n for element in f[\"formula_elements\"]:\n types.append(element['code'])\n et = 0\n y = 0\n pass\n\n if (str(type_id).startswith('3')):\n formulas = model[\"odds_rules\"][\"odds_formulas\"][\"odds_formulas\"]\n for f in formulas:\n if (f[\"code\"] == type_id):\n for element in f[\"formula_elements\"]:\n types.append(element['code'])\n et = 0\n y = 0\n pass\n\n t = 0\n return types\n pass\n except Exception as e:\n return []\n\n\ndef convert_details_by_period(documents, month, year, type_id, analysis_type, project_id):\n try:\n result = \"\"\n headers = []\n result = []\n is_formula = check_if_formula(type_id)\n clear_table = []\n types = []\n if (is_formula == False):\n types.append(type_id)\n else:\n types = get_formula_elements(project_id, type_id, analysis_type)\n\n for d in documents:\n pat = '90.02'\n file_name = d.file_name\n exists = pat in file_name\n if (exists==True):\n t=0\n\n\n s_cmpstr = copy.deepcopy(d.data)\n\n s_cmpstr = s_cmpstr.replace(\"b'\", \"\", 1)\n\n s_cmpstr = s_cmpstr.replace(\"'\", \"\")\n b_cmpstr = to_bytes(s_cmpstr)\n b_cmpstr = base64.b64decode(b_cmpstr)\n\n\n tmp = zlib.decompress(b_cmpstr)\n del s_cmpstr\n del b_cmpstr\n\n rr = to_str(tmp)\n del tmp\n f_cmpstr = rr\n # f_cmpstr = f_cmpstr.replace(\"'\", \"\")\n rr = json.loads(f_cmpstr)\n del f_cmpstr\n try:\n itms = rr[\"rows\"][0][\"cells\"][0][\"tableData\"][\"items\"]\n if (len(headers) == 0):\n headers = rr[\"rows\"][0][\"cells\"][0][\"tableData\"][\"headers\"]\n tb = []\n for tp in types:\n\n tb = []\n if (year != 999 and not str(year).startswith('777')):\n if (analysis_type==0):\n tb = [t for t in itms if\n (\n str(t[\"month\"]) == str(month) and str(t[\"year\"]) == str(year) and str(t[\"typeId\"]) == str(\n tp))]\n elif (analysis_type==1):\n tb = [t for t in itms if\n (\n str(t[\"month\"]) == str(month) and str(t[\"year\"]) == str(year) and str(\n t[\"typeId\"]) == str(\n tp))]\n if (len(tb) > 0):\n result.append(tb)\n elif (year==999):\n tb = [t for t in itms if\n (\n str(t[\"typeId\"]) == str(\n tp))]\n\n if (len(tb) > 0):\n result.append(tb)\n\n elif (str(year).startswith('777')==True):\n _year = int(str(year).replace('777',''))\n tb = [t for t in itms if\n (str(t[\"year\"]) == str(_year) and str(\n t[\"typeId\"]) == str(\n tp))\n ]\n if (len(tb) > 0):\n result.append(tb)\n\n\n # genearate form\n\n except Exception as e:\n t = 0\n\n for r in result:\n for t in r:\n clear_table.append(t)\n if (analysis_type!='1' and str(type_id).startswith('2')==False):\n for p in clear_table:\n p[\"period\"] = datetime.datetime.strptime(p[\"period\"], '%d.%m.%Y').date()\n clear_table.sort(key=lambda x: x[\"period\"], reverse=False)\n form = a_f_m.AForm()\n if (str(type_id).startswith('2') and str(analysis_type) == '1'):\n #################################################\n form.add_row(\"Графики\")\n row = form.get_last_row()\n\n # генерируем круговые диаграммы\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['account', 'periodTransactionsDebet', 'Обороты за период по дебету'],\n ['account', 'periodTransactionsCredit', 'Обороты за период по кредиту']\n ]\n\n for c in c_values:\n values = []\n\n for detail in clear_table:\n f_val = detail[c[0]] # (getattr(detail, c[0]))\n res = Decimal(sub(r'[^\\d.]', '', detail[c[1]]))\n s_val = float(res) # (getattr(detail, c[1]))\n\n v = [f_val, s_val]\n values.append(v)\n all_nulls = True\n\n for v in values:\n if (v[1] != 0):\n all_nulls = False\n break\n if (all_nulls):\n continue\n s_c_b.generate_balance_pie_charts(row, 
'PieChart',\n c[2],\n 400,\n c[2], \"\", False, column_names, values)\n\n\n ##############################\n #################################################\n form.add_row(\"Данные\")\n table = t_d_m.TableData()\n table.headers = headers\n\n table.init_model(clear_table)\n # if (len(table.items)==0):\n # #check is formula\n # build_formula_details(table,rr)\n\n row = form.get_last_row()\n row.add_cell(table)\n\n return form\n except Exception as e:\n\n return \"\"\n","repo_name":"vyadzmak/Landau.X.Api","sub_path":"modules/details_converter.py","file_name":"details_converter.py","file_ext":"py","file_size_in_byte":9496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20342457287","text":"##AES 128 encryption.\n\nimport os\n#The cryptography.hazmat.primitives.ciphers module in Python is part of the Cryptography library and provides a low-level interface for working with symmetric encryption algorithms.\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\n\n# Generate a random 128-bit (16-byte) key; a Fernet key is base64-encoded and\n# would be rejected by algorithms.AES, so raw random bytes are used instead\nkey = os.urandom(16)\n\n# Create an AES-128 cipher object (ECB mode leaks plaintext patterns; demo only)\ncipher = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend())\n\n# Define a plaintext message\nmessage = b\"This is a secret message.\"\n\n# Pad the message to a multiple of 16 bytes (the block size for AES-128)\npadded_message = message + b\"\\0\" * (16 - len(message) % 16)\n\n# Create an encryptor object and encrypt the padded message\nencryptor = cipher.encryptor()\nciphertext = encryptor.update(padded_message) + encryptor.finalize()\n\n# Print the key and ciphertext\nprint(\"Key:\", key)\nprint(\"Ciphertext:\", ciphertext)\n","repo_name":"Shafqathassan/Cyber-Sec-Tools-Collected","sub_path":"AES128-Encryption.py","file_name":"AES128-Encryption.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4824215814","text":"import forca\nimport adivinhacao\n\ndef escolhe_jogo():\n print(\"********************************\")\n print(\"******Escolha o seu jogo!*******\")\n print(\"********************************\")\n\n print(\"(1) Forca (2) Adivinhação\")\n\n jogo = int(input(\"Faça sua escolha: \"))\n\n if(jogo == 1):\n forca.jogar()\n elif(jogo == 2):\n adivinhacao.jogar()\n\nif(__name__ == \"__main__\"):\n escolhe_jogo()","repo_name":"rveneroso/python-alura-curso2","sub_path":"jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"38528950104","text":"from sys import stdout\r\nimport logging\r\n\r\nclass Allog():\r\n\tdef __init__(self, log_file_path):\r\n\t\tlogging.basicConfig(filename=log_file_path,\r\n filemode='a',\r\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\r\n datefmt='%H:%M:%S',\r\n level=logging.INFO)\r\n\r\n\t\tself.log = logging.getLogger()\r\n\t\tformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\t\thandler = logging.StreamHandler(stdout)\r\n\t\thandler.setFormatter(formatter)\r\n\t\tself.log.addHandler(handler)\r\n\r\n\tdef info(self, 
message):\r\n\t\tself.log.info(message)","repo_name":"alannguyencs/sibnet","sub_path":"src/utils/util_log.py","file_name":"util_log.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"}
+{"seq_id":"13472945191","text":"import json\nimport os\n\nimport requests\nfrom flask import Blueprint, abort, current_app, jsonify, request\nfrom flask.globals import session\nfrom flask.wrappers import Response\nfrom google import auth\nfrom google.oauth2 import id_token\nfrom google_auth_oauthlib.flow import Flow\nfrom werkzeug.utils import redirect\n\nfrom src.app.middlewares.auth import logged_in, requires_access_level\nfrom src.app.models.user import User, users_share_schema\nfrom src.app.services.users_service import (create_role, create_user,\n format_print_user,\n get_user_by_email, login_user,\n update_user)\nfrom src.app.utils import exist_key, generate_jwt\n\nuser = Blueprint('user', __name__, url_prefix='/user')\n\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n\nflow = Flow.from_client_secrets_file(\n client_secrets_file='src/app/db/client_secret.json',\n scopes=[\n 'https://www.googleapis.com/auth/userinfo.email',\n 'https://www.googleapis.com/auth/userinfo.profile',\n 'openid',\n ],\n redirect_uri='http://localhost:5000/user/callback',\n)\n\n\n@user.route('/login', methods=['POST'])\n@logged_in()\ndef login():\n list_keys = ['email', 'password']\n data = exist_key(request.get_json(), list_keys)\n if 'error' in data:\n return jsonify(data), 400\n\n response = login_user(data['email'], data['password'])\n if 'error' in response:\n return jsonify(response), 400\n\n return Response(\n response=json.dumps(response), status=200, mimetype='application/json'\n )\n\n\n@user.route('/auth/google', methods=['POST'])\ndef auth_google():\n authorization_url, state = flow.authorization_url()\n session['state'] = state\n\n return Response(\n response=json.dumps({'url': authorization_url}),\n status=200,\n mimetype='application/json',\n )\n\n\n@user.route('/callback', methods=['GET'])\ndef callback():\n flow.fetch_token(authorization_response=request.url)\n credentials = flow.credentials\n request_session = requests.session()\n token_google = auth.transport.requests.Request(session=request_session)\n\n user_google_dict = id_token.verify_oauth2_token(\n id_token=credentials.id_token,\n request=token_google,\n audience=current_app.config['GOOGLE_CLIENT_ID'],\n )\n email = user_google_dict['email']\n name = user_google_dict['name']\n user = get_user_by_email(email)\n\n if user is None or 'error' in user:\n new_user = {\n 'city_id': 1,\n 'gender_id': 1,\n 'role_id': 3,\n 'name': name,\n 'age': None,\n 'email': email,\n 'phone': None,\n 'password': 'senha123',\n 'cep': None,\n 'street': None,\n 'district': None,\n 'complement': None,\n 'landmark': None,\n 'number_street': None,\n }\n user = create_user(new_user, validate=False)\n\n user_google_dict['user_id'] = user['id']\n user_google_dict['roles'] = user['id']\n session['google_id'] = user_google_dict.get('sub')\n del user_google_dict['aud']\n del user_google_dict['azp']\n\n token = generate_jwt(user_google_dict)\n\n return redirect(f\"{current_app.config['FRONTEND_URL']}?jwt={token}\")\n\n\n@user.route('/logout', methods=['POST'])\ndef logout():\n session.clear()\n return jsonify({'message': 'Você foi deslogado'}), 200\n\n\n@user.route('/', methods=['POST'])\n@requires_access_level(['READ', 'WRITE', 'UPDATE', 'DELETE'])\ndef create():\n data = request.get_json()\n 
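# create_user (from users_service) is expected to validate the payload and\n # return either the new user or an {'error': ...} dict, as handled below\n 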
response = create_user(data)\n\n if 'error' in response:\n return jsonify(response), 400\n\n return jsonify(response), 201\n\n\n@user.route('/', methods=['GET'])\n@requires_access_level(['READ'])\ndef get_user_by_name():\n page = request.args.get('page', 1, type=int)\n per_page = 20\n pager = User.query.paginate(page, per_page, error_out=False)\n\n if not request.args.get('name'):\n users = users_share_schema.dump(pager.items)\n result = [format_print_user(result) for result in users]\n if not result:\n return json.dumps({'message': 'Nenhum usuário encontrado'}), 204\n\n return jsonify({'Status': 'Sucesso', 'Dados': result}), 200\n\n user_query = User.query.filter(\n User.name.ilike('%' + request.args.get('name') + '%')\n ).all()\n user = users_share_schema.dump(user_query)\n\n if not user:\n return (\n json.dumps(\n {'Status': 'Erro', 'Mensagem': 'Usuário não encontrado'}\n ),\n 204,\n )\n\n result = [format_print_user(result) for result in user]\n\n return jsonify({'Status': 'Sucesso', 'Dados': result}), 200\n\n\n@user.route('/<int:id>', methods=['PATCH'])\n@requires_access_level(['UPDATE'])\ndef update_user_by_id(id):\n if id is None or id == 0 or not request.json:\n abort(400)\n user = User.query.get(id)\n if user is None:\n return {'error': 'Usuário não encontrado!'}, 404\n \n data = request.get_json()\n\n response = update_user(data, user)\n\n if 'error' in response:\n return json.dumps(response), 400\n\n return json.dumps(response), 204\n\n@user.route('/role', methods=['POST'])\n@requires_access_level(['READ', 'WRITE', 'UPDATE', 'DELETE'])\ndef add_new_role():\n data = request.get_json()\n response = create_role(data)\n\n if 'error' in response:\n return jsonify(response), 400\n\n return jsonify(response), 201\n","repo_name":"edumartinsrib/M3P1-DEVinventory-","sub_path":"src/app/controllers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35981079117","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.response.AlipayResponse import AlipayResponse\nfrom alipay.aop.api.domain.DeviceGroup import DeviceGroup\n\n\nclass AlipayCommerceIotGroupBatchqueryResponse(AlipayResponse):\n\n def __init__(self):\n super(AlipayCommerceIotGroupBatchqueryResponse, self).__init__()\n self._groups = None\n self._total = None\n\n @property\n def groups(self):\n return self._groups\n\n @groups.setter\n def groups(self, value):\n if isinstance(value, list):\n self._groups = list()\n for i in value:\n if isinstance(i, DeviceGroup):\n self._groups.append(i)\n else:\n self._groups.append(DeviceGroup.from_alipay_dict(i))\n @property\n def total(self):\n return self._total\n\n @total.setter\n def total(self, value):\n if isinstance(value, list):\n self._total = list()\n for i in value:\n self._total.append(i)\n\n def parse_response_content(self, response_content):\n response = super(AlipayCommerceIotGroupBatchqueryResponse, self).parse_response_content(response_content)\n if 'groups' in response:\n self.groups = response['groups']\n if 'total' in response:\n self.total = response['total']\n","repo_name":"koking0/LuffyCity","sub_path":"BackEnd/Payment/other/alipay-sdk-python-3.3.398/alipay/aop/api/response/AlipayCommerceIotGroupBatchqueryResponse.py","file_name":"AlipayCommerceIotGroupBatchqueryResponse.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} 
+{"seq_id":"2153460245","text":"from pygame import *\nfrom random import *\n\nWIDTH = 256\nHEIGHT = 256\nBLOCK = 3\nscreen = display.set_mode([BLOCK * WIDTH, BLOCK * HEIGHT])\n\nnoise_seed = []\nfor i in range(WIDTH):\n noise_seed.append(random())\n\ndef perlin_noise_1d(seed, octaves, scaling_bias):\n output = []\n for x in range(len(seed)):\n noise = 0\n scale = 1\n scale_acc = 0\n pitch = len(seed)\n\n for o in range(octaves):\n sample1 = (x // pitch) * pitch\n sample2 = (sample1 + pitch) % len(seed)\n\n blend = (x - sample1) / pitch\n sample = (1 - blend) * seed[sample1] + blend * seed[sample2]\n noise += sample * scale\n\n scale_acc += scale\n scale /= scaling_bias\n pitch //= 2\n\n output.append(noise / scale_acc)\n return output\n\ndef generate(rate, scl):\n points = perlin_noise_1d(noise_seed, rate, scl)\n\n for i in range(len(points)):\n x = BLOCK * i\n y = BLOCK * int(HEIGHT // 2 - (points[i] * HEIGHT / 2))\n draw.rect(screen, Color(0, 255, 0), Rect([x, y], [BLOCK, BLOCK*HEIGHT // 2 - y]))\n\nrate = 1\nscl = 2\nwhile True:\n for ev in event.get():\n if ev.type == KEYDOWN:\n if ev.key == K_SPACE:\n rate += 1\n if rate > 8:\n rate = 1\n elif ev.key == K_UP:\n scl += 0.2\n elif ev.key == K_DOWN:\n scl -= 0.2\n if scl <= 0.2:\n scl = 0.2\n screen.fill(Color(0, 0, 0))\n generate(rate, scl)\n\n display.update()\n time.delay(20)","repo_name":"etakerim/Hackerman","sub_path":"2-Grafika-na-obrazovke/4-Simulácie/8_lerp-1d-šum.py","file_name":"8_lerp-1d-šum.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23789401939","text":"import numpy as np\nimport pandas as pd\nimport csv\nimport multiprocessing\nimport functools\n\nfrom my_module import backward_trajectory as back\nfrom my_module import mbslib\n\n\ndef make_trajectoryfiles_backward(N0, generation, demography_in_year, f_current, s, h, resolution, \n n_trajectory, path_to_traj):\n '''\n generate trajectory file\n Args:\n N0 (int): \n generation (int): generation time, years/generation\n demography_in_year (list): demographic history/\n f_current (float): current frequency of derived allele\n s, \n h, \n resolution, \n n_trajectory (int): number of trajectories\n path_to_traj (str) : path to trajectory files (w/o extentions)\n \n '''\n for i in range(n_trajectory):\n # file name\n filename = f'{path_to_traj}_{i}.dat'\n \n # generate trajectory\n trajectory = back.mbs_input(f_current, \n demography_in_year, \n s, h, \n generation, N0, resolution,\n )\n \n # save\n with open(filename, 'w') as f:\n \n writer = csv.writer(f, delimiter='\\t')\n \n for freq in trajectory:\n writer.writerow(freq)\n \n\ndef run_mbs(nsam, per_site_theta, per_site_rho, \n lsites, selpos, \n n_trajectory, nrep_per_traj, \n path_to_mbs_output, path_to_traj):\n '''\n run mbs\n Args:\n nsam, \n per_site_theta, \n per_site_rho, \n lsites, \n selpos, \n n_trajectory (int): number of trajectory files\n nrep_per_traj (int): number of simulations per trajectory file\n path_to_mbs_output (str) : path to mbs output files (w/o extentions)\n path_to_traj (str) : path to trajectory files (w/o extentions)\n\n '''\n \n cmd = f'mbs {nsam} -t {per_site_theta} -r {per_site_rho} '\n cmd += f'-s {lsites} {selpos} '\n cmd += f'-f {n_trajectory} {nrep_per_traj} {path_to_traj} '\n cmd += f'> {path_to_mbs_output}'\n \n mbslib.run_command(cmd)\n\n\ndef parameter_sets_backward(current_frequency, sel_advantages):\n '''\n\n Args:\n current_frequency: current frequencies of selected 
alleles\n sel_advantages: selection coefficients\n\n Returns:\n\n '''\n params = dict()\n\n params['N0'] = 5000\n params['generation'] = 20\n params['demography_in_year'] = [[0, 100 * params['N0'] * params['generation'], params['N0']]]\n\n # selection coefficients\n params['s'] = 0\n params['h'] = 0.5 # <--- co-dominance\n params['resolution'] = 100\n\n # number of trajectory\n params['n_trajectory'] = 1000\n # coalescent simulation per trajectory\n params['nrep_per_traj'] = 1\n\n # number of chromosome\n params['nsam'] = 120\n # length of sequence\n params['lsites'] = 500000\n # position of target site\n params['selpos'] = 1\n\n # mutation rate per site per generation\n params['per_site_theta'] = 1.0 * 10 ** (-8) * 4 * params['N0']\n # recombination rate per site per generation\n params['per_site_rho'] = 1.0 * 10 ** (-8) * 4 * params['N0']\n\n params_list = list()\n for f_current in current_frequency:\n params['f_current'] = f_current\n for s in sel_advantages:\n params['s'] = s\n params_list.append(params.copy())\n\n return params_list\n\n\ndef run_mbs_to_msoutput(params, ms_data_dir, ehh_data_dir):\n '''\n run mbs and convert outputs in ms format to calculate EHH statistics\n Args:\n params:\n ms_data_dir:\n ehh_data_dir:\n\n Returns:\n\n '''\n # path to trajectory files\n path_to_traj = f\"results/traj_f{params['f_current']}_s{params['s']}\"\n\n # generate trajectory files\n make_trajectoryfiles_backward(params['N0'], params['generation'], \n params['demography_in_year'], params['f_current'], \n params['s'], params['h'], params['resolution'], \n params['n_trajectory'], path_to_traj)\n\n # path to mbs output\n path_to_mbs_output = f\"results/mbs_nsam{params['nsam']}_fcurrent{params['f_current']}_s{params['s']}.dat\"\n\n # run mbs\n run_mbs(params['nsam'], params['per_site_theta'], params['per_site_rho'], \n params['lsites'], params['selpos'], \n params['n_trajectory'], params['nrep_per_traj'], \n path_to_mbs_output, path_to_traj)\n\n # set the point at which EHH values are calculated\n distance_in_bp = 15000\n\n # convert mbs format into ms format\n with open('{}/mbs_f{}_s{}.txt'.format(ms_data_dir, params['f_current'], params['s']), 'w') as f:\n # ms command line\n f.write(\"ms {} {} -t {} -r {} {}\\n\\n\".format(params['nsam'] ,params['n_trajectory'], \n params['per_site_theta']*params['lsites'], \n params['per_site_rho']*params['lsites'], params['lsites']))\n # convert into ms format for each line\n for i in mbslib.parse_mbs_data(path_to_mbs_output):\n # change the position of mutation if it occurred at target site\n if i['pos'][0] == 1.0:\n h = mbslib.mbs_to_ms_output(i, params['selpos'], params['lsites'])\n f.write(\"//\\n\")\n # write segregation sites\n f.write(\"segsites: {}\\n\".format(len(h['pos'])))\n\n # write position\n f.write(\"positions: \")\n # convert int to str\n pos_list = [str(i) for i in h['pos']]\n # change position of the mutation occurred at the target site\n pos_list[1] = str(2/params['lsites'])\n f.write(\" \".join(pos_list))\n f.write(\"\\n\")\n\n # write seq data\n f.write(\"\\n\".join(h[\"seq\"]))\n f.write(\"\\n\\n\")\n\n else:\n h = mbslib.mbs_to_ms_output(i, params['selpos'], params['lsites'])\n f.write(\"//\\n\")\n # write segregating sites\n f.write(\"segsites: {}\\n\".format(len(h['pos'])))\n\n # write position\n f.write(\"positions: \")\n # convert int to str\n pos_list = [str(i) for i in h['pos']]\n f.write(\" \".join(pos_list))\n f.write(\"\\n\")\n\n # write seq\n f.write(\"\\n\".join(h[\"seq\"]))\n f.write(\"\\n\\n\")\n\n # run R script to 
calculate EHH statistics\n mbslib.run_command('Rscript calc_ehh_backward.R {} {} {} {} {} {} {}'.format(params['f_current'], params['n_trajectory'],\n params['lsites'], params['s'], ms_data_dir, ehh_data_dir,\n distance_in_bp))\n\n print(params['f_current'], params['s'], 'done')\n\n\ndef calc_percentile(current_frequency, ehh_data_dir, percentile_data_dir):\n # percentile lists\n rEHH_percentile_list = []\n iHS_percentile_list = []\n bins = np.arange(1, 100, 1)\n for i in current_frequency:\n EHH_data = pd.read_csv(\"{}/EHH_data_f{}_s0.csv\".format(ehh_data_dir, i))\n #EHH_data = EHH_data.replace({'rEHH': {np.inf: float('inf')}})\n # condition on the case that more than two of ancestral and derived alleles are contained in samples\n EHH_data = EHH_data[EHH_data['iHH_A']!=0]\n EHH_data = EHH_data[EHH_data['iHH_D']!=0]\n EHH_data = EHH_data[:1000]\n # calculate percentile\n rEHH_percentile_list.append(list(np.percentile(EHH_data['rEHH'], bins)))\n iHS_percentile_list.append(list(np.percentile(EHH_data['iHS'], bins)))\n\n df_rEHH = pd.DataFrame(rEHH_percentile_list, columns = bins, index = current_frequency)\n df_rEHH.to_csv('{}/rEHH_percentile.csv'.format(percentile_data_dir))\n\n df_iHS = pd.DataFrame(iHS_percentile_list, columns = bins, index = current_frequency)\n df_iHS.to_csv('{}/iHS_percentile.csv'.format(percentile_data_dir))\n\n\ndef main():\n # initial values\n # current frequency of derived allele\n current_frequency = [0.01, 0.05, 0.1, 0.15, 0.2, 0.25,\n 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,\n 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,\n 0.9, 0.95, 0.99]\n # selection coefficients\n sel_advantages = [0]\n # parameters sets\n testlist = parameter_sets_backward(current_frequency, sel_advantages)\n # ms data directory name\n ms_data_dir = 'results'\n # ehh data directory\n ehh_data_dir = 'ehh_data'\n # percentile data directory\n percentile_data_dir = 'percentile_data'\n\n\n n_cpu = int(multiprocessing.cpu_count()/2)\n with multiprocessing.Pool(processes=n_cpu) as p:\n p.map(functools.partial(run_mbs_to_msoutput, ms_data_dir=ms_data_dir, ehh_data_dir=ehh_data_dir), testlist)\n\n # calculate percentile\n calc_percentile(current_frequency, ehh_data_dir, percentile_data_dir)\n\nif __name__==\"__main__\":\n main()\n\n\n","repo_name":"ttomo3535/power_of_neutrality_tests","sub_path":"constant_pop/calc_ehh_null.py","file_name":"calc_ehh_null.py","file_ext":"py","file_size_in_byte":9152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41700137384","text":"import itertools\nimport numpy as np\nimport networkx as nx\nfrom typing import List, Tuple, Optional\n\n\ndef create_graph(row_min: int, row_max: int, col_min: int, col_max: int,\n kernel: Optional[List[Tuple[int, int]]] = None) -> nx.Graph:\n \"\"\"produces a graph with nodes densely packing the inclusive\n bounds defined by row/col_min/max\n\n Parameters\n ----------\n row_min: int\n minimum value of row index\n row_max: int\n maximum value of row index\n col_min: int\n minimum value of col index\n col_max: int\n maximum value of col index\n kernel: List[Tuple[int, int]]\n N x 2: [(r0, c0), (r1, c1), ...] 
each (ri, ci) pair\n defines a relative (row, col) neighbor for creating a graph edge\n\n Returns\n -------\n graph: nx.Graph\n an undirected networkx graph, free of attributes\n\n \"\"\"\n\n if kernel is None:\n # relative indices of 8 nearest-neighbors\n kernel = list(itertools.product([-1, 0, 1], repeat=2))\n kernel.pop(kernel.index((0, 0)))\n\n graph = nx.Graph()\n\n if (row_min == row_max) & (col_min == col_max):\n # trivial case with no edges and 1 node\n graph.add_node((row_min, col_min))\n return graph\n\n rows, cols = np.mgrid[row_min:(row_max + 1), col_min:(col_max + 1)]\n for edge_start in zip(rows.flat, cols.flat):\n for drow, dcol in kernel:\n edge_end = (edge_start[0] + drow, edge_start[1] + dcol)\n if (edge_end[0] < row_min) | (edge_end[0] > row_max):\n continue\n if (edge_end[1] < col_min) | (edge_end[1] > col_max):\n continue\n edge = [edge_start, edge_end]\n if not graph.has_edge(*edge):\n graph.add_edge(*edge)\n return graph\n","repo_name":"AllenInstitute/ophys_etl_pipelines","sub_path":"src/ophys_etl/modules/segmentation/graph_utils/creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"7616037866","text":"print('\\033[1;33m##### DESAFIO 73 #####\\033[m\\n')\n#Tabela Laliga\ncont = 0\ntab = ('Atlético de Madrid', 'Barcelona', 'Real Madrid', 'Sevilla', 'Real Sociedad',\n 'Betis', 'Villarreal', 'Granada', 'Athletic Bilbao', 'Levante', 'Celta', 'Valencia',\n 'Osasuna', 'Getafe', 'Cádiz', 'Valladolid', 'Elche', 'Eibar', 'Alavés', 'Huesca')\nprint('''[1] Tabela Completa da LaLiga\n[2] Os 4 primeiros Colocados\n[3] Os 3 últimos Colocados\n[4] Os times em ordem alfabética\n[5] Em que posição Barcelona está colocado\\n''')\nwhile True:\n esc = int(input('O que deseja ver? Escolha um número de 1 a 5: '))\n if esc < 1 or esc > 5:\n print('Tente novamente. ', end='')\n else:\n break\nif esc == 1:\n print(tab)\nif esc == 2:\n print(tab[:4])\nif esc == 3:\n print(tab[-3:])\nif esc == 4:\n print(sorted(tab))\nif esc == 5:\n club = str(input('Qual clube você quer ver a colocação? 
'))\n for p in tab:\n if p == club:\n print(f'O {club} está na {tab.index(club)+1}º posição.')\nprint('\\nEspero que tenha gostado!!!\\n')\n","repo_name":"ismael211/Curso-Python","sub_path":"Python - Curso em Video 1, 2 ,3/pt073.py","file_name":"pt073.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70456323784","text":"from muse import Muse\nimport time\nimport requests\nimport liblo\n\naddress = \"00:55:DA:B3:94:D9\"\nhost = \"192.168.1.103\"\nport = 1337\nbackend = 'gatt'\ndevice_type = \"muse\"\n\ncountACC = 0;\ndef process():\n global now\n global countACC\n now = time.time()\n countACC+=1.0\n\n\nmuse = Muse(address=address,device_type=device_type,host=host,port=port,callback=process,backend=backend,interface=None)\n\nmuse.connect()\nprint('Connected')\nmuse.start()\nprint('Streaming')\nidx =0\nlosshist =[0 for i in range(10)]\nwhile 1:\n try:\n time.sleep(1)\n dataloss =max(0.0,100.0-countACC*3/50*100.0)\n losshist[idx] = dataloss\n idx=(idx+1)%10\n avgloss =sum(losshist)/float(len(losshist))\n print('loss: %2f' % (dataloss))\n #print('waited: %2f' % (time.time()-now), 'dataloss: %.1f' % dataloss,'avgloss: %f' % avgloss )\n countACC = 0;\n if ((time.time()-now)>500):\n break\n if ((avgloss>40)):\n break\n except:\n print(\"failed\")\n break\n\n\nmuse.disconnect()\n\n","repo_name":"jackagalvin/dmtEvent","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42196208357","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 14:44:50 2018\n\n@author: sivar\n\"\"\"\n\n\n\n#easydata = pd.read_csv(\"/Users/travisbarton/Documents/GitHub/Redditbot/VDifferentData.csv\")\neasydata = pd.read_csv(r\"C:\\Users\\sivar\\OneDrive\\Documents\\GitHub\\Redditbot\\VDifferentData.csv\")\neasydata.columns = ['id', 'title', 'tag']\nreddit = praw.Reddit(user_agent='Comment Extraction (by /u/USERNAME)',\n client_id='b8unlbKK1rWOow', client_secret='FuFwla268qevA5Ju1MgRPs2Sihg',\n username=base64.b64decode('bWF0aF9pc19teV9yZWxpZ2lvbg=='), \n password=(base64.b64decode(\"U2lyemlwcHkx\")))\nST = reddit.subreddit('showerthoughts')\ni = easydata.shape[0]\n\nfor post in ST.top(\"all\", limit = 1000):\n easydata.loc[i,:] = [i, post.title, 3]\n i = i+1\n\n\n\neasydat = np.empty([easydata.shape[0],301]) \n\nfor i in range(easydat.shape[0]):\n vecs = nlp(easydata.iloc[i,1]).vector\n for j in range(300):\n easydat[i,j] = vecs[j]\n \ntags = easydata.tag \neasydat[:,300] =easydata.tag\ndat = easydat.copy()\n\n\n \n\nX_train, X_test, y_train, y_test = train_test_split(dat[:,:300], \n dat[:,300], \n test_size=0.15, \n random_state=RS) \n\n\ny_train = y_train.reshape(len(y_train), 1).astype(int)\ny_test = y_test.reshape(len(y_test), 1).astype(int) \n\ny_train = onehot_encoder.fit_transform(y_train)\ny_test = onehot_encoder.fit_transform(y_test) \n\ntemp = Binary_network(X_train, y, X_test, \"dont matter yet\", .1, 50, 15, 30)\n\n\ntrain_res = np.round(temp[0]).astype(int)\ntest_res = np.round(temp[1]).astype(int)\n\n\nPred_to_num(y_test)[0:10]\n\n1-sum(train_res == Pred_to_num(y_train))/dat.shape[0]\n1-sum(test_res == Pred_to_num(y_test))/dat.shape[0]\nplot_confusion_matrix(confusion_matrix(Pred_to_num(y_train), train_res), [0,1], normalize = True, title = \"Is test good?\")\nplot_confusion_matrix(confusion_matrix(Pred_to_num(y_test), test_res), [0,1], normalize 
= True, title = \"Is test good?\")\n\n\n#When you pick up next time. You are working on integrating the binary networks\n#you have it returning the probability of being in the first column rn.\n# next time erase label paremeter and input x_test parameter\n\n\n\n############### test #2 \neasydat = np.empty([easydata.shape[0],300]) \n\nfor i in range(easydat.shape[0]):\n vecs = nlp(easydata.iloc[i,1]).vector\n for j in range(300):\n easydat[i,j] = vecs[j]\n \ntags = easydata.tag \n\ntag = []\nfor i in range(len(tags)):\n if tags.iloc[i] == 1.0:\n tag.append('Aww')\n elif tags.iloc[i] == 2.0:\n tag.append('Politics')\n else:\n tag.append(\"ST\")\n\ndat = easydat.copy()\n\nonehot_encoder = OneHotEncoder(sparse=False) \n \n\nX_train, X_test, y_train, y_test = train_test_split(dat[:,:300], \n tag, \n test_size=0.25, \n random_state=RS) \n\n\n\n\n\n\n\nresults = []\nresults = Feed_reduction(X_train, y_train, X_test, np.unique(y_train), nodes = 50)\nnew_X = results[0]\nnew_X_test = results[1]\n\n\n# Feed networks\nclf = svm.SVC(gamma='scale')\nclf.fit(new_X, y_train) \ndrumroll = clf.predict(new_X_test)\n\nprint('the accuracy of Feed networks: {}'.format(sum(drumroll == y_test)/len(y_test)*100))\n\n\n\n# SVM\nclf = svm.SVC(gamma='scale')\nclf.fit(X_train, y_train) \ndrumroll2 = clf.predict(X_test)\n\nprint('the accuracy of just SVM is: {}'.format(sum(drumroll2 == y_test)/len(y_test)*100))\n\n\n# Full network\n\ny = pd.factorize(y_train)[0]\ny = y.reshape(len(y), 1).astype(int) \ny = onehot_encoder.fit_transform(y)\n\nmodel = Sequential()\n\nmodel.add(Dense(50, input_dim = X_train.shape[1], activation = 'linear'))\nmodel.add(LeakyReLU(alpha=.001))\nmodel.add(Dropout(.4))\nmodel.add(Dense(50, activation = 'linear'))\nmodel.add(LeakyReLU(alpha = .001))\nmodel.add(Dense(3, activation = 'softmax')) \n \nmodel.compile(loss='categorical_crossentropy', \n optimizer='adam', \n metrics=['accuracy'])\n #filepath=\"Best_{}.hdf5\".format(label)\n #checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, \n # save_best_only=True, mode='max')\n #callbacks_list = [checkpoint]\nmodel_history = model.fit(X_train, y, \n epochs=20, batch_size=30, \n verbose = 0, validation_split = .2)\nFull_results = Pred_to_num(model.predict(X_test))\nfor i in range(len(Full_results)):\n if Full_results[i] == 1:\n Full_results[i] = 2\n elif Full_results[i] == 2:\n Full_results[i] == 1\n else:\n pass\n\n\nsum(Full_results == pd.factorize(y_test)[0])/len(y_test)\n\n\nyt = pd.factorize(y_test)[0]\nyt = yt.reshape(len(yt), 1).astype(int) \nyt = onehot_encoder.fit_transform(yt)\n\nmodel.evaluate(X_test, yt)\n\n\n\n","repo_name":"Travis-Barton/Redditbot","sub_path":"Old Code/Old Coded networks/Feed_network_maker_testing.py","file_name":"Feed_network_maker_testing.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38575454906","text":"#!/usr/bin/python3\nnumber_keys = __import__('5-number_keys').number_keys\n\na_dictionary = { 'language': \"C\", 'number': 13, 'track': \"Low level\" }\nnb_keys = number_keys(a_dictionary)\nprint(\"Number of keys: {:d}\".format(nb_keys))\nimport pycodestyle\n\ndef check_pep8_compliance(filename):\n style_guide = pycodestyle.StyleGuide()\n result = style_guide.check_files([filename])\n return result.total_errors\n\nfilename = \"5-number_keys.py\" # Nom du fichier\nnum_errors = check_pep8_compliance(filename)\n\nif num_errors == 0:\n print(f\"The code in {filename} is PEP 8 compliant.\")\nelse:\n 
print(f\"{num_errors} PEP 8 violations found in {filename}.\")\n\n","repo_name":"SalahOummouch/alx-higher_level_programming","sub_path":"5-main.py","file_name":"5-main.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28216835275","text":"from tensorflow import keras as ks\nimport numpy as np\n\nclass_indices = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,\n 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, \n 'K': 20, 'L': 21, 'M': 22, 'N': 23, 'O': 24, 'P': 25, 'Q': 26, 'R': 27, 'S': 28, 'T': 29, \n 'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34, 'Z': 35, 'del': 36, 'nothing': 37, 'space': 38}\n\ndatamap = dict()\n\nfor index in class_indices:\n datamap[class_indices[index]] = index\n\nmodel = ks.models.load_model(\"Models/cnnModel1.h5\")\nmodel_asze = ks.models.load_model(\"Models/cnnModel_asze.h5\")\nmodel_ghu = ks.models.load_model(\"Models/cnnModel_ghu.h5\")\nmodel_jy = ks.models.load_model(\"Models/cnnModel_jy.h5\")\nmodel_ltx = ks.models.load_model(\"Models/cnnModel_ltx.h5\")\nmodel_mnb = ks.models.load_model(\"Models/cnnModel_mnb.h5\")\nmodel_vkwf = ks.models.load_model(\"Models/cnnModel_vkwf.h5\")\n\ndef predict(sign):\n image = ks.preprocessing.image.img_to_array(sign)\n image = np.expand_dims(image,axis=0)\n \n result = model.predict(image)\n prediction = result[0].argsort()[::-1][:39]\n\n char = datamap[prediction[0]]\n\n if char == 'G' or char == 'H' or char == 'U':\n result_ghu = model_ghu.predict(image)\n prediction_ghu = result_ghu[0].argsort()[::-1][:3]\n\n if prediction_ghu[0] == 0:\n char = 'G'\n elif prediction_ghu[0] == 1:\n char = 'H'\n else:\n char = 'U'\n\n if char == 'J' or char == 'Y':\n result_jy = model_jy.predict(image)\n prediction_jy = result_jy[0].argsort()[::-1][:2]\n\n if prediction_jy[0] == 0:\n char = 'J'\n else:\n char = 'Y'\n\n if char == 'L' or char == 'T' or char == 'X':\n result_ltx = model_ltx.predict(image)\n prediction_ltx = result_ltx[0].argsort()[::-1][:3]\n\n if prediction_ltx[0] == 0:\n char = 'L'\n elif prediction_ltx[0] == 1:\n char = 'T'\n else:\n char = 'X'\n\n if char == 'B' or char == 'M' or char == 'N':\n result_mnb = model_mnb.predict(image)\n prediction_mnb = result_mnb[0].argsort()[::-1][:3]\n\n if prediction_mnb[0] == 0:\n char = 'B'\n elif prediction_mnb[0] == 1:\n char = 'M'\n else:\n char = 'N'\n\n return char\n\n","repo_name":"Divu2611/Silent-Talk","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70706653384","text":"# -*- coding: utf-8 -*-\nfrom collective.mailchimp.browser import portlet as mailchimp\nfrom collective.mailchimp.testing import (\n COLLECTIVE_MAILCHIMP_INTEGRATION_TESTING,\n)\nfrom plone.app.portlets.storage import PortletAssignmentMapping\nfrom plone.app.testing import setRoles\nfrom plone.app.testing import SITE_OWNER_NAME\nfrom plone.app.testing import SITE_OWNER_PASSWORD\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.portlets.interfaces import IPortletAssignment\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.portlets.interfaces import IPortletManager\nfrom plone.portlets.interfaces import IPortletRenderer\nfrom plone.portlets.interfaces import IPortletType\nfrom plone.testing.z2 import Browser\nfrom zope.component import getMultiAdapter\nfrom 
zope.component import getUtility\nfrom zope.site.hooks import setHooks\n\nimport unittest\n\n\nclass TestPortlet(unittest.TestCase):\n\n layer = COLLECTIVE_MAILCHIMP_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n setHooks()\n\n def testPortletTypeRegistered(self):\n portlet = getUtility(IPortletType, name='portlet.MailChimp')\n self.assertEquals(portlet.addview, 'portlet.MailChimp')\n\n def testInterfaces(self):\n portlet = mailchimp.Assignment(name=\"foo\")\n self.failUnless(IPortletAssignment.providedBy(portlet))\n self.failUnless(IPortletDataProvider.providedBy(portlet.data))\n\n def testInvokeAddview(self):\n portlet = getUtility(IPortletType, name='portlet.MailChimp')\n mapping = self.portal.restrictedTraverse(\n '++contextportlets++plone.leftcolumn'\n )\n for m in mapping.keys():\n del mapping[m]\n addview = mapping.restrictedTraverse('+/' + portlet.addview)\n addview.createAndAdd(data={})\n\n self.assertEquals(len(mapping), 1)\n self.failUnless(isinstance(mapping.values()[0], mailchimp.Assignment))\n\n def testInvokeEditView(self):\n mapping = PortletAssignmentMapping()\n request = self.portal.REQUEST\n\n mapping['foo'] = mailchimp.Assignment(name=\"foo\")\n editview = getMultiAdapter((mapping['foo'], request), name='edit')\n self.failUnless(isinstance(editview, mailchimp.EditForm))\n\n def testRenderer(self):\n context = self.portal\n request = self.portal.REQUEST\n view = self.portal.restrictedTraverse('@@plone')\n manager = getUtility(\n IPortletManager, name='plone.leftcolumn', context=self.portal\n )\n assignment = mailchimp.Assignment(name=\"foo\")\n\n renderer = getMultiAdapter(\n (context, request, view, manager, assignment), IPortletRenderer\n )\n self.failUnless(isinstance(renderer, mailchimp.Renderer))\n\n\nclass TestRenderer(unittest.TestCase):\n\n layer = COLLECTIVE_MAILCHIMP_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n setHooks()\n # Make sure News Items use simple_publication_workflow\n self.portal.portal_workflow.setChainForPortalTypes(\n ['News Item'], ['simple_publication_workflow']\n )\n\n def renderer(\n self,\n context=None,\n request=None,\n view=None,\n manager=None,\n assignment=None,\n ):\n context = context or self.portal\n request = request or self.portal.REQUEST\n view = view or self.portal.restrictedTraverse('@@plone')\n manager = manager or getUtility(\n IPortletManager, name='plone.leftcolumn', context=self.portal\n )\n assignment = assignment or mailchimp.Assignment(\n template='portlet_recent', macro='portlet'\n )\n\n return getMultiAdapter(\n (context, request, view, manager, assignment), IPortletRenderer\n )\n\n\nclass TestPortletIntegration(unittest.TestCase):\n\n layer = COLLECTIVE_MAILCHIMP_INTEGRATION_TESTING\n\n def setUp(self):\n app = self.layer['app']\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n self.portal_url = self.portal.absolute_url()\n\n self.browser = Browser(app)\n self.browser.handleErrors = False\n self.browser.addHeader(\n 'Authorization',\n 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD),\n )\n\n def test_add_portlet_form(self):\n self.browser.open(\n self.portal_url\n + \"/++contextportlets++plone.leftcolumn/+/portlet.MailChimp\"\n )\n\n self.assertTrue(\"Add MailChimp Portlet\" in self.browser.contents)\n self.assertTrue(\"Title\" in 
self.browser.contents)\n self.assertTrue(\"Available lists\" in self.browser.contents)\n self.assertTrue(\"ACME Newsletter\" in self.browser.contents)\n self.assertTrue(\"ACME Newsletter 2\" in self.browser.contents)\n\n def test_add_portlet(self):\n self.browser.open(\n self.portal_url\n + \"/++contextportlets++plone.leftcolumn/+/portlet.MailChimp\"\n )\n self.browser.getControl(\"Title\").value = \"ACME Newsletter Portlet\"\n self.browser.getControl(\n name=\"form.widgets.available_lists:list\", index=0\n ).value = [\"f6257645gs\"]\n self.browser.getControl(\"Save\").click()\n\n self.assertEqual(\n self.browser.url, self.portal_url + '/@@manage-portlets'\n )\n self.assertTrue(\"Hide\" in self.browser.contents)\n self.assertTrue(\"MailChimp\" in self.browser.contents)\n\n self.browser.open(self.portal_url)\n self.assertTrue(\"ACME Newsletter Portlet\" in self.browser.contents)\n self.assertTrue(\"Email address\" in self.browser.contents)\n\n def test_edit_portlet(self):\n # Create portlet\n self.browser.open(\n self.portal_url\n + \"/++contextportlets++plone.leftcolumn/+/portlet.MailChimp\"\n )\n self.browser.getControl(\"Title\").value = \"ACME Newsletter Portlet\"\n self.browser.getControl(\n name=\"form.widgets.available_lists:list\", index=0\n ).value = [\"f6257645gs\"]\n self.browser.getControl(\"Save\").click()\n # Edit portlet\n self.browser.open(\n self.portal_url\n + \"/++contextportlets++plone.leftcolumn/mailchimp/edit\"\n )\n self.browser.getControl(\"Title\").value = \"Lorem Ipsum\"\n self.browser.getControl(\n name=\"form.widgets.available_lists:list\", index=0\n ).value = [\"f6267645gs\"]\n self.browser.getControl(\"Save\").click()\n\n self.browser.open(\n self.portal_url\n + \"/++contextportlets++plone.leftcolumn/mailchimp/edit\"\n )\n self.assertTrue(\"Lorem Ipsum\" in self.browser.contents)\n self.browser.open(self.portal_url)\n self.assertTrue(\"Lorem Ipsum\" in self.browser.contents)\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"collective/collective.mailchimp","sub_path":"src/collective/mailchimp/tests/test_portlet.py","file_name":"test_portlet.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"30955922304","text":"from modules.printutils import *\n\nbig_banner(\"\"\"\n Iterators: Custom\n ---------\n\n\"\"\")\n\n\nbanner(\"\"\"\nCounter Class\n\"\"\")\n# ------------------------------\n\nclass Counter:\n def __init__(self, low, high):\n self.current = low\n self.high = high\n\n def __iter__(self):\n # need to return an iterator\n # return iter(\"hello\") <-- this works\n return self\n\n def __next__(self):\n if self.current < self.high + 1:\n num = self.current\n self.current += 1\n return num\n raise StopIteration\n\nfor n in Counter(50, 70):\n print(n)","repo_name":"rob-kistner/modern-python","sub_path":"orig_py_files/iterators_custom.py","file_name":"iterators_custom.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31650735770","text":"##### Importando Modulos ####\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, ClientsideFunction\nimport dash_table\nimport numpy as np\nimport pandas as pd\nimport openpyxl\nimport datetime\nfrom datetime import datetime as dt\nimport pathlib\n\nimport plotly.graph_objects as 
go\n\n##### Criando App e Server #####\n\napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n)\n\nserver = app.server\napp.config.suppress_callback_exceptions = True\n\nusar_base_bruta = False\n\n##### Lendo as bases de dados #####\n\nd_sh2 = pd.read_excel('data/d_sh2.xlsx', engine='openpyxl')\nd_sh2.columns = ['COD_NCM', 'NM_NCM', 'COD_SH2', 'NM_SH2']\n\nd_via = pd.read_excel('data/d_via.xlsx', engine='openpyxl')\nd_via.columns = ['COD_VIA', 'NM_VIA']\n\nif usar_base_bruta:\n\n f_comex = pd.read_csv('data/f_comex_.csv', sep = ';')\n f_comex = f_comex.merge(d_sh2[['COD_NCM', 'COD_SH2']], on = 'COD_NCM', how = 'inner')\n\n f_comex = f_comex.groupby(['ANO', 'MES', 'MOVIMENTACAO', 'SG_UF', 'COD_VIA', 'COD_SH2']).agg(VL_FOB = ('VL_FOB', 'sum')).reset_index()\n\n f_comex.to_csv('data/f_comex.csv', index = False, sep = ';')\n\nelse:\n\n f_comex = pd.read_csv('data/f_comex.csv', sep = ';') \n\n##### Criando controles para filtro #####\n\n# SH2\n\naux_sh2 = d_sh2[['COD_SH2', 'NM_SH2']].drop_duplicates().copy()\n\nsh2_options = [\n {\"label\": str(NM_SH2), \"value\": str(COD_SH2)} for COD_SH2, NM_SH2 in zip(aux_sh2['COD_SH2'], aux_sh2['NM_SH2'])\n]\n\ndel aux_sh2\n\n# VIA\n\nvia_options = [\n {\"label\": str(NM_VIA), \"value\": str(COD_VIA)} for COD_VIA, NM_VIA in zip(d_via['COD_VIA'], d_via['NM_VIA'])\n]\n\n# MOVIMENTAÇÃO\n\nmov_options = [\n {\"label\": str(MOVIMENTACAO), \"value\": str(MOVIMENTACAO)} for MOVIMENTACAO in f_comex['MOVIMENTACAO'].drop_duplicates()\n]\n\n# ANO\n\nyear_options = [\n {\"label\": str(ANO), \"value\": str(ANO)} for ANO in f_comex['ANO'].drop_duplicates()\n]\n\n##### Criando Função de Descrição #####\n\ndef descricao():\n \"\"\"\n :return: A Div contentando o titulo do DashBoard e uma breve descrição.\n \"\"\"\n return html.Div(\n id=\"descricao\",\n children=[\n html.H5(\"Comercio Exterior\"),\n html.H3(\"Bem vindo ao relatório de comercio exterior do Observatorio da Indústria da FIEC.\"),\n html.Div(\n id = \"intro\",\n children = \"Esse dash tem como principal objetivo ajudar na tomada de decisões relacionadas a importação e exportação de produtos.\",\n ),\n ],\n )\n\n##### Criando função de Filtros #####\n\ndef filtros():\n \"\"\"\n :return: A Div contendo os filtros utilzados\n \"\"\"\n return html.Div(\n id = \"filtros\",\n children = [\n\n # ANO\n\n html.P(\"Selecione o ANO\"),\n dcc.Dropdown(\n id = \"ano-filtro\",\n options = year_options,\n value = dt.now().year - 1,\n clearable = False\n ),\n html.Br(),\n html.Br(), \n \n # MOVIMENTO\n\n html.P(\"Selecione o MOVIMENTO\"),\n dcc.Dropdown(\n id = \"mov-filtro\",\n options = mov_options,\n value = mov_options[0]['value'],\n clearable = False\n ),\n html.Br(),\n html.Br(),\n\n # SH2\n\n html.P(\"Seleciona o Grupo SH2\"),\n dcc.Dropdown(\n id=\"sh2-filtro\",\n options=sh2_options,\n value = np.nan,\n multi = True,\n ),\n html.Br(),\n html.Br(),\n\n # VIA\n\n html.P(\"Seleciona o Grupo VIA\"),\n dcc.Dropdown(\n id=\"via-filtro\",\n options=via_options,\n value = np.nan,\n multi = True,\n ),\n html.Br(),\n html.Br()\n ],\n )\n\n\n##### Definindo Layout #####\n\napp.layout = html.Div(\n id = \"app\",\n children = [\n\n # Cabeçalho\n\n html.Div(\n id = \"cabecalho\",\n className = \"banner\",\n children = [html.Img(src = app.get_asset_url('logo.png'))]\n ),\n\n # Coluna Esquerda\n\n html.Div(\n id = \"coluna-esquerda\",\n className = \"four columns\",\n children = [descricao(), filtros()]\n ),\n\n # Cards\n\n html.Div(\n [\n html.Div(\n 
[html.H6(id = \"total-importacao\"), html.P(\"Valor Importado\")],\n id = \"valor-importado\",\n className = \"mini_container\",\n ),\n html.Div(\n [html.H6(id = \"total-exportacao\"), html.P(\"Valor Exportado\")],\n id = \"valor-exportado\",\n className = \"mini_container\",\n )\n ]\n ),\n\n # Coluna Direita\n\n html.Div(\n id = \"coluna-direita\",\n className = \"eight columns\",\n children = [\n \n # Grafico de barra\n\n html.Div(\n id = \"grafico1\",\n children = [\n html.B(\"Valor Financeira\"),\n html.Hr(),\n dcc.Graph(id=\"grafico-valor-financeiro-mensal\")\n ]),\n\n # Grafico de pizza\n\n html.Div(\n id = \"grafico2\",\n children = [\n html.B(\"Segmentação por VIA\"),\n html.Hr(),\n dcc.Graph(id=\"grafico-via\")\n ]),\n\n # Tabela\n\n html.B(\"Comparação por Estado\"),\n html.Hr(),\n dash_table.DataTable(\n id='tabela-comparativa-por-estado',\n columns=[{\"name\": i, \"id\": i} for i in ['Estado', 'Valor', 'Participação', 'Ano Anterior']]\n ),\n ],\n ),\n ],\n)\n\n@app.callback(\n Output(\"grafico-valor-financeiro-mensal\", \"figure\"),\n Output(\"grafico-via\", \"figure\"),\n [\n Input(\"ano-filtro\", \"value\"),\n Input(\"mov-filtro\", \"value\"),\n Input(\"sh2-filtro\", \"value\"),\n Input(\"via-filtro\", \"value\")\n ],\n)\ndef atualizar_graficos(ano, mov, sh2, via):\n\n # Ano\n\n filtro_ano = (f_comex['ANO'].astype(str) == ano)\n\n # Movimentação\n\n filtro_mov = (f_comex['MOVIMENTACAO'].astype(str) == mov)\n\n # SH2\n\n filtro_sh2 = pd.Series([True for i in range(0, len(f_comex['COD_SH2']))])\n\n if sh2 != None:\n\n filtro_sh2 = (f_comex['COD_SH2'].isin(sh2))\n\n # Via\n\n filtro_via = pd.Series([True for i in range(0, len(f_comex['COD_VIA']))])\n\n if via != None:\n\n filtro_via = (f_comex['COD_VIA'].isin(via))\n\n # Aplicando Filtros\n\n aux = f_comex.loc[filtro_ano & filtro_mov & filtro_sh2 & filtro_via].copy()\n\n ##### Criando Gráfico de Barras ####\n\n aux1 = aux.groupby('MES').agg(Valor = ('VL_FOB', 'sum')).reset_index().rename(columns = {'MES': 'Mes'})\n\n fig1 = go.Figure(\n [\n go.Bar(\n x = aux1['Mes'], \n y = aux1['Valor']\n )\n ]\n )\n\n ##### Criando Gráfico de Pizza #####\n\n aux2 = aux.groupby('COD_VIA').agg(Valor = ('VL_FOB', 'sum')).reset_index()\n aux2 = aux2.merge(d_via, on = 'COD_VIA', how = 'left')[['NM_VIA', 'Valor']].rename(columns = {'NM_VIA': 'Nome Via'})\n\n fig2 = go.Figure(\n data = [\n go.Pie(\n labels = aux2['Nome Via'], \n values = aux2['Valor']\n )\n ]\n )\n\n return fig1, fig2\n\n\n@app.callback(\n Output(\"tabela-comparativa-por-estado\", \"data\"),\n Output(\"valor-importado\", \"children\"),\n Output(\"valor-exportado\", \"children\"),\n [\n Input(\"ano-filtro\", \"value\"),\n Input(\"mov-filtro\", \"value\"),\n Input(\"sh2-filtro\", \"value\"),\n Input(\"via-filtro\", \"value\")\n ],\n)\ndef atualizar_tabela_e_cards(ano, mov, sh2, via):\n\n # Ano\n\n filtro_ano = (f_comex['ANO'].astype(str) == ano)\n\n # Movimentação\n\n filtro_mov = (f_comex['MOVIMENTACAO'].astype(str) == mov)\n\n # SH2\n\n filtro_sh2 = pd.Series([True for i in range(0, len(f_comex['COD_SH2']))])\n\n if sh2 != None:\n\n filtro_sh2 = (f_comex['COD_SH2'].isin(sh2))\n\n # Via\n\n filtro_via = pd.Series([True for i in range(0, len(f_comex['COD_VIA']))])\n\n if via != None:\n\n filtro_via = (f_comex['COD_VIA'].isin(via))\n\n # Aplicando Filtros\n\n aux_base0 = f_comex.loc[filtro_ano & filtro_sh2 & filtro_via].copy()\n aux_base = aux_base0.loc[filtro_mov].copy()\n\n aux = aux_base.groupby(['SG_UF']).agg(Valor = ('VL_FOB', 'sum')).reset_index().rename(columns = {'SG_UF': 
'Estado'})\n\n aux['Participação'] = round(aux['Valor']*100/aux['Valor'].sum(),2)\n\n # Montando variáveis de participação no Ano Anterior\n\n base_ano_anterior_comex = f_comex.loc[(f_comex['ANO'] == int(ano) - 1) & filtro_mov & filtro_sh2 & filtro_via].copy()\n\n base_ano_anterior_comex_group = base_ano_anterior_comex.groupby(['SG_UF']).agg(Valor = ('VL_FOB', 'sum')).reset_index().rename(columns = {'SG_UF': 'Estado'})\n\n base_ano_anterior_comex_group['Ano Anterior'] = round(base_ano_anterior_comex_group['Valor']*100/base_ano_anterior_comex_group['Valor'].sum(),2)\n\n # Montando tabela final\n\n aux = aux.merge(base_ano_anterior_comex_group[['Estado', 'Ano Anterior']], on = 'Estado', how = 'left')\n\n return aux.to_dict('records'), aux_base0.loc[aux_base0['MOVIMENTACAO'] == \"Importação\", 'VL_FOB'].sum(), aux_base0.loc[aux_base0['MOVIMENTACAO'] == \"Exportação\", 'VL_FOB'].sum()\n\nif __name__ == '__main__':\n\n app.run_server(debug=True)\n\n","repo_name":"denerdavi1/FIEC_Avaliacao","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9838,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22437316479","text":"from kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.core.window import Window\n\nfrom catchme import CatchMe\nfrom automaton import Automaton\n\n#### TO USE ####\n# size_hint=(None, None),\n# size = self.parent.size\n# with self.canvas:\n# Color(1,0,0,1)\n# Rectangle(\n# pos = self.parent.pos,\n# size_hint = (None, None),\n# size = self.parent.size\n# )\n#### TO USE ####\n\nclass MainWidget(Widget):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n grid_size = int(min(Window.size)*2/3)\n \n catch_me = CatchMe(\n size = (grid_size, grid_size),\n x = (Window.size[0]-grid_size)/2,\n y = (Window.size[1]-grid_size)/2)\n # self.add_widget(catch_me)\n \n autom = Automaton()\n self.add_widget(autom)\n autom.reset()\n\n\nclass FreeAdsApp(App):\n def build(self):\n self.width = Window.width\n self.height = Window.height\n return MainWidget()\n \n \n\nif __name__ == \"__main__\":\n \n app = FreeAdsApp()\n app.run()\n ","repo_name":"NoniosTheMad/FreeAds","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70703898184","text":"import logging\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, MutableMapping, Union\n\nimport wandb\nfrom omegaconf import OmegaConf\n\nlog = logging.getLogger(__name__)\n\n# Excluded from the wandb dashboard\nEXCLUDE_HPARAMS = (\n \"data.root\",\n \"data.normalize\",\n \"data.train.seed\",\n \"data.val.seed\",\n \"data.val.viz_samples\",\n \"data.test.vqa.seed\",\n \"other\",\n \"logging\",\n \"training.checkpoint_interval\",\n \"training.val_interval\",\n \"training.viz_interval\",\n \"training.test_interval\",\n)\n\n\ndef setup_wandb(cfg):\n \"\"\"Create wandb run, save config, log, and hyperparameters\"\"\"\n wandb.init(\n project=cfg.logging.project,\n group=cfg.logging.group,\n id=cfg.logging.id,\n name=cfg.logging.name,\n tags=cfg.logging.tags,\n notes=cfg.logging.notes,\n config=filter_cfg_for_wandb(cfg),\n mode=cfg.logging.mode,\n )\n wandb.save(\"train.yaml\", policy=\"now\")\n wandb.save(\"train.log\", policy=\"live\")\n wandb.save(\"checkpoint.*.pth\", policy=\"live\")\n\n\ndef filter_cfg_for_wandb(cfg, exclude=None):\n \"\"\"Remove unwanted entries for wandb config.\"\"\"\n\n def 
delete_(d: MutableMapping[str, Any], k: str):\n        k = k.split(\".\", maxsplit=1)\n        if len(k) == 1:\n            del d[k[0]]\n        else:\n            delete_(d[k[0]], k[1])\n\n    cfg = OmegaConf.to_container(cfg, resolve=True)\n    if exclude is None:\n        exclude = EXCLUDE_HPARAMS\n    for key in exclude:\n        delete_(cfg, key)\n    return cfg\n\n\ndef find_run_by_name(name, output_dir: Union[Path, str] = None) -> Path:\n    \"\"\"Find a run by wandb name/id by grepping all train.yaml files under a folder\"\"\"\n    p = subprocess.run(\n        [\n            \"grep\",\n            \"--include=train.yaml\",\n            \"--exclude-dir=wandb\",\n            \"--files-with-matches\",\n            \"-R\",  # Capital R -> follow symlinks\n            name,\n            \".\",\n        ],\n        capture_output=True,\n        text=True,\n        cwd=output_dir,\n        check=True,\n    )\n    outputs = p.stdout.splitlines()\n    if len(outputs) == 0:\n        raise FileNotFoundError(name)\n    if len(outputs) > 1:\n        raise RuntimeError(f\"Multiple matches {name}: {outputs}\")\n    return Path(outputs[0]).parent\n","repo_name":"baldassarreFe/iclr-osc-22","sub_path":"src/osc/wandb_utils.py","file_name":"wandb_utils.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"}
{"seq_id":"17381711561","text":"class Solution:\n    def romanToInt(self, s: str) -> int:\n        result = 0\n        data = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n        # Walk consecutive pairs: a numeral is subtracted when it precedes a\n        # larger one (the I in IV), otherwise it is added; the final numeral\n        # is always added after the loop.\n        for a, b in zip(s, s[1:]):\n            if data[a] < data[b]:\n                result -= data[a]\n            else:\n                result += data[a]\n        return result + data[s[-1]]","repo_name":"chogerlate/Learning_Stuff","sub_path":"Leetcode/Easy/13_romanToInt.py","file_name":"13_romanToInt.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42673078195","text":"#!/usr/bin/env python\n\n# Good for demonstrating Priority Queues. Runs the RoundRobin\n# demo for fast, the Deadline demo for Default, and spawns long\n# jobs for batch.
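\n#\n# The submissions below exercise the scheduler's three queue tiers:\n#   FAST    -> the RoundRobin demo\n#   DEFAULT -> the Deadline demo\n#   BATCH   -> two long jobs submitted with multi-day deadlines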
\n\nimport os\nimport datetime\nimport time\n\nfrom optparse import OptionParser\n\nparser = OptionParser(usage=\"./PriorityQueue.py --gh HOSTNAME --gp PORT\")\n\nparser.add_option(\"--gh\", \"--grid_hostname\", dest=\"ghost\",\n\thelp=\"The hostname the client should listen on\",\n\tmetavar=\"HOSTNAME\", default=\"127.0.0.1\")\n\nparser.add_option(\"--gp\", \"--grid_port\", dest=\"gport\",\n\thelp=\"The port the client should listen on\",\n\tmetavar=\"PORT\", default = 8051)\n\n(options, args) = parser.parse_args()\n\nos.system(\n\t\"./client.py --gh %s --gp %s --username admin --password admin -s PriorityQueue\" % (options.ghost, options.gport)\n\t)\n\nos.system(\n\t\"./demos/RoundRobin.py --gh %s --gp %s -t FAST -s NOCHANGE\"\n\t% (options.ghost, options.gport)\n\t)\n\nos.system(\n\t\"./demos/Deadline.py --gh %s --gp %s -t DEFAULT -s NOCHANGE\"\n\t% (options.ghost, options.gport)\n\t)\n\ndeadline = (datetime.datetime.now() + datetime.timedelta(days=3)).strftime('%Y-%m-%d %H:%M:%S')\nos.system(\n\t'./client.py --gh %s --gp %s -e test.py -t BATCH -b 20000 -w 24:00:00 -d \"%s\" testfiles/f1000.txt testfiles/f1000.txt'\n\t% (options.ghost, options.gport, deadline)\n\t)\n\ndeadline = (datetime.datetime.now() + datetime.timedelta(days=20)).strftime('%Y-%m-%d %H:%M:%S')\nos.system(\n\t'./client.py --gh %s --gp %s -e test.py -t BATCH -b 50000 -w 10:00:00:00 -d \"%s\" testfiles/f1000.txt'\n\t% (options.ghost, options.gport, deadline)\n\t)\n","repo_name":"sritchie73/bad-boyz-cluster","sub_path":"src/demos/PriorityQueue.py","file_name":"PriorityQueue.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"588366968","text":"import numpy as np\nimport torch\n\nxy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)\nx_data = torch.from_numpy(xy[:, :-1])\n# Indexing with [-1] in brackets keeps a 2-D matrix; without the brackets it would be a 1-D vector\ny_data = torch.from_numpy(xy[:, [-1]])\n\n\nclass Model(torch.nn.Module):\n    def __init__(self):\n        super(Model, self).__init__()\n        self.linear1 = torch.nn.Linear(8, 6)\n        self.linear2 = torch.nn.Linear(6, 4)\n        self.linear3 = torch.nn.Linear(4, 1)\n        # torch.nn.Sigmoid is a parameter-free module; the sigmoid in torch.nn.functional is a plain function\n        self.sigmoid = torch.nn.Sigmoid()\n        # self.activate = torch.nn.ReLU()  # fails with BCELoss: all elements of input should be between 0 and 1\n\n    def forward(self, x):\n        x = self.sigmoid(self.linear1(x))\n        x = self.sigmoid(self.linear2(x))\n        x = self.sigmoid(self.linear3(x))\n        return x\n\n\nmodel = Model()\n\ncriterion = torch.nn.BCELoss(reduction='mean')\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\nfor epoch in range(100):\n    y_pred = model(x_data)\n    loss = criterion(y_pred, y_data)\n    print(epoch, loss.item())\n\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n","repo_name":"chwwhut/pytorchreview","sub_path":"MultipleDimension_1.py","file_name":"MultipleDimension_1.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"9878206819","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/03/04\n# @Author : Wenhao Shan\n# @Desc : Decorators that catch ActionError, to handle exceptions while the server is running\nfrom flask import render_template, jsonify\nfrom functools import wraps\nfrom infra.utils.error import ActionError\nfrom infra.tool.enum.server_enum import ResponseStatus, ResponseMsg\n\n\n
# Custom error-handling decorators (Flask's built-in app.errorhandler could be used to build these instead)\ndef web_err(fun):\n    @wraps(fun)\n    def wrapper(*args, **kwargs):\n        try:\n            return fun(*args, **kwargs)\n        except ActionError as e:\n            err_dict = dict()\n            err_dict[\"errMsg\"] = e.message\n            return render_template(\"err.html\", **err_dict)\n    return wrapper\n\n\n# HTTP request error handler\ndef action_err(fun):\n    @wraps(fun)\n    def wrapper(*args, **kwargs):\n        try:\n            return fun(*args, **kwargs)\n        except ActionError as e:\n            res = {\n                \"errCode\": ResponseStatus.Failed,\n                \"errMsg\": e.message,\n                \"obj\": {\n                }\n            }\n            return jsonify(res)\n    return wrapper\n","repo_name":"shanwenhao1/Kafka","sub_path":"infra/flask/err_handle.py","file_name":"err_handle.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"32145125579","text":"import json\n\nimport requests\nimport yaml\nfrom fair_test import FairTest, FairTestEvaluation\n\n\nclass MetricTest(FairTest):\n    metric_path = 'i1-data-knowledge-representation-structured'\n    applies_to_principle = 'I1'\n    title = 'Data uses a formal structured knowledge representation language'\n    description = \"\"\"Maturity Indicator to test if the data uses a formal language broadly applicable for knowledge representation.\nThis particular test takes a broad view of what defines a 'knowledge representation language'; in this evaluation, anything that can be represented as structured data will be accepted\"\"\"\n    author = 'https://orcid.org/0000-0002-1501-1082'\n    metric_version = '0.1.0'\n    topics = ['data', 'minimal compliance']\n    test_test={\n        'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,\n        # 'https://doi.org/10.1594/PANGAEA.908011': 0,\n        'http://example.com': 0,\n    }\n\n\n    def evaluate(self, eval: FairTestEvaluation): \n        g = eval.retrieve_metadata(eval.subject)\n        if not isinstance(g, (list, dict)) and len(g) > 1:\n            eval.info(f'Successfully found and parsed RDF metadata available at {eval.subject}. It contains {str(len(g))} triples')\n        else:\n            eval.failure(f\"No RDF metadata found at the subject URL {eval.subject}\")\n            return eval.response()\n\n        subject_uri = eval.extract_metadata_subject(g, eval.data['alternative_uris'])\n        # Retrieve URI of the data in the RDF metadata\n        data_res = eval.extract_data_subject(g, subject_uri)\n        if len(eval.data['content_url']) < 1:\n            eval.failure(\"Could not find the data URI in the subject metadata.\")\n\n\n        # Check if structured data can be found at the data URI\n        for value in eval.data['content_url']:\n            eval.info(f'Found data URI: {value}. Try retrieving RDF')\n            data_g = eval.retrieve_metadata(value)\n
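\n            # Probing order: try RDF first, then fall back to JSON, then YAML (branches below).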
\n            if not isinstance(data_g, (list, dict)) and len(data_g) > 0:\n                eval.success(f'Successfully found and parsed RDF data. It contains {str(len(data_g))} triples')\n            elif isinstance(data_g, (list, dict)) and len(data_g) > 0:\n                eval.success(f'Successfully found and parsed structured data. It contains {str(len(data_g))} objects')\n\n            else:\n                eval.warn(f'No RDF data found for {value}, searching for JSON')\n                try:\n                    r = requests.get(value, headers={'accept': 'application/json'})\n                    metadata = r.json()\n                    eval.data['metadata_json'] = metadata\n                    eval.success(f'Successfully found and parsed JSON data for {value}')\n                except Exception:\n                    eval.warn(f'No JSON metadata found for {value}, searching for YAML')\n                    try:\n                        r = requests.get(value, headers={'accept': 'text/yaml'})\n                        metadata = yaml.load(r.text, Loader=yaml.FullLoader)\n                        eval.data['metadata_yaml'] = metadata\n                        eval.success(f'Successfully found and parsed YAML data for {value}')\n                    except Exception:\n                        eval.failure(f'No YAML metadata found for {value}')\n        \n        return eval.response()\n","repo_name":"MaastrichtU-IDS/fair-enough-metrics","sub_path":"metrics/i1_data_knowledge_representation_structured.py","file_name":"i1_data_knowledge_representation_structured.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"30957853815","text":"import imutils\nimport cv2\nimport pickle\nfrom threading import Thread  # base class for Vision below\n\n\nclass Vision(Thread):\n    # Responsible for handling images from camera\n\n    def __init__(self, camera_number, image_width, debug_mode, world):\n        \"\"\"\n        Init vision object\n        :param camera_number: Number of the used camera\n        :param image_width: Image width in pixels\n        :param debug_mode: If True shows the image for debug\n        :param world: World object\n        \"\"\"\n        super().__init__()  # initialise the Thread base class\n\n        '''\n        self.colorsLimits = {'blue': [(101, 125, 81), (120, 255, 255)],\n                             'green': [(32, 57, 106), (68, 216, 215)],\n                             'red': [[(0, 111, 113), (7, 244, 189)], [(179, 229, 156), (179, 229, 156)]]}\n        '''\n\n        with open('../calibr.wr', 'rb') as input_file:\n            self.colors_limits = pickle.load(input_file)\n\n        print(self.colors_limits)\n\n        self.camera = cv2.VideoCapture(camera_number)\n        self.image_width = image_width\n        self.debug_mode = debug_mode\n\n        self.world = world\n\n        self.world.init_all_balloons(self.colors_limits.keys())\n\n\n    def update(self):\n        \"\"\"\n        Update world information from camera image\n        \"\"\"\n        (_, frame) = self.camera.read()\n\n        frame = imutils.resize(frame, width=self.image_width)\n\n        frame_height = len(frame)\n\n        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n        for color, color_limits in self.colors_limits.items():\n            # Mask creation and handling\n            if color == 'red':\n                mask1 = cv2.inRange(hsv_frame, color_limits[0][0],\n                                    color_limits[0][1])\n                mask1 = cv2.erode(mask1, None, iterations=2)\n                mask1 = cv2.dilate(mask1, None, iterations=2)\n                mask2 = cv2.inRange(hsv_frame, color_limits[1][0],\n                                    color_limits[1][1])\n                mask2 = cv2.erode(mask2, None, iterations=2)\n                mask2 = cv2.dilate(mask2, None, iterations=2)\n                mask = mask2 + mask1\n            else:\n                mask = cv2.inRange(hsv_frame, color_limits[0], color_limits[1])\n                mask = cv2.erode(mask, None, iterations=2)\n                mask = cv2.dilate(mask, None, iterations=2)\n\n            all_contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n                                            cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n            detection = False\n\n            if len(all_contours) > 0:\n                contour = max(all_contours, key=cv2.contourArea)\n                approx = cv2.approxPolyDP(contour,\n                                          0.01 * cv2.arcLength(contour, True),\n                                          True)\n                area = cv2.contourArea(contour)\n                (_, _, _, height) = cv2.boundingRect(approx)\n\n                M = cv2.moments(contour)\n                # Centroid from image moments: (m10/m00, m01/m00)\n                center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n                if height > 10:\n                    position_x = (center[0] - (\n                        self.image_width / 2.0)) / float(\n                        (self.image_width / 2.0))\n                    position_y = (center[1] - (frame_height / 2.0)) / 
float(\n (frame_height / 2.0))\n height = height / float(frame_height)\n\n self.world.set_balloon(color, position_x, position_y,\n height,\n area)\n self.world.has_balloon = True\n detection = True\n\n if self.debug_mode:\n cv2.drawContours(frame, contour, -1, (255, 0, 0), 3)\n\n if detection is False:\n self.world.set_invisible_balloon(color)\n\n if self.debug_mode:\n frame = cv2.flip(frame, 1)\n cv2.imshow(\"Frame\", frame)\n\n\n def finish(self):\n \"\"\"\n Release the camera and destroy all windows.\n \"\"\"\n self.camera.release()\n cv2.destroyAllWindows()\n","repo_name":"gacra/warthog-faquinha","sub_path":"main_project/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6142603732","text":"import math\nimport design.vision.world_utils as utils\nimport numpy\n\n\ndef test_that_given_points_when_calculate_angle_then_angle_is_calculated():\n point1 = [10, 8]\n point2 = [-3, 2]\n angle = utils.calculate_angle(point1, point2)\n assert 0.43240777557053783 == angle\n\n\ndef test_that_given_angle_in_radians_when_define_cardinal_point_then_cardinal_point_is_defined():\n angle = - math.pi / 4\n cardinal_point = utils.define_cardinal_point(angle)\n assert \"N\" == cardinal_point\n\n\ndef test_than_given_points_when_calculate_norm_then_norm_is_calculated():\n point1 = [17, 21]\n point2 = [-3, -283]\n norm = utils.calculate_norm(point1[0], point1[1], point2[0], point2[1])\n assert 304.6571843892738 == norm\n\n\ndef test_that_given_array_when_eliminate_close_points_then_close_points_are_eliminated():\n array = numpy.array([[[666, 153]], [[709, 176]], [[686, 219]], [[643, 195]]])\n minimal_distance = 50\n new_array = (utils.eliminate_duplicated_points(array, minimal_distance))\n assert new_array == [(666, 153)]\n\n\ndef test_that_given_triangle_coordinates_when_calculate_triangle_shortest_edge_then_triangle_shortest_edge_is_given():\n triangle_coordinates = [(0, 0), (10, 0), (5, 20)]\n shortest_edge_coordinates = utils.triangle_shortest_edge(triangle_coordinates)\n assert shortest_edge_coordinates == [(0, 0), (10, 0)]\n\n\ndef test_that_given_contour_centroid_when_calculate_centroid_then_centroid_is_calculated():\n contour = numpy.array([[[0, 0]], [[0, 10]], [[10, 10]], [[10, 0]]])\n centroid = utils.calculate_centroid(contour)\n assert (5, 5) == centroid\n\n\ndef test_that_given_contour_when_calculate_box_area_then_area_is_calculated():\n contour = numpy.array([[[0, 0]], [[0, 10]], [[10, 10]], [[10, 0]]])\n area = utils.calculate_minimal_box_area(contour)\n assert 100 == area\n\n\ndef test_that_given_angle_in_radians_when_convert_angle_to_degrees_then_angle_is_converted():\n angle_in_radians = (- 3 * math.pi / 4)\n angle_in_degrees = utils.convert_to_degrees(angle_in_radians)\n assert 225.0 == angle_in_degrees\n\n\ndef test_that_given_obstacles_information_when_comparing_then_information_is_similar():\n information1 = [[[879, 428], 'O'], [[1146, 819], 'S'], [[1407, 435], 'N']]\n information2 = [[[880, 430], 'O'], [[1150, 820], 'S'], [[1410, 432], 'N']]\n assert utils.are_obstacles_information_similar(information1,\n information2)\n\n\ndef test_that_given_obstacles_information_when_comparing_then_information_is_not_similar():\n information1 = [[[500, 428], 'O'], [[1146, 819], 'S'], [[1407, 435], 'N']]\n information2 = [[[880, 430], 'O'], [[1150, 820], 'S'], [[1410, 432], 'N']]\n information3 = [[[880, 430], 'O'], [[1150, 820], 'N']]\n assert not 
utils.are_obstacles_information_similar(information1,\n information2)\n assert not utils.are_obstacles_information_similar(information1,\n information3)\n assert not utils.are_obstacles_information_similar(information2,\n information3)\n\n\ndef test_that_given_drawing_zone_information_when_comparing_then_information_is_similar():\n information1 = [(412, 414), (200, 395), (244, 500), (123, 372)]\n information2 = [(413, 415), (203, 396), (245, 505), (125, 375)]\n assert utils.are_drawing_zone_information_similar(information1,\n information2)\n\n\ndef test_that_given_drawing_zone_information_when_comparing_then_information_is_not_similar():\n information1 = [(412, 414), (200, 395), (244, 500), (123, 372)]\n information2 = [(413, 415), (203, 396), (245, 600), (125, 375)]\n assert not utils.are_drawing_zone_information_similar(information1,\n information2)\n\n\ndef test_that_given_two_robot_information_when_comparing_then_they_are_similar():\n information1 = [(123, 123), 123.00]\n information2 = [(124, 127), 124.00]\n assert utils.are_robot_information_similar(information1, information2)\n\n\ndef test_that_given_two_robot_information_when_comparing_then_they_are_not_similar():\n information1 = [(150, 123), 123.00]\n information2 = [(124, 200), 124.00]\n information3 = [(124, 400), 124.00]\n information4 = [(124, 400), 150.00]\n assert not utils.are_robot_information_similar(information1, information2)\n assert not utils.are_robot_information_similar(information1, information3)\n assert not utils.are_robot_information_similar(information2, information3)\n assert not utils.are_robot_information_similar(information4, information3)\n\n\ndef test_that_given_list_of_obstacles_information_when_comparing_them_then_best_information_is_given():\n obstacles_information = [[[[499, 432], 'O'], [[1150, 820], 'S'], [[1407, 435], 'N']],\n [[[500, 428], 'O'], [[1146, 819], 'S']],\n [[[505, 428], 'O'], [[1143, 819], 'S'], [[1402, 436], 'N']],\n [[[501, 428], 'O'], [[1146, 818], 'S'], [[1407, 435], 'N']],\n [[[1145, 815], 'S'], [[1405, 433], 'N']],\n [[[503, 428], 'O'], [[1147, 818], 'S'], [[1406, 432], 'N']]]\n new_information = utils.get_best_information(obstacles_information)\n assert new_information == [[[499, 432], 'O'], [[1150, 820], 'S'], [[1407, 435], 'N']]\n\n\ndef test_that_given_list_of_drawing_zone_information_when_comparing_them_then_best_information_is_given():\n drawing_zone_information = [[(412, 414), (200, 395), (244, 500), (123, 372)],\n [(413, 415), (203, 396), (245, 505), (125, 375)],\n [(412, 415), (202, 395), (244, 504), (122, 375)]]\n new_information = utils.get_best_information(drawing_zone_information)\n assert new_information == [(412, 414), (200, 395), (244, 500), (123, 372)]\n\n\ndef test_that_given_list_of_robot_information_when_comparing_them_then_best_information_is_given():\n robot_information = [[(124, 127), 124.00],\n [(124, 125), 125.10],\n [(125, 126), 122.00],\n [(100, 127), 124.00]]\n new_information = utils.get_best_information(robot_information)\n assert new_information == [(124, 127), 124.00]\n\n\ndef test_that_given_two_items_information_when_comparing_then_they_are_similar():\n information1 = [(412, 414), (200, 395), (244, 500), (123, 372)]\n information2 = [(413, 415), (203, 396), (245, 505), (125, 375)]\n information3 = [[[879, 428], 'O'], [[1146, 819], 'S'], [[1407, 435], 'N']]\n information4 = [[[880, 430], 'O'], [[1150, 820], 'S'], [[1410, 432], 'N']]\n information5 = [(123, 123), 123.00]\n information6 = [(124, 127), 124.00]\n assert 
utils.check_if_both_information_are_similar(information1,\n                                                       information2)\n    assert utils.check_if_both_information_are_similar(information3,\n                                                       information4)\n    assert utils.check_if_both_information_are_similar(information5,\n                                                       information6)\n    assert utils.check_if_both_information_are_similar(information1,\n                                                       information1)\n    assert utils.check_if_both_information_are_similar(information2,\n                                                       information2)\n    assert utils.check_if_both_information_are_similar(information3,\n                                                       information3)\n    assert utils.check_if_both_information_are_similar(information4,\n                                                       information4)\n    assert utils.check_if_both_information_are_similar(information5,\n                                                       information5)\n    assert utils.check_if_both_information_are_similar(information6,\n                                                       information6)\n\n\ndef test_that_given_two_items_information_when_comparing_then_they_are_not_similar():\n    information1 = [(412, 414), (200, 395), (244, 500), (123, 372)]\n    information2 = [(413, 415), (203, 396), (245, 600), (125, 375)]\n    information3 = [[[500, 428], 'O'], [[1146, 819], 'S'], [[1407, 435], 'N']]\n    information4 = [[[880, 430], 'O'], [[1150, 820], 'S'], [[1410, 432], 'N']]\n    information5 = [[[880, 430], 'O'], [[1150, 820], 'N']]\n    information6 = [(124, 400), 124.00]\n    information7 = [(124, 400), 150.00]\n    assert not utils.check_if_both_information_are_similar(information1,\n                                                           information2)\n    assert not utils.check_if_both_information_are_similar(information3,\n                                                           information4)\n    assert not utils.check_if_both_information_are_similar(information3,\n                                                           information5)\n    assert not utils.check_if_both_information_are_similar(information4,\n                                                           information5)\n    assert not utils.check_if_both_information_are_similar(information6,\n                                                           information7)\n\n\ndef test_that_given_close_points_in_list_when_eliminate_close_points_then_they_are_eliminated():\n    circles = [(1018, 497), (1018, 497)]\n    assert [[1018, 497]] == utils.eliminate_close_points_in_list(circles, 200)\n","repo_name":"AntoineGagne/design-3-glo","sub_path":"tests/vision/test_world_utils.py","file_name":"test_world_utils.py","file_ext":"py","file_size_in_byte":9578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"23515242518","text":"from django_filters import rest_framework as filters\n\nfrom bbbs.common.models import Tag\n\nfrom .models import Video\n\n\nclass VideoFilter(filters.FilterSet):\n    tags = filters.ModelMultipleChoiceFilter(\n        field_name='tags__slug',\n        to_field_name='slug',\n        queryset=Tag.objects.all()\n    )
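\n\n    # Usage sketch (route name assumed): with to_field_name='slug', a request\n    # like /videos/?tags=music&tags=sport selects videos whose tags carry those slugs.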
\n# Выполнить подсчет средней величины дохода сотрудников.\nfilename = '/home/alexander/work/salary.txt'\nresult = []\nwith open(filename, 'r') as file:\n salary = 0\n cou = 0\n fams = []\n for line in file:\n words = [i for i in line.strip().split(' ')]\n fam = words[0]\n sal = float(words[1])\n if sal < 20000.0:\n fams.append(fam)\n salary += sal\n cou += 1\nprint(\"Средняя зарплата составляет: {}\".format(sal/cou))\nprint(\"Сотрудники с окладом меньше 20 тыс.: {}\".format(fams))\n","repo_name":"AlGetcel/lessons","sub_path":"lesson5/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28960566303","text":"from functools import partial\nfrom typing import Dict, Tuple, List, Union\nimport pyperclip\n\nfrom ceaser import CaesarCipher\nfrom buffer import Buffer, Text\nfrom manager import FileHandler\n\n\nclass Executor:\n def __init__(self) -> None:\n self.caesar: CaesarCipher = CaesarCipher([13, 47])\n self.buffer: Buffer = Buffer()\n self.file_handler: FileHandler = FileHandler()\n\n def encrypt(self) -> Dict[str, str] | None:\n text_to_encrypt = input(\"Write text to encrypt\\n> \")\n print(\"Which type do you want to use? \")\n print(*self.caesar.rot_types, sep=\"\\n\")\n\n try:\n chose_type = int(input(\"> \"))\n except ValueError:\n print(\"I'm sorry this type is unavailable.\\n\")\n return\n\n if chose_type not in self.caesar.rot_types or chose_type < 0:\n print(\"I'm sorry this type is unavailable.\\n\")\n return\n\n encrypt_text = self.caesar.code_encoder_decoder(\n text_to_encrypt, chose_type, \"encrypted\"\n )\n\n text = Text(**encrypt_text)\n self.buffer.add(text)\n print(text, \"Text is in the clipboard.\", sep=\"\\n\")\n pyperclip.copy(text.text)\n\n return encrypt_text\n\n def decrypt(self) -> Dict[str, str] | None:\n text_to_encrypt = input(\"Write text to decrypt\\n> \")\n print(\"Which type do you want to use? 
\")\n print(*self.caesar.rot_types, sep=\"\\n\")\n\n try:\n chose_type = int(input(\"> \"))\n except ValueError:\n print(\"I'm sorry this type is unavailable.\")\n return\n\n if chose_type not in self.caesar.rot_types or chose_type < 0:\n print(\"I'm sorry this type is unavailable.\")\n return\n\n encrypt_text = self.caesar.code_encoder_decoder(\n text_to_encrypt, -chose_type, \"decrypted\"\n )\n\n text = Text(**encrypt_text)\n self.buffer.add(text)\n print(text, \"\\n\")\n\n return encrypt_text\n\n def check_changes(self) -> bool:\n \"\"\"Function checks changes in read file and tests_buffer\"\"\"\n return self.buffer.convert_to_arr_of_dicts() == self.file_handler.content\n\n def exit(self) -> None:\n if not self.check_changes():\n pass\n return\n\n def load_file(self) -> None:\n self.file_handler._name_file = None\n\n if not self.file_handler.get_file_name_from_user():\n return\n\n content: List[Dict[str, str]] = self.file_handler.open()\n\n if content:\n self.buffer.add_list_of_dict(content)\n print(\"Loaded:\", *content, sep=\"\\n\")\n\n def save_to_file(self) -> None:\n if self.file_handler.name_file:\n name_file = input(\n f\"Do you want to append content to: {self.file_handler.name_file}?[yes/no]\\n> \"\n )\n if name_file.upper() == \"NO\":\n self.file_handler._name_file = None\n\n self.file_handler.save(self.buffer.convert_to_arr_of_dicts())\n\n def print_buffer(self) -> None:\n if not self.buffer.data:\n print(\"History is empty!\")\n return\n self.buffer.print_buffer()\n\n\nclass Menu:\n def __init__(self) -> None:\n self.executor = Executor()\n self.options: Dict[int, Tuple[str, partial]] = {\n 1: (\"Encryption\", partial(self.executor.encrypt)),\n 2: (\"Decryption\", partial(self.executor.decrypt)),\n 3: (\"Load file\", partial(self.executor.load_file)),\n 4: (\"Save\", partial(self.executor.save_to_file)),\n 5: (\"History\", partial(self.executor.print_buffer)),\n 6: (\"Exit\", partial(self.executor.exit)),\n }\n\n def show(self) -> None:\n \"\"\"The show displays tests_menu.\"\"\"\n menu = [f\"{key}: {value[0]}\" for key, value in self.options.items()]\n print(\"\\nMenu: \")\n print(*menu, sep=\"\\n\")\n\n def execute(self, choice: int) -> Union[None | Dict[str, str]]:\n \"\"\"\n The executor chooses what should be executed.\n If the 'choice' does not exist, an error message will be displayed, and None will be returned.\n \"\"\"\n exe = self.options.get(choice)\n if not exe:\n self.__show_error()\n return\n return exe[1]()\n\n def __show_error(self) -> None:\n \"\"\"The method informs user about wrong choice.\"\"\"\n print(\"This option doesn't exist.\\n\")\n return\n\n def is_exit(self, key: int) -> bool:\n \"\"\"The method checks, if the value of the key is equal to 'Exit'\"\"\"\n check_exit = self.options.get(key, None)\n return check_exit and check_exit[0] == \"Exit\"\n","repo_name":"MatRos-sf/cipher","sub_path":"src/menu/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32634589820","text":"import asyncio\nimport os\nimport youtube_dl as yt\nimport discord\nfrom requests import get\n\nYDL_OPTIONS = {\n 'format': 'bestaudio/best',\n 'restrictfilenames': True,\n 'noplaylist': True,\n 'nocheckcertificate': True,\n 'ignoreerrors': False,\n 'logtostderr': False,\n 'quiet': True,\n 'no_warnings': True,\n 'default_search': 'auto',\n 'source_address': '0.0.0.0', # bind to ipv4 since ipv6 addresses cause issues sometimes\n}\nasync def 
search(arg):\n\n with yt.YoutubeDL(YDL_OPTIONS) as ydl:\n try:\n get(arg)\n except:\n video = ydl.extract_info(f\"ytsearch:{arg}\", download=False)['entries'][0]\n else:\n video = ydl.extract_info(arg, download=False)\n data = {'url': video['webpage_url'],'title': video['title'] }\n return data\n\ndef readToken():\n file = open('token', 'r')\n token = file.read()\n file.close()\n return token\n\nclass Youtube:\n DISCORD_TOKEN = readToken()\n\n intents = discord.Intents().all()\n client = discord.Client(intents=intents)\n\n yt.utils.bug_reports_message = lambda: ''\n\n ffmpeg_options = {\n 'options': '-vn'\n }\n\n\n ytdl = yt.YoutubeDL(YDL_OPTIONS)\n\nclass YTDLSource(discord.PCMVolumeTransformer):\n def __init__(self, source, *, data, volume=0.5):\n super().__init__(source, volume)\n self.data = data\n self.title = data.get('title')\n self.url = \"\"\n\n\n @classmethod\n async def from_url(cls, find, *, loop=None, stream=False):\n loop = loop or asyncio.get_event_loop()\n data = await loop.run_in_executor(None, lambda: Youtube.ytdl.extract_info(find, download=not stream))\n if 'entries' in data:\n # take first item from a playlist\n data = data['entries'][0]\n filename = data['title'] if stream else Youtube.ytdl.prepare_filename(data)\n return filename","repo_name":"GaberRB/tengu_bot_discord","sub_path":"src/apis/Youtube/Youtube.py","file_name":"Youtube.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8127223860","text":"from app import db\nfrom flask_bcrypt import Bcrypt\nimport jwt\nfrom datetime import datetime, timedelta\nfrom flask import request, current_app\n\n\nclass User(db.Model):\n \"\"\"User class creates an instance of a user\"\"\"\n \n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(256), nullable=False, unique=True)\n email = db.Column(db.String(256), nullable=False, unique=True)\n password = db.Column(db.String(256), nullable=False)\n businesses = db.relationship(\n 'Business', order_by='Business.businessid', cascade=\"all, delete-orphan\")\n\n reviews = db.relationship(\n 'Review', order_by='Review.id', cascade=\"all, delete-orphan\")\n\n\n def __init__(self, username, email, password):\n \n self.username = username\n self.email=email\n\n \"\"\"Hash password\"\"\"\n self.password = Bcrypt().generate_password_hash(password).decode('utf-8')\n\n def password_is_valid(self, password):\n \"\"\" Checks if provided password matches the hashed stored password \"\"\"\n return Bcrypt().check_password_hash(self.password, password)\n\n \n def save(self):\n \"\"\"Adds the created users details into the users table\"\"\"\n\n db.session.add(self)\n db.session.commit()\n\n @staticmethod\n def get_all():\n return User.query.all()\n\n def generate_token(self, user_id):\n \"\"\" generates the token to be used for authentification\"\"\"\n\n try:\n \"\"\"set payload, and indicate expiry duration of the token\"\"\"\n payload = {\n \n 'exp': datetime.utcnow() + timedelta(minutes=15),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n \"\"\"create the token\"\"\"\n jwt_string = jwt.encode(\n payload,\n current_app.config.get('SECRET'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as e:\n return str(e)\n\n \n @staticmethod\n def get_token():\n\n auth_header = request.headers.get('Authorization')\n\n if auth_header:\n access_token = auth_header.split(' ')[1]\n else:\n access_token = 'Invalid Token'\n return 
access_token\n\n    @staticmethod\n    def validate_token():\n        \"\"\" Checks if token used is valid\"\"\"\n\n        access_token = User.get_token()\n        user_id = User.decode_token(access_token)\n        blacklisttoken = Tokens.query.filter_by(\n            token=access_token, status='blacklisted').first()\n\n        res = {'access_token':User.get_token(),\n               'user_id': User.decode_token(access_token),\n               'decodable_token':access_token and isinstance(user_id, str),\n               'blacklisted_token':access_token and blacklisttoken}\n        return res\n\n\n\n    @staticmethod\n    def decode_token(token):\n        \"\"\"Decodes the access token\"\"\"\n        try:\n            \"\"\"Use SECRET variable used in configuration to decode token\"\"\"\n            payload = jwt.decode(token, current_app.config.get('SECRET'))\n            return payload['sub']\n        except jwt.ExpiredSignatureError: \n            return \"Kindly login to get a new token. Token is Expired\"\n\n        except jwt.InvalidTokenError:\n            return \"Please register or login, Token is Invalid\"\n\nclass Tokens(db.Model):\n    __tablename__ = 'blacklist'\n\n    id = db.Column(db.Integer, primary_key=True)\n    token = db.Column(db.String(), nullable=False)\n    status = db.Column(db.String(), nullable=False)  # active, blacklisted\n    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n    \n    def __init__(self, token, status):\n        \n        self.token = token\n        self.status = status\n    \n    def save(self):\n        \"\"\"Adds the token details into the blacklists table\"\"\"\n\n        db.session.add(self)\n        db.session.commit()\n\n    @staticmethod\n    def get_all():\n        return Tokens.query.all()\n\n\n\nclass Business(db.Model): \n\n    \"\"\"Business Class Creates an instance of business\"\"\"\n    __tablename__ = 'businesses'\n\n    businessid = db.Column(db.Integer, primary_key=True)\n    business_name = db.Column(db.String(255))\n    location = db.Column(db.String(255))\n    about = db.Column(db.String(255))\n    category = db.Column(db.String(255))\n\n    \"\"\"store modification timestamps\"\"\"\n    \n    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n    date_modified = db.Column(\n        db.DateTime, default=db.func.current_timestamp(),\n        onupdate = db.func.current_timestamp())\n\n    created_by = db.Column(db.Integer, db.ForeignKey(User.id))\n    reviews = db.relationship(\n        'Review', order_by='Review.id', cascade=\"all, delete-orphan\")\n\n    \n    def __init__(self,business_name, about, location, category, created_by):\n\n        self.business_name = business_name\n        self.location=location\n        self.about=about\n        self.category=category\n        self.reviews= []\n\n        #Save user who has created the business\n\n        self.created_by = created_by\n\n    def save(self):\n        \"\"\" This method adds the instance of the business created into businesses table\"\"\"\n        db.session.add(self)\n        db.session.commit()\n\n    @staticmethod\n    def get_all_auth(user_id):\n        \"\"\" This method retrieves all businesses for the particular logged in user\"\"\"\n\n        return Business.query.filter_by(created_by = user_id)\n\n    @staticmethod\n    def get_all():\n        \"\"\" This method retrieves all businesses\"\"\"\n        return Business.query.all()\n    \n    def delete(self):\n        db.session.delete(self)\n        db.session.commit()\n\n\n
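    # The `serialize` property below returns a JSON-friendly summary of the\n    # business (id, name, category, location) for use in API responses.\n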
db.Column(db.Integer, db.ForeignKey(User.id))\n \n date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n date_modified = db.Column(\n db.DateTime, default=db.func.current_timestamp(),\n onupdate = db.func.current_timestamp())\n\n\n businessid = db.Column(db.Integer, db.ForeignKey(Business.businessid))\n\n def __init__(self, content, created_by, businessid):\n\n self.content = content\n\n self.created_by = created_by\n self.businessid = businessid\n\n \n def save(self):\n \"\"\" This method adds the instance of the review created into reviews table\"\"\"\n db.session.add(self)\n db.session.commit()\n\n\n def delete(self):\n db.session.delete(self)\n db.session.commit() \n\n @staticmethod\n def get_all(businessid):\n \"\"\" This method retrieves all the reviews of a business\"\"\"\n return Review.query.filter_by(businessid = businessid)\n","repo_name":"kzyangiro/WeConnect","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3077708708","text":"\"\"\"The init file of the package.\"\"\"\n\nfrom typing import Any, Dict\n\nfrom sphinx.application import Sphinx\n\nfrom .facebook import Facebook, facebook_node\nfrom .linkedin import Linkedin, linkedin_node\nfrom .mastodon import Mastodon, mastodon_node\nfrom .twitter import Twitter, twitter_node\nfrom .utils import _NODE_VISITORS\n\n__version__ = \"0.0.0\"\n__author__ = \"Pierrick Rambaud\"\n__email__ = \"pierrick.rambaud49@gmail.com\"\n\n\ndef setup(app: Sphinx) -> Dict[str, Any]:\n \"\"\"Setup Sphinx application.\"\"\"\n # add the node directives to the build\n socials = {\n \"mastodon\": [mastodon_node, Mastodon],\n \"facebook\": [facebook_node, Facebook],\n \"linkedin\": [linkedin_node, Linkedin],\n \"twitter\": [twitter_node, Twitter],\n }\n\n for platform, nodes in socials.items():\n app.add_node(nodes[0], **_NODE_VISITORS) # type: ignore\n app.add_directive(platform, nodes[1])\n\n # add the javascript required by some providers\n js_params = {\"async\": \"async\", \"charset\": \"utf-8\"}\n app.add_js_file(\"https://platform.twitter.com/widgets.js\", **js_params) # type: ignore\n\n return {\n \"version\": __version__,\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }\n","repo_name":"12rambau/sphinx-social","sub_path":"sphinx_social/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"8456204833","text":"from pathlib import Path\n\n\ndef __list_of_calories() -> list[int]:\n \"\"\"Reads the input file into blocks of calories in the elves' backpacks\"\"\"\n input_path = Path(__file__).parent / \"input.txt\"\n with open(input_path, \"r\", encoding=\"utf-8\") as fp:\n blocks = fp.read().split(\"\\n\\n\")\n\n total_calories = [sum(int(c) for c in b.strip().split(\"\\n\")) for b in blocks]\n\n return total_calories\n\n\ndef find_most_calories() -> int:\n \"\"\"Finds the most calories in the input list.\"\"\"\n total_calories = __list_of_calories()\n\n return max(total_calories)\n\n\ndef find_topmost_calories(items: int) -> list[int]:\n \"\"\"Finds the topmost `items` calories.\"\"\"\n top_calories: list[int] = []\n total_calories = __list_of_calories()\n\n for _ in range(0, items):\n (idx, calories) = max(enumerate(total_calories), key=lambda v: v[1])\n total_calories[idx] = 0\n top_calories += [calories]\n\n return 
top_calories\n","repo_name":"moritzscholz/aoc2022","sub_path":"src/day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17339378551","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport numpy as np\nimport random\nfrom net.multimodal.experiment_db.experiment_db_setup import Base, Experiment\n\nengine = create_engine('sqlite:///experiments.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\nhidden_dims = [500, 800, 1000, 1200]\n\nn_exps = 48\n\n# Global Loss\nconfig = {}\nconfig['use_local'] = 0.\nconfig['use_global'] = 1.\nconfig['use_associat'] = 0.\nconfig['use_mil'] = False\nconfig['use_finetune_cnn'] = False\nconfig['use_finetune_w2v'] = False\nconfig['update_rule'] = 'sgd'\nconfig['thrglobalscore'] = True\nconfig['done'] = False\nfor i in range(n_exps):\n reg = 10 ** random.uniform(-8, 2) # regularization\n lr = 10 ** random.uniform(-8, 0) # learning rate\n hd = random.sample(hidden_dims, 1)[0] # choose one element from hidden dims\n global_margin = random.randint(1, 5) * 10 # choose global margin\n\n config['reg'] = reg\n config['learning_rate'] = lr\n config['hidden_dim'] = hd\n config['global_margin'] = global_margin\n config['priority'] = np.round(np.random.uniform(0., 1.), 4)\n\n s = Experiment(**config)\n session.add(s) # add row\n\n\nsession.commit()","repo_name":"bbugs/sty_net","sub_path":"net/multimodal/experiment_db/exp_db_populator_global.py","file_name":"exp_db_populator_global.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8099607764","text":"from openerp import _, api, fields, models\nfrom openerp.exceptions import Warning as UserError\n\n\nclass HrEquipmentRequest(models.Model):\n _name = \"hr.equipment_request\"\n _description = \"Employee Equipment Request\"\n _inherit = [\n \"mail.thread\",\n \"tier.validation\",\n \"base.sequence_document\",\n \"base.workflow_policy_object\",\n \"base.cancel.reason_common\",\n \"base.terminate.reason_common\",\n ]\n _state_from = [\"draft\", \"confirm\"]\n _state_to = [\"approve\"]\n\n @api.model\n def _default_company_id(self):\n return self.env.user.company_id.id\n\n @api.model\n def _default_employee_id(self):\n employees = self.env.user.employee_ids\n if len(employees) > 0:\n return employees[0].id\n\n @api.multi\n def _compute_policy(self):\n _super = super(HrEquipmentRequest, self)\n _super._compute_policy()\n\n name = fields.Char(\n string=\"# Document\",\n default=\"/\",\n required=True,\n copy=False,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n company_id = fields.Many2one(\n string=\"Company\",\n comodel_name=\"res.company\",\n copy=True,\n required=True,\n default=lambda self: self._default_company_id(),\n )\n date_request = fields.Date(\n string=\"Date Request\",\n copy=True,\n required=True,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n employee_id = fields.Many2one(\n string=\"Employee\",\n comodel_name=\"hr.employee\",\n copy=True,\n default=lambda self: self._default_employee_id(),\n required=True,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n type_id = fields.Many2one(\n string=\"Type\",\n comodel_name=\"hr.equipment_request_type\",\n copy=True,\n required=True,\n 
readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n reason_id = fields.Many2one(\n string=\"Reason\",\n comodel_name=\"hr.equipment_request_reason\",\n copy=True,\n required=True,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n allowed_product_categ_ids = fields.Many2many(\n string=\"Allowed Product Categories\",\n comodel_name=\"product.category\",\n related=\"type_id.allowed_product_categ_ids\",\n )\n allowed_product_ids = fields.Many2many(\n string=\"Allowed Products\",\n comodel_name=\"product.product\",\n related=\"type_id.allowed_product_ids\",\n )\n department_id = fields.Many2one(\n string=\"Department\",\n comodel_name=\"hr.department\",\n copy=False,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n manager_id = fields.Many2one(\n string=\"Manager\",\n comodel_name=\"hr.employee\",\n copy=False,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n job_id = fields.Many2one(\n string=\"Job Position\",\n comodel_name=\"hr.job\",\n copy=False,\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n detail_ids = fields.One2many(\n string=\"Details\",\n comodel_name=\"hr.equipment_request_detail\",\n copy=True,\n inverse_name=\"request_id\",\n readonly=True,\n states={\n \"draft\": [\n (\"readonly\", False),\n ],\n },\n )\n procurement_group_id = fields.Many2one(\n string=\"Procurement Group\",\n comodel_name=\"procurement.group\",\n copy=False,\n readonly=True,\n )\n note = fields.Text(\n string=\"Note\",\n copy=True,\n )\n state = fields.Selection(\n string=\"State\",\n selection=[\n (\"draft\", \"Draft\"),\n (\"confirm\", \"Waiting for Approval\"),\n (\"approve\", \"Procurement Planning\"),\n (\"open\", \"Waiting for Realization\"),\n (\"done\", \"Done\"),\n (\"cancel\", \"Cancelled\"),\n ],\n default=\"draft\",\n copy=False,\n compute=False,\n required=True,\n readonly=True,\n )\n # Log Fields\n confirm_date = fields.Datetime(\n string=\"Confirm Date\",\n readonly=True,\n copy=False,\n )\n confirm_user_id = fields.Many2one(\n string=\"Confirmed By\",\n comodel_name=\"res.users\",\n readonly=True,\n copy=False,\n )\n open_date = fields.Datetime(\n string=\"Open Date\",\n readonly=True,\n copy=False,\n )\n open_user_id = fields.Many2one(\n string=\"Opened By\",\n comodel_name=\"res.users\",\n readonly=True,\n copy=False,\n )\n done_date = fields.Datetime(\n string=\"Finish Date\",\n readonly=True,\n copy=False,\n )\n done_user_id = fields.Many2one(\n string=\"Finished By\",\n comodel_name=\"res.users\",\n readonly=True,\n copy=False,\n )\n cancel_date = fields.Datetime(\n string=\"Cancel Date\",\n readonly=True,\n copy=False,\n )\n cancel_user_id = fields.Many2one(\n string=\"Cancelled By\",\n comodel_name=\"res.users\",\n readonly=True,\n copy=False,\n )\n\n # Policy Field\n confirm_ok = fields.Boolean(\n string=\"Can Confirm\",\n compute=\"_compute_policy\",\n )\n open_ok = fields.Boolean(\n string=\"Can Open\",\n compute=\"_compute_policy\",\n )\n done_ok = fields.Boolean(\n string=\"Can Finished\",\n compute=\"_compute_policy\",\n )\n restart_approval_ok = fields.Boolean(\n string=\"Can Restart Approval\",\n compute=\"_compute_policy\",\n )\n cancel_ok = fields.Boolean(\n string=\"Can Cancel\",\n compute=\"_compute_policy\",\n )\n restart_ok = fields.Boolean(\n string=\"Can Restart\",\n compute=\"_compute_policy\",\n )\n\n @api.multi\n def action_confirm(self):\n for document in self:\n 
document.write(document._prepare_confirm_data())\n document.request_validation()\n\n @api.multi\n def action_approve(self):\n for document in self:\n document.write(document._prepare_approve_data())\n\n @api.multi\n def action_open(self):\n for document in self:\n document.write(document._prepare_open_data())\n document._create_procurement_order()\n\n @api.multi\n def action_done(self):\n for document in self:\n if not document._check_detail_state():\n return False\n document.write(document._prepare_done_data())\n\n @api.multi\n def action_cancel(self):\n for document in self:\n document.write(document._prepare_cancel_data())\n document.restart_validation()\n\n @api.multi\n def action_restart(self):\n for document in self:\n document.write(document._prepare_restart_data())\n\n @api.multi\n def _prepare_confirm_data(self):\n self.ensure_one()\n return {\n \"state\": \"confirm\",\n \"confirm_date\": fields.Datetime.now(),\n \"confirm_user_id\": self.env.user.id,\n }\n\n @api.multi\n def _prepare_open_data(self):\n self.ensure_one()\n return {\n \"state\": \"open\",\n \"open_date\": fields.Datetime.now(),\n \"open_user_id\": self.env.user.id,\n }\n\n @api.multi\n def _prepare_approve_data(self):\n self.ensure_one()\n ctx = self.env.context.copy()\n ctx.update(\n {\n \"ir_sequence_date\": self.date_request,\n }\n )\n sequence = self.with_context(ctx)._create_sequence()\n pg = self._create_procurement_group(sequence)\n return {\n \"state\": \"approve\",\n \"name\": sequence,\n \"procurement_group_id\": pg.id,\n \"approve_date\": fields.Datetime.now(),\n \"approve_user_id\": self.env.user.id,\n }\n\n @api.multi\n def _prepare_done_data(self):\n self.ensure_one()\n return {\n \"state\": \"done\",\n \"done_date\": fields.Datetime.now(),\n \"done_user_id\": self.env.user.id,\n }\n\n @api.multi\n def _prepare_cancel_data(self):\n self.ensure_one()\n return {\n \"state\": \"cancel\",\n \"cancel_date\": fields.Datetime.now(),\n \"cancel_user_id\": self.env.user.id,\n }\n\n @api.multi\n def _prepare_restart_data(self):\n self.ensure_one()\n return {\n \"state\": \"draft\",\n \"confirm_date\": False,\n \"confirm_user_id\": False,\n \"approve_date\": False,\n \"approve_user_id\": False,\n \"open_date\": False,\n \"open_user_id\": False,\n \"done_date\": False,\n \"done_user_id\": False,\n \"cancel_date\": False,\n \"cancel_user_id\": False,\n \"cancel_reason_id\": False,\n }\n\n @api.multi\n def _create_procurement_group(self, name):\n self.ensure_one()\n obj_pg = self.env[\"procurement.group\"]\n return obj_pg.create(self._prepare_procurement_group_data(name))\n\n @api.multi\n def _check_detail_state(self):\n self.ensure_one()\n for detail in self.detail_ids:\n if detail.state != \"done\":\n error_msg = \"Please finished all procurement\"\n raise UserError(error_msg)\n return True\n\n @api.multi\n def _prepare_procurement_group_data(self, name):\n return {\n \"name\": name,\n \"move_type\": \"direct\",\n }\n\n @api.multi\n def _create_procurement_order(self):\n self.ensure_one()\n for detail in self.detail_ids:\n detail._create_procurement_order()\n\n @api.onchange(\n \"employee_id\",\n )\n def onchange_department_id(self):\n self.department_id = False\n if self.employee_id:\n self.department_id = self.employee_id.department_id\n\n @api.onchange(\n \"employee_id\",\n )\n def onchange_manager_id(self):\n self.manager_id = False\n if self.employee_id:\n self.manager_id = self.employee_id.parent_id\n\n @api.onchange(\n \"employee_id\",\n )\n def onchange_job_id(self):\n self.job_id = False\n if 
self.employee_id:\n self.job_id = self.employee_id.job_id\n\n @api.multi\n def unlink(self):\n strWarning = _(\"You can only delete data on draft state\")\n for document in self:\n if document.state != \"draft\":\n if not self.env.context.get(\"force_unlink\", False):\n raise UserError(strWarning)\n _super = super(HrEquipmentRequest, self)\n _super.unlink()\n\n @api.multi\n def validate_tier(self):\n _super = super(HrEquipmentRequest, self)\n _super.validate_tier()\n for document in self:\n if document.validated:\n document.action_approve()\n\n @api.multi\n def restart_validation(self):\n _super = super(HrEquipmentRequest, self)\n _super.restart_validation()\n for document in self:\n document.request_validation()\n\n @api.multi\n def name_get(self):\n result = []\n for record in self:\n if record.name == \"/\":\n name = \"*\" + str(record.id)\n else:\n name = record.name\n result.append((record.id, name))\n return result\n","repo_name":"open-synergy/opnsynid-hr-equipment","sub_path":"hr_equipment/models/hr_equipment_request.py","file_name":"hr_equipment_request.py","file_ext":"py","file_size_in_byte":11865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28861716407","text":"import esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome import automation\nfrom esphome.components import i2c, time\nfrom esphome.const import CONF_ID, CONF_SLEEP_DURATION\n\nDEPENDENCIES = ['i2c']\n\nCONF_I2C_ADDR = 0x51\n\nbm8563 = cg.esphome_ns.namespace('bm8563')\nBM8563 = bm8563.class_('BM8563', cg.Component, i2c.I2CDevice)\nWriteAction = bm8563.class_(\"WriteAction\", automation.Action)\nReadAction = bm8563.class_(\"ReadAction\", automation.Action)\nSleepAction = bm8563.class_(\"SleepAction\", automation.Action)\n\nCONFIG_SCHEMA = time.TIME_SCHEMA.extend({\n cv.GenerateID(): cv.declare_id(BM8563),\n cv.Optional(CONF_SLEEP_DURATION): cv.positive_time_period_milliseconds,\n}).extend(cv.COMPONENT_SCHEMA).extend(i2c.i2c_device_schema(CONF_I2C_ADDR))\n\n@automation.register_action(\n \"bm8563.write_time\",\n WriteAction,\n cv.Schema(\n {\n cv.GenerateID(): cv.use_id(BM8563),\n }\n ),\n)\nasync def bm8563_write_time_to_code(config, action_id, template_arg, args):\n var = cg.new_Pvariable(action_id, template_arg)\n await cg.register_parented(var, config[CONF_ID])\n return var\n\n@automation.register_action(\n \"bm8563.apply_sleep_duration\",\n SleepAction,\n automation.maybe_simple_id(\n {\n cv.GenerateID(): cv.use_id(BM8563),\n }\n ),\n)\nasync def bm8563_apply_sleep_duration_to_code(config, action_id, template_arg, args):\n var = cg.new_Pvariable(action_id, template_arg)\n await cg.register_parented(var, config[CONF_ID])\n return var\n\n@automation.register_action(\n \"bm8563.read_time\",\n ReadAction,\n automation.maybe_simple_id(\n {\n cv.GenerateID(): cv.use_id(BM8563),\n }\n ),\n)\nasync def bm8563_read_time_to_code(config, action_id, template_arg, args):\n var = cg.new_Pvariable(action_id, template_arg)\n await cg.register_parented(var, config[CONF_ID])\n return var\n\nasync def to_code(config):\n var = cg.new_Pvariable(config[CONF_ID])\n await cg.register_component(var, config)\n await i2c.register_i2c_device(var, config)\n await time.register_time(var, config)\n if CONF_SLEEP_DURATION in config:\n 
cg.add(var.set_sleep_duration(config[CONF_SLEEP_DURATION]))\n","repo_name":"sebirdman/m5paper_esphome","sub_path":"custom_components/bm8563/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"62"}
{"seq_id":"33438342440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 7 13:37:56 2020\n\n@author: Achuth MG\n\"\"\"\n\n\"\"\" operator precedence: \"\"\"\n\n\"\"\"\n1. parenthesis\n2. power\n3. Multiplication Division and remainder\n4. Addition and subtraction\n5. Left to right\n\n\"\"\"\n\nx = (2+1) ** 3 * 4 + 10\n\ny = 3 * 4 + 10 ** (2+1) \n","repo_name":"Amithmg6/FinestStatsModel","sub_path":"Python/beginner_course/operator_precedence.py","file_name":"operator_precedence.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"38171410885","text":"def makeMap(filename):\n map = []\n with open(filename) as f:\n for line in f:\n row = [int(height) for height in list(line.strip())]\n map.append(row.copy())\n return map\n\ndef findLowPoints(map):\n low_points = []\n adjacent = [(1,0), (-1,0), (0,1), (0,-1)]\n for i in range(len(map)):\n for j in range(len(map[i])):\n neighbors = []\n for elem in adjacent:\n x = i + elem[0]\n y = j + elem[1]\n if 0 <= x < len(map) and 0 <= y < len(map[0]):\n neighbors.append(map[x][y])\n if min(neighbors) > map[i][j]:\n low_points.append((i, j))\n return low_points\n\ndef findBasins(map, low_points):\n visited = set()\n adjacent = [(1,0), (-1,0), (0,1), (0,-1)]\n not_basin = 9\n basins = []\n for low_point in low_points:\n stack = [low_point]\n visited.add(low_point)\n basin_size = 1\n while len(stack) > 0:\n current = stack.pop()\n i, j = current[0], current[1]\n for elem in adjacent:\n x = i + elem[0]\n y = j + elem[1]\n if 0 <= x < len(map) and 0 <= y < len(map[0]):\n if map[x][y] != not_basin and (x, y) not in visited:\n stack.append((x, y))\n visited.add((x, y))\n basin_size += 1\n basins.append(basin_size)\n return basins\n\ndef threeLarBasins(basins):\n basins.sort(reverse=True)\n return basins[0] * basins[1] * basins[2]\n\nmap = makeMap('input1209.txt')\nlow_points = findLowPoints(map)\nbasins = findBasins(map, low_points)\nres = threeLarBasins(basins)\nprint(res)","repo_name":"summerm104/AdventOfCode2021","sub_path":"day9_2.py","file_name":"day9_2.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"27756053687","text":"import requests, json\n\nwith open(\"credentials.json\", \"r\", encoding=\"utf-8\") as credentialsFile:\n credentials = json.load(credentialsFile)\n\nbaseUrl = \"https://flyviking.net/api/index.php?\"\napiKey = credentials[\"websiteKey\"]\n\ndoNotTrack = False # Only for testing purposes.\n\ndef fileQuery(q):\n r = requests.get(f\"{baseUrl}/core/search&q={q}&type=downloads_file&search_and_or=and&doNotTrack={doNotTrack}&key={apiKey}\")\n\n json_data = r.json()\n if json_data[\"totalResults\"] == 1:\n result = getFileById(json_data[\"results\"][0][\"itemId\"])\n return result\n else:\n return json_data[\"totalResults\"]\n\ndef getFileById(i):\n r = requests.get(f\"{baseUrl}/downloads/files/{i}&key={apiKey}\")\n\n json_data = r.json()\n if \"errorCode\" in json_data:\n return False\n else:\n return 
json_data\n","repo_name":"JoramD0/FlyVikingBot","sub_path":"websiteInterface.py","file_name":"websiteInterface.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29869508843","text":"# Raspberry Pi Alarm Clock\n# 2014, Ismail Uddin\n# www.scienceexposure.com\n\nimport time\nimport RPi.GPIO as GPIO\nfrom buzzer import buzz\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nresponse = raw_input(\"Please input the time for the alarm in format HHMM: \\n\")\n\nprint(\"Alarm has been set for %s hrs\" % response)\nbuzz(500,0.1)\n\nalarm = int(response)\nawake = 0\n\ntry:\n # Loop to continuously check time, buzz the buzzer for the set alarm time\n while True:\n # Continually get's the time as an integer value\n curr_time = int(time.strftime(\"%H%M\"))\n\n # Buzzes the buzzer when the time reaches the set alarm time\n if curr_time == alarm:\n buzz(10,0.5)\n time.sleep(0.25)\n buzz(20,0.5)\n time.sleep(0.25)\n awake = 1\n\n # Snoozes the alarm for 8 minutes from the current time\n # Only works whilst the alarm is buzzing\n if GPIO.input(25) == 0 and awake == 1:\n alarm += 8\n awake = 0\n print(alarm)\n\n # If alarm continues past the set alarm time without being\n # snoozed, the alarm time is changed to the current time.\n # This ensures the alarm buzzes continuously until the\n # snooze button is pressed.\n elif curr_time != alarm and awake == 1:\n alarm = curr_time\n buzz(10,0.5)\n time.sleep(0.25)\n buzz(20,0.5)\n\nfinally:\n GPIO.cleanup()\n print(\"End\")\n","repo_name":"ismailuddin/raspberrypi","sub_path":"alarm_clock/alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"62"} +{"seq_id":"73089790597","text":"import FWCore.ParameterSet.Config as cms\n\n### CMSSW command line parameter parser \nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('python')\noptions.register (\n 'outputFileName','file.lhe',VarParsing.multiplicity.singleton,VarParsing.varType.string,\n 'output file name created by cmsRun');\n\noptions.parseArguments()\n\nprocess = cms.Process(\"dumLHE\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10)\n)\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring('/store/mc/RunIIWinter15wmLHE/VectorMonoW_Mphi-500_Mchi-1_gSM-1p0_gDM-1p0_13TeV-madgraph/LHE/MCRUN2_71_V1-v4/10000/04E93F09-696F-E511-A316-B083FECF8ACE.root',\n '/store/mc/RunIIWinter15wmLHE/VectorMonoW_Mphi-500_Mchi-1_gSM-1p0_gDM-1p0_13TeV-madgraph/LHE/MCRUN2_71_V1-v4/10000/50F00C3E-7D6F-E511-A4F8-001E6757EAA4.root',\n '/store/mc/RunIIWinter15wmLHE/VectorMonoW_Mphi-500_Mchi-1_gSM-1p0_gDM-1p0_13TeV-madgraph/LHE/MCRUN2_71_V1-v4/10000/82239C23-9E6F-E511-BF2D-00304865C40C.root'),\n processingMode = cms.untracked.string('Runs'),\n)\n\nprocess.externalLHEAsciiDumper = cms.EDAnalyzer('LHEInfoReader',\n outputLHEFileName = cms.string(options.outputFileName))\n\nprocess.p = cms.Path(process.externalLHEAsciiDumper)\n","repo_name":"kmcdermo/AnalysisCode","sub_path":"MonoXAnalysis/test/testExternalLHEDumper_cfg.py","file_name":"testExternalLHEDumper_cfg.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70473226438","text":"from run_qemu import run_qemu\nimport argparse\nimport pathlib\nimport sys\n\n\ndef 
main():\n parser = argparse.ArgumentParser(\n description=\"Run a single test using qemu\"\n )\n parser.add_argument(\n \"--qemu-command\", required=True, help=\"qemu-system- path\"\n )\n parser.add_argument(\n \"--qemu-machine\",\n required=True,\n help=\"name of the machine to pass to QEMU\",\n )\n parser.add_argument(\n \"--qemu-cpu\", required=False, help=\"name of the cpu to pass to QEMU\"\n )\n parser.add_argument(\n \"--qemu-params\",\n required=False,\n help='list of arguments to pass to qemu, separated with \":\"',\n )\n parser.add_argument(\n \"--timeout\",\n type=int,\n default=120,\n help=\"timeout, in seconds (default: 120)\",\n )\n parser.add_argument(\n \"--execdir\",\n type=pathlib.Path,\n default=pathlib.Path.cwd(),\n help=\"directory to run the program from\",\n )\n parser.add_argument(\n \"--codesign_identity\",\n type=str,\n help=\"ignored, used for compatibility with libc++ tests\",\n )\n parser.add_argument(\n \"--env\",\n type=str,\n nargs=\"*\",\n help=\"ignored, used for compatibility with libc++ tests\",\n )\n parser.add_argument(\"image\", help=\"image file to execute\")\n parser.add_argument(\n \"arguments\",\n nargs=argparse.REMAINDER,\n default=[],\n help=\"optional arguments for the image\",\n )\n args = parser.parse_args()\n ret_code = run_qemu(\n args.qemu_command,\n args.qemu_machine,\n args.qemu_cpu,\n args.qemu_params.split(\":\") if args.qemu_params else [],\n args.image,\n [args.image] + args.arguments,\n args.timeout,\n args.execdir,\n )\n sys.exit(ret_code)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ARM-software/LLVM-embedded-toolchain-for-Arm","sub_path":"test-support/lit-exec-qemu.py","file_name":"lit-exec-qemu.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":299,"dataset":"github-code","pt":"62"} +{"seq_id":"37251848000","text":"# ÇA MARCHE !!\n\nimport socket\nimport random\nimport time\n\nHOST = '' # Symbolic name meaning all available interfaces\nPORT = 12000 # Arbitrary non-privileged port\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n\nwhile True:\n\t#s.send(\"test\\n\".encode());\n\tdata = random.randrange(0,1023,1) #start, stop, step\n\tdata = (str(data)).encode()\n\ts.send(data)\n\ttime.sleep(0.5)\n","repo_name":"pleekMan/observation","sub_path":"testPython.py","file_name":"testPython.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23435023299","text":"\nimport mne\nimport os\nimport os.path as op\nimport mayavi.mlab as mm\nimport matplotlib.pyplot as plt\n\n# set these variables\n\n\nsubjects_dir = '/storage/anat/subjects/'\nsubjects = [x for x in os.listdir(subjects_dir) if op.isdir('%s%s' % (subjects_dir, x)) and 'genz' in x]\nsubjects.sort()\n# subjects_dir = '/home/nordme/data/genz/anat/'\nn_jobs = 16\n\n# variables\n\nspacing = 5.0 # in millimeters\noverwrite = True\n\nfor subject in subjects:\n base_path = op.join(subjects_dir, subject)\n\n # run setup_source_space\n src_dir = op.join(base_path, 'bem')\n bem_path = op.join(src_dir, '%s-5120-bem-sol.fif' % subject)\n# brain_path = op.join(base_path, 'bem', 'watershed', '%s_brain_surface' % subject)\n# brain_surf = mne.read_surface(brain_path, return_dict=True)[2]\n aseg_path = op.join(base_path, 'mri', 'aseg.mgz')\n bem = mne.read_bem_solution(bem_path)\n vsurf_name = op.join(src_dir, '%s-%smm-v_aseg-src.h5' % (subject, int(spacing)))\n volume_labels = 
mne.get_volume_labels_from_aseg(aseg_path)\n assert volume_labels[0] == 'Unknown'\n volume_labels.pop(0)\n\n print('Working on source space for subject %s.' % subject)\n\n v_surf = mne.setup_volume_source_space(subject=subject, subjects_dir=subjects_dir,\n bem=bem, pos=spacing, mri='aseg.mgz', volume_label=volume_labels,\n add_interpolator=True )\n mne.externals.h5io.write_hdf5(vsurf_name, v_surf, overwrite=overwrite)\n# mne.write_source_spaces(vsurf_name, v_surf, overwrite=overwrite)\n # source space plot\n surf_plot = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,\n surfaces='white', coord_frame='head', src=v_surf)\n vsurf_save = op.join(src_dir, '%s_v_aseg_plot.png' % subject)\n mm.title('Volumetric Source Space Bounded By BEM Surface')\n mm.savefig(filename=vsurf_save, figure=surf_plot)\n mm.close()\n # bem plot\n# vsurf_plot2 = mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n# src=vsurf_name)\n# vsurf2_save = op.join(src_dir, '%s_v_aseg_plot_bem.png' % subject)\n# plt.title('Volumetric Source Space Bounded By BEM Surface')\n# plt.savefig(fname=vsurf2_save, figure=vsurf_plot2)\n# plt.close()\n","repo_name":"nordme/nordme_work_repo","sub_path":"genz_v_source_space.py","file_name":"genz_v_source_space.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"20511372753","text":"from microbit import *\nimport random\nclass Env:\n def __init__(self):\n # 0: 路 1: 起点 2: 终点 3: 墙 4: 大便\n self.map = [\n [1,0,0,0,3],\n [0,3,0,0,0],\n [0,0,3,0,3],\n [0,0,0,0,0],\n [3,4,0,3,2],\n ]\n self.map_size = 5\n self.action_space = ['u', 'd', 'l', 'r']\n self.entrance = [0, 0]\n self.reach_count = 0\n self.accuracy_offset = 0\n self.reset()\n def action_index(self, a):\n return self.action_space.index(a)\n def step(self, action):\n dest = self.actor_pos[:]\n if action == 'u':\n if self.actor_pos[0] - 1 >= 0:\n dest[0] -= 1\n elif action == 'd':\n if dest[0] + 1 < self.map_size:\n dest[0] += 1\n elif action == 'l':\n if dest[1] - 1 >= 0:\n dest[1] -= 1\n elif action == 'r':\n if dest[1] + 1 < self.map_size:\n dest[1] += 1\n \n v = self.map[dest[0]][dest[1]] \n if v == 3:\n reward = -10\n done = True\n elif v == 4:\n reward = -30\n done = True\n elif v == 2:\n reward = 50\n done = True\n self.reach_count += 1\n if self.reach_count >= 10:\n self.reach_count = 0\n self.accuracy_offset += 1\n else:\n reward = -1\n done = False\n self.move_to(dest, done)\n return dest, reward, done\n def reset(self):\n self.init()\n self.actor_pos = self.entrance[:]\n display.set_pixel(self.actor_pos[1], self.actor_pos[0], 5)\n return self.actor_pos\n def move_to(self, dest_pos, done = False):\n display.set_pixel(self.actor_pos[1], self.actor_pos[0], 0)\n display.set_pixel(dest_pos[1], dest_pos[0], 5)\n self.actor_pos = dest_pos[:]\n if done == True:\n sleep(100)\n def init(self):\n for r in range(5):\n for c in range(5):\n if self.map[r][c] == 3:\n display.set_pixel(c, r, 1)\n elif self.map[r][c] == 4:\n display.set_pixel(c, r, 3)\n else:\n display.set_pixel(c, r, 0)\n \ndef find_all_max_index(arr):\n m = max(arr)\n result = []\n for i in range(len(arr)):\n if arr[i] == m:\n result.append(i)\n return result\n\nclass AI:\n def __init__(self, env, learning_rate=0.01, reward_decay=0.9, e_greddy=0.9):\n self.env = env\n self.q_table = {}\n self.lr = learning_rate\n self.elsilon = e_greddy\n self.gamma = reward_decay\n def find_max_action(self, s):\n all_max_index = 
find_all_max_index(self.q_table[str(s)])\n return self.env.action_space[random.choice(all_max_index)]\n def choose_action(self, s):\n self.make_sure_state_exist(s)\n r = random.randrange(0, 100)\n if r > 90 + self.env.accuracy_offset:\n action = random.choice(self.env.action_space)\n else:\n action = self.find_max_action(s)\n return action\n def make_sure_state_exist(self, s):\n if str(s) not in self.q_table.keys():\n self.q_table[str(s)] = [0] * len(self.env.action_space)\n def learn(self, s, a, r, s_, done):\n self.make_sure_state_exist(s_)\n a_index = self.env.action_index(a)\n q_predict = self.q_table[str(s)][a_index]\n if done:\n q_target = r\n else:\n q_target = r + self.gamma * max(self.q_table[str(s_)])\n self.q_table[str(s)][a_index] += self.lr * (q_target - q_predict)\n def run(self):\n for episode in range(1000):\n s = self.env.reset()\n sleep(100)\n while True:\n a = self.choose_action(s)\n s_, r, done = self.env.step(a)\n self.learn(s, a, r, s_, done)\n s = s_\n if done:\n break\n sleep(100)\n\nagent = AI(Env())\nagent.run()\n","repo_name":"X-MENG/100Lines","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"}
{"seq_id":"15629218956","text":"import typing\n\nfrom sklearn.pipeline import Pipeline\n\nimport eda\nfrom hyperparameters import HyperparametersMerger\n\n\ndef create_pipeline(pipeline_proto: list, data_set_eda: eda.DataSetEDA) -> \\\n typing.Tuple[Pipeline, list, callable]:\n steps = []\n fixed_hyperparameters = {}\n tunable_hyperparameter_space = []\n merger = HyperparametersMerger()\n\n for name, pipeline_step_proto_cls in pipeline_proto:\n pipeline_step_proto = pipeline_step_proto_cls(data_set_eda=data_set_eda)\n\n steps.append((name, pipeline_step_proto.estimator_cls()))\n\n for fixed_hyperparameter, value in pipeline_step_proto.fixed_hyperparameters.items():\n hyperparameter_name = f'{name}__{fixed_hyperparameter}'\n fixed_hyperparameters[hyperparameter_name] = value\n merger.add_fixed_hyperparameter(hyperparameter_name, value)\n\n for dimension in pipeline_step_proto.tunable_hyperparameters_sampling_space:\n hyperparameter_name = f'{name}__{dimension.name}'\n dimension.name = hyperparameter_name\n tunable_hyperparameter_space.append(dimension)\n merger.add_tunable_hyperparameter(hyperparameter_name)\n\n pipeline = Pipeline(steps=steps)\n pipeline.set_params(**fixed_hyperparameters)\n\n return pipeline, tunable_hyperparameter_space, merger\n","repo_name":"marrlab/RedTell","sub_path":"classification/src/pipeline_factory.py","file_name":"pipeline_factory.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"29821567588","text":"import argparse\nfrom math import pi\nfrom typing import Dict, Union\n\nimport numpy as np\nfrom numpy import bool_, int64, ndarray\n\nimport habitat\nfrom habitat.config.default import get_config\nfrom habitat.core.simulator import Observations\nfrom habitat.sims.habitat_simulator.actions import HabitatSimActions\n\n\nclass RandomAgent(habitat.Agent):\n def __init__(self, success_distance: float, goal_sensor_uuid: str) -> None:\n self.dist_threshold_to_stop = success_distance\n self.goal_sensor_uuid = goal_sensor_uuid\n\n def reset(self) -> None:\n pass\n\n def is_goal_reached(self, observations: Observations) -> bool_:\n dist = observations[self.goal_sensor_uuid][0]\n return dist <= self.dist_threshold_to_stop\n\n def 
act(self, observations: Observations) -> Dict[str, int64]:\n if self.is_goal_reached(observations):\n action = HabitatSimActions.stop\n else:\n action = np.random.choice(\n [\n HabitatSimActions.move_forward,\n HabitatSimActions.turn_left,\n HabitatSimActions.turn_right,\n ]\n )\n return {\"action\": action}\n\n\nclass ForwardOnlyAgent(RandomAgent):\n def act(self, observations: Observations) -> Dict[str, int]:\n if self.is_goal_reached(observations):\n action = HabitatSimActions.stop\n else:\n action = HabitatSimActions.move_forward\n return {\"action\": action}\n\n\nclass RandomForwardAgent(RandomAgent):\n def __init__(self, success_distance: float, goal_sensor_uuid: str) -> None:\n super().__init__(success_distance, goal_sensor_uuid)\n self.FORWARD_PROBABILITY = 0.8\n\n def act(self, observations: Observations) -> Dict[str, Union[int, int64]]:\n if self.is_goal_reached(observations):\n action = HabitatSimActions.stop\n else:\n if np.random.uniform(0, 1, 1) < self.FORWARD_PROBABILITY:\n action = HabitatSimActions.move_forward\n else:\n action = np.random.choice(\n [HabitatSimActions.turn_left, HabitatSimActions.turn_right]\n )\n\n return {\"action\": action}\n\n\nclass GoalFollower(RandomAgent):\n def __init__(self, success_distance: float, goal_sensor_uuid: str) -> None:\n super().__init__(success_distance, goal_sensor_uuid)\n self.pos_th = self.dist_threshold_to_stop\n self.angle_th = float(np.deg2rad(15))\n self.random_prob = 0\n\n def normalize_angle(self, angle: ndarray) -> ndarray:\n if angle < -pi:\n angle = 2.0 * pi + angle\n if angle > pi:\n angle = -2.0 * pi + angle\n return angle\n\n def turn_towards_goal(self, angle_to_goal: ndarray) -> int:\n if angle_to_goal > pi or (\n (angle_to_goal < 0) and (angle_to_goal > -pi)\n ):\n action = HabitatSimActions.turn_right\n else:\n action = HabitatSimActions.turn_left\n return action\n\n def act(self, observations: Observations) -> Dict[str, int]:\n if self.is_goal_reached(observations):\n action = HabitatSimActions.stop\n else:\n angle_to_goal = self.normalize_angle(\n np.array(observations[self.goal_sensor_uuid][1])\n )\n if abs(angle_to_goal) < self.angle_th:\n action = HabitatSimActions.move_forward\n else:\n action = self.turn_towards_goal(angle_to_goal)\n\n return {\"action\": action}\n\n\ndef get_all_subclasses(cls):\n return set(cls.__subclasses__()).union(\n [s for c in cls.__subclasses__() for s in get_all_subclasses(c)]\n )\n\n\ndef get_agent_cls(agent_class_name):\n sub_classes = [\n sub_class\n for sub_class in get_all_subclasses(habitat.Agent)\n if sub_class.__name__ == agent_class_name\n ]\n return sub_classes[0]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--success-distance\", type=float, default=0.2)\n parser.add_argument(\n \"--task-config\",\n type=str,\n default=\"habitat-lab/habitat/config/task/pointnav.yaml\",\n )\n parser.add_argument(\"--agent-class\", type=str, default=\"GoalFollower\")\n args = parser.parse_args()\n\n config = get_config(args.task_config)\n\n agent = get_agent_cls(args.agent_class)(\n success_distance=args.success_distance,\n goal_sensor_uuid=config.habitat.task.goal_sensor_uuid,\n )\n benchmark = habitat.Benchmark(config_paths=args.task_config)\n metrics = benchmark.evaluate(agent)\n\n for k, v in metrics.items():\n habitat.logger.info(\"{}: {:.3f}\".format(k, v))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"facebookresearch/habitat-lab","sub_path":"habitat-baselines/habitat_baselines/agents/simple_agents.py","file_name":"simple_agents.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":1467,"dataset":"github-code","pt":"62"} +{"seq_id":"1038016142","text":"\"\"\"\nModule for organizing objects in the beamline.\n\nContains utilities for grouping objects together in beamline.py to satisfy\nthe individual needs of each hutch without compromising code unification.\n\"\"\"\nimport functools\nimport inspect\nimport importlib\nfrom blutil.doctools import argspec\n\nclass SimpleContainer(object):\n \"\"\"\n Object that contains other objects as its attributes. It's a box.\n \"\"\"\n def __init__(self, **objs):\n \"\"\"\n Objs are key=value pairs to start in the box.\n \"\"\"\n for k, v in objs.items():\n setattr(self, k, v)\n\ndef alias_class(Class, **aliases):\n \"\"\"\n Make a second version of Class that has aliased attribute names.\n **aliases are old=new key value pairs, where old must be a string and new\n can either be a string or a list/tuple of strings.\n Attribute names that are omitted will carry through unaliased.\n \"\"\"\n def proxy_class(*args):\n obj = alias_object(Class(*args))\n return alias_object(obj, **aliases)\n proxy_class.__doc__ = argspec(Class.__init__) + \"\\n\" + Class.__doc__\n return proxy_class\n\ndef alias_object(obj, **aliases):\n \"\"\"\n Make a linked copy of obj that has aliased attribute names.\n **aliases are old=new key value pairs, where old must be a string and new\n can either be a string or a list/tuple of strings.\n Attribute names that are omitted will carry through unaliased.\n \"\"\"\n attrs = {}\n attrs[\"__doc__\"] = obj.__doc__\n attrs[\"__module__\"] = obj.__class__.__module__\n for a in object_attrs(obj):\n name = aliases.get(a, a)\n if isinstance(name, (list, tuple)):\n for n in name:\n attrs[n] = proxy_property(obj, a)\n else:\n attrs[name] = proxy_property(obj, a)\n AliasClass = type(obj.__class__.__name__, (object,), attrs)\n return AliasClass()\n\ndef object_merge(name, *objs):\n \"\"\"\n Create and instantiate a merged object.\n\n Merged objects have the attributes that their constituent objects have,\n and forward all getattr, setattr, delattr requests to the original object.\n \"\"\"\n attrs = {}\n doc = \"Docstrings of merged objects:\"\n for obj in objs:\n try:\n doc += \"\\n\" + obj.__doc__\n except:\n pass\n for a in object_attrs(obj):\n attrs[a] = proxy_property(obj, a)\n attrs[\"__doc__\"] = doc\n MergeClass = type(name, (object,), attrs)\n return MergeClass()\n\ndef proxy_property(obj, attr):\n \"\"\"\n Create a property object that behaves like attr from obj.\n When accessed, forwards all getattr, setattr, delattr requests to obj.\n Sets up a clean, useful docstring for ipython sessions.\n \"\"\"\n def optattr(func, obj, attr, self, *args):\n return func(obj, attr, *args)\n parts = [functools.partial(optattr, f, obj, attr) for f in (getattr, setattr, delattr)]\n property_no_doc = type(\"property\", (property,), {})\n prop = property_no_doc(*parts)\n v = getattr(obj, attr)\n doc = \"\"\n if callable(v):\n doc += argspec(v) + \"\\n\"\n if hasattr(v, \"__dict__\"):\n try:\n doc += v.__doc__\n except:\n pass\n else:\n doc += \"type \" + v.__class__.__name__\n prop.__doc__ = doc\n return prop\n\ndef object_attrs(obj):\n \"\"\"\n Return a list of valid object attributes. 
Ignore double underscores.\n \"\"\"\n keys = obj.__class__.__dict__.keys() + obj.__dict__.keys()\n return [k for k in keys if k[:2] != \"__\"]\n\n","repo_name":"aegger13/cxi_beamline","sub_path":"blutil/organize.py","file_name":"organize.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5609606652","text":"class Capture:\r\n def __init__(self, head, body, line):\r\n self.head = head\r\n self.body = body\r\n self.line = line\r\n\r\n def get_size(self):\r\n s = len(self.body)\r\n for it in self.body:\r\n if isinstance(it, Capture):\r\n s += it.get_size()\r\n s -= 1\r\n return s\r\n\r\n def __repr__(self):\r\n return str(self.head)\r\n\r\n def __str__(self, indent = \"\", is_last = False):\r\n s = indent\r\n if (is_last):\r\n s += \"\\\\-\"\r\n indent += \" \"\r\n else:\r\n s += \"|-\"\r\n indent += \"| \"\r\n\r\n s += \"[\"+self.head+\"]\\n\"\r\n\r\n for child in self.body:\r\n s += child.__str__(indent, child == self.body[-1])\r\n return s\r\n\r\n","repo_name":"batburger/PseudoCode","sub_path":"Parser/Capture.py","file_name":"Capture.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11068829838","text":"from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\nclass ConfusionPlotterArgParser(ArgumentParser):\n \"\"\" parses ConfusionPlotter command-line options and arguments\"\"\"\n\n def __init__(self):\n super().__init__(\n description=\"plot confusion matrices from an experiment session\",\n formatter_class=ArgumentDefaultsHelpFormatter,\n )\n self.add_argument(\n \"--experiment_dir\", \n type=str,\n help='directory containing subfolders pertaining to different experimental conditions' \n )\n self.add_argument(\n \"--num_classes\",\n type=int,\n default=1,\n help=\"Number of classes in segmentation model\",\n )","repo_name":"kungfuai/d3m-segmentation-research","sub_path":"src/evaluation/confusion_plotter_arg_parser.py","file_name":"confusion_plotter_arg_parser.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"70084030279","text":"import cv2\nimport os\ncap = cv2.VideoCapture(0)\ndir = r'C:\\Users\\tarun\\OneDrive\\Documents\\learning\\Programming\\python\\my projects\\machine learning\\training\\tee'\nwhile True:\n _, img = cap.read()\n cv2.imshow('img', img)\n key = cv2.waitKey(1)\n if key == 27:\n break\n","repo_name":"Thuppakki001/Python","sub_path":"my projects/face recogniser/program/TeeDetector.py","file_name":"TeeDetector.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1892760951","text":"#!/usr/bin/env python\n# -*- coding: utf -*-\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\n\nimport random\n\ndef main():\n server = HTTPServer(('', 8080), AuthHandler)\n server.serve_forever()\n\n\nclass AuthHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n if \"ping\" in self.path:\n self.wfile.write(\"Pong\\n\")\n else:\n self.wfile.write(\"Auth: %s\\n\" % random.choice([0,1]))\n return\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"wifidog/wifidog-gateway","sub_path":"contrib/load-tester/mock_auth.py","file_name":"mock_auth.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":737,"dataset":"github-code","pt":"62"} +{"seq_id":"69995106119","text":"import logging\nfrom dataclasses import dataclass\n\nfrom kubernetes import client\nfrom kubernetes.client.rest import ApiException\n\n__plural = \"buckets\"\n\n__version = \"v1alpha2\"\n\n__group = \"assetstore.kyma-project.io\"\n\n\n@dataclass\nclass BucketCfg:\n bucket_name: str\n namespace: str\n\n\ndef __create_bucket_body(bucket_name, namespace: str, **kwargs) -> dict:\n return {\n \"kind\": \"Bucket\",\n \"apiVersion\": \"assetstore.kyma-project.io/v1alpha2\",\n \"metadata\": {\n \"name\": bucket_name,\n \"namespace\": namespace,\n },\n \"spec\": {\n \"region\": \"us-east-1\",\n \"policy\": \"writeonly\",\n }\n }\n\n\ndef create_bucket(bucket_cfg: BucketCfg):\n try:\n client.CustomObjectsApi().create_namespaced_custom_object(\n group=__group,\n version=__version,\n namespace=bucket_cfg.namespace,\n plural=__plural,\n body=__create_bucket_body(\n bucket_cfg.bucket_name,\n bucket_cfg.namespace))\n\n except ApiException as e:\n if e.status == 409:\n msg = f'{bucket_cfg.bucket_name} already exist in {bucket_cfg.namespace}'\n logging.info(msg)\n\n except KeyboardInterrupt:\n logging.warning('create_bucket interrupted')\n","repo_name":"m00g3n/bas","sub_path":"bas/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22944181683","text":"\r\ndef pn(num):\r\n print(\"Telephone Numbers:\")\r\n for n, i in num.items():\r\n print(\"Name:\", n, \"\\tNumber:\", i)\r\n print()\r\n \r\ndef add(num, N, u):\r\n num[N] = u\r\n \r\ndef find(num, N):\r\n if N in num:\r\n return \"The number is \" + num[N]\r\n else:\r\n return N + \" was not found\"\r\n \r\ndef remove(num, N):\r\n if N in num:\r\n del num[N]\r\n else:\r\n print(N,\" was not found\")\r\n \r\ndef load(num, fname):\r\n f1 = open(fname, \"rt\")\r\n while True:\r\n l = f1.readline()\r\n if not l:\r\n break\r\n l = l[:-1]\r\n N, u = l.split(\",\")\r\n num[N] = u\r\n f1.close()\r\n \r\ndef save(num, fname):\r\n f2 = open(fname, \"wt\")\r\n for n, i in num.items():\r\n f2.write(n + \",\" + i + \"\\n\")\r\n f2.close()\r\n \r\ndef menu():\r\n print('1. Print Phone Numbers')\r\n print('2. Add a Phone Number')\r\n print('3. Remove a Phone Number')\r\n print('4. Lookup a Phone Number')\r\n print('5. Load numbers')\r\n print('6. Save numbers')\r\n print('7. 
Quit')\r\n print()\r\n \r\np = {}\r\nc = 0\r\nmenu()\r\nwhile True:\r\n c = int(input(\"Type in a number (1-7): \"))\r\n if c == 1:\r\n pn(p)\r\n elif c == 2:\r\n print(\"Add Name and Number\")\r\n N = input(\"Name: \")\r\n ph = input(\"Number: \")\r\n add(p, N, ph)\r\n elif c == 3:\r\n print(\"Remove Name and Number\")\r\n N = input(\"Name: \")\r\n remove(p, N)\r\n elif c == 4:\r\n print(\"Locate Number\")\r\n N= input(\"Name: \")\r\n print(find(p, N))\r\n elif c == 5:\r\n fname = input(\"Filename to load: \")\r\n load(p, fname)\r\n elif c == 6:\r\n fname = input(\"Filename to save: \")\r\n save(p, fname)\r\n elif c == 7:\r\n break\r\n else:\r\n menu()\r\n \r\nprint(\"Goodbye\")\r\n","repo_name":"anuradha9712/Python-codes","sub_path":"telephone directory.py","file_name":"telephone directory.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9819512979","text":"from random import randint\nfrom time import sleep\n\n\n# Creates a 10 by 10 array full of zeros using list comprehension\n\n# Creates a 10x10 array full of whatever is stored in me.map.\ndef create_board():\n local_board = [[me.map for x in range(10)] for x in range(10)]\n\n for x in range(0, 20): # Repeats 20 times for 20 bombs\n x = randint(0, 9) # picks a random number from 1 to 10 for x&y coord\n y = randint(0, 9) #\n local_board[y][x] = 1 # sets it to 1 so that when the player is on 1, the game knows a bomb is in that spot\n local_board[0][0], local_board[9][9] = me.character, 0 # spawn point can't have a bomb\n # board[9][9] = 0# end point can't have a bomb\n return local_board\n\n\ndef showboard(board):\n for item in board:\n item = [str(x) for x in item] # Turns each item in the board into a string\n print(\" \".join(item))\n # Goes through each item in the board and prints it neatly\n\n\nclass player:\n def __init__(self, lives, coins, position, map, bomb=1): # Constructor\n self.lives = lives\n self.coins = coins\n self.position = position\n self.control_scheme = True\n self.udlr = False\n self.wasd = True\n self.map = map\n self.inventory = []\n self.character = \"x\"\n self.bomb = bomb\n\n def get_coords(self):\n return [self.position[0], self.position[1]]\n\n def lose_life(self, board):\n self.lives -= 1 # Take away lives from the player\n print(\n f\"Uh oh! You hit a bomb and lost a life!\\nYou now have {f'{self.lives} lives left' if self.lives != 1 else f'{self.lives} life left'}\"\n )\n if self.lives == 0: # check if the player should be dead\n # print(self.lives)\n self.dead(board)\n\n def dead(self, board):\n revive = False # Don't know why this works really. If it ain't broke, don't fix it\n print(\"You just died! Game Over..?\\nWait what?\")\n print(\"You've been given a special oppertunity to continue playing!\")\n print(\"For the low low price of 5 coins, you can revive yourself!\")\n ans = input(\"Would you like to revive yourself?\\n1. Yes\\n2. Accept death\\n\")\n if ans == \"1\":\n if self.coins >= 5:\n self.coins -= 5\n self.lives += 1\n print(f\"You have revived yourself! 
You now have {self.lives} lives!\")\n self.navigate(self.position, board,move=\"revived\")\n revive = True\n else:\n print(\"You do not have enough coins to revive yourself\")\n else:\n print(\"Off to the shadow realm you go!\")\n \n\n if not revive:\n board[self.position[0]][self.position[1]] = self.map # Removes the 'x' from the end position\n self.position = [0, 0] # Resets the player position\n self.lives = 1\n menu()\n # Will add more stuff here soontm\n\n def navigate(self, position, board, move):\n board[position[0]][position[1]] = self.map\n # Since \"x\" replaces the 0 to point out where the player is, this resets the previous position to 0\n # before we move the player to the new position\n\n self.control_scheme = \"wasd\" if self.wasd else \"udlr\"\n commands = {\n \"wasd\": {\"w\": [-1, 0], \"a\": [0, -1], \"s\": [1, 0], \"d\": [0, 1]},\n \"udlr\": {\"u\": [-1, 0], \"d\": [1, 0], \"l\": [0, -1], \"r\": [0, 1]},\n }\n\n if (\n move in commands[self.control_scheme]\n ): # Checks to see if the move is in the dictionary by using the key stored in \"control scheme\"\n pos_change = commands[self.control_scheme][move] # Adds the vector to the position\n new_pos = [\n position[0] + pos_change[0],\n position[1] + pos_change[1],\n ] \n if not self.is_valid_position(new_pos, board):\n print(\"You cannot move there\")\n return\n self.position = new_pos # Puts the player on the new position bing bong\n print(f\"Moved successfully. You are now at {self.position[1]},{self.position[0]}\") # x,y as self.position is in the form y,x\n if (board[self.position[0]][self.position[1]] == 1): # Checks for bombs and takes away lives\n self.lose_life(board)\n board[self.position[0]][self.position[1]] = \"x\" # Marks player position with an 'x'\n showboard(board) # this is reset at the start of the function\n print(\"|---------------------------|\")\n else: \n print(\"This is not a valid move\") if move != \"revived\" else print(\"\")\n return\n if (\n self.position[0] == 9 and self.position[1] == 9\n ): # Checks if the player has won\n print(\"|---------------------------|\")\n showboard(board)\n self.coins += 10\n print(f\"You have reached the end! You now have {self.coins} coins!\")\n print(\"|---------------------------|\")\n print(\n \"Would you like to play again or return to the main menu?\\n1. Play again\\n2. Main Menu\"\n )\n board[position[0]][\n position[1]\n ] = self.map # Removes the 'x' from the end position\n self.position = [0, 0] # Resets the player position\n ans = input(\"\")\n if ans == \"1\":\n play()\n else:\n menu()\n\n def is_valid_position(self, position, board):\n y, x = position\n if (\n y < 0\n or y >= len(board)\n or x < 0\n or x >= len(board[0])\n or board[y][x] == \"X\"\n ):\n return False\n # Ensures that the position is within the confines of the board (10x10) and returns a boolean\n return True\n\n def add_item(self, item):\n self.inventory.append(item)\n print(f\"{item} has been added to your inventory\")\n\n def purchase(self, item):\n if self.coins >= item.cost: # Checks to see if the player isn't poor\n self.coins -= item.cost # Takes away the cost of the item from the player\n self.add_item(item)\n print(f\"You have bought {item}! 
You now have {self.coins} coins left!\")\n else:\n print(\"You do not have enough coins to buy that item\")\n menu()\n\n def inventoryView(self):\n print(\"Inventory:\")\n for item in self.inventory: # Prints each item in the inventory | C# equivalent of foreach\n print(f\"[-] {item}\")\n\n def select_item(self, item,map=False,character=False,bomb=False):\n if item not in self.inventory:\n print(\"You do not own this item\")\n sleep(1.5)\n print(self.inventory)\n shop()\n if not verify(item):\n print(\"This is not a valid item\")\n print(self.inventory)\n shop()\n\n me.map = item if map else me.map # Sets the map to the item if map is true\n me.character = item if character else me.character # Sets the character to the item if character is true\n me.bomb = item if bomb else me.bomb # Sets the bomb to the item if bomb is true\n print(f\"You have selected {item} as your {'map' if map else 'character' if character else 'bomb'}\")\n # Copilot is cool\ndef verify(item):\n map_designs = {\"0\": 10, \"/\": 40, \"#\": 60}\n characters = {\"x\": 10, \"o\": 40, \"@\": 60}\n bombs = {\"v\":10,\"*\":40,\"+\":60}\n if item in bombs or item in characters or item in map_designs:\n return True\n return False\n\ndef settings():\n print(\"[+] Settings [+]\")\n print(\n \"1. Controls\\\n \\n2. Customization\\\n \\n3. Exit\"\n ) # The \\ is used to continue the string on the next line\n\n ans = input(\"What would you like to change? \").lower()\n if ans == \"back\" or ans == \"bac\" or ans == \"exit\" or ans == \"leave\" or ans == \"return\" or ans == \"menu\" or ans == \"go back\":\n menu()\n if ans == \"1\": # Self explanatory\n sleep(1.5)\n print(\"[+] Settings -> Controls [+]\")\n print(\"1. WASD\")\n print(\"2. UDLR\")\n ans = input(\"Which mode would you like to change it to? \").lower()\n if ans == \"1\" or ans == \"wasd\":\n me.control_scheme = True\n me.udlr, me.wasd = False, True\n print(\"Mode changed to WASD\")\n sleep(1.5)\n menu()\n elif ans == \"2\" or ans == \"udlr\":\n me.control_scheme = False\n me.udlr, me.wasd = True, False\n print(\"Mode changed to UDLR\")\n sleep(1.5)\n menu()\n else:\n print(\"That is not a valid choice\")\n sleep(1.5)\n menu()\n elif ans == \"2\":\n print(\"[+] Settings -> Customization [+]\\n\")\n print(\"1. Change map\")\n print(\"2. Change character\")\n print(\"3. Change bombs\")\n print(\"\\nNote: Items can be used interchangeably.\\nThis means map designs can be used as characters and vice versa\\n\")\n ans = input(\"What would you like to change?\\n\").lower()\n if ans == \"1\":\n print(\"Which map would you like to use?\")\n me.inventoryView()\n me.select_item(input(\"\"),map=True)\n sleep(1.5)\n menu()\n elif ans == \"2\":\n print(\"Which character would you like to use?\")\n me.inventoryView()\n me.select_item(input(\"\"),character=True)\n sleep(1.5)\n menu()\n elif ans == \"3\":\n print(\"What would you like your bombs to appear as?\")\n me.inventoryView()\n me.select_item(input(\"\"),bomb=True)\n sleep(1.5)\n menu()\n\n\n\ndef menu(): # Basic if/else menu\n print( \"-- Main Menu --\\n\")\n print( f\"Coins: {me.coins}\")\n print( \"1. Play\")\n print( \"2. Shop\")\n print( \"3. Settings\")\n print( \"4. Exit\")\n #sleep(1.5)\n while 1: # While 1 instead of True since I'm fancy\n choice = input(\"\\nWhat would you like to do?\\n\")\n if choice == \"1\":\n play()\n elif choice == \"2\":\n shop()\n elif choice == \"3\":\n settings()\n elif choice == \"4\":\n print(\"Hope you had fun!\")\n exit()\n else:\n print(\"That is not a valid choice\")\n\n\ndef play():\n local_board = create_board()\n showboard(local_board)\n print(\n f\"Which direction do you want to move in? Use {'WASD' if me.wasd else 'UDLR'} to move.\"\n )\n while True:\n move = input(\"\").lower()\n me.navigate(me.position,local_board,move=move)\n\n# 11:16 no longer know what I'm doing\ndef shop():\n map_designs = {\"0\": 10, \"/\": 40, \"#\": 60}\n characters = {\"x\": 10, \"o\": 40, \"@\": 60}\n bombs = {\"v\":10,\"*\":40,\"+\":60}\n print(\"[+] Shop [+]\")\n print(f\"Coins: {me.coins}\\n\")\n print(\"[-] 1. Map Designs [-]\")\n print(\"[-] 2. Characters [-]\")\n print(\"[-] 3. Bombs [-]\")\n ans = input(\"\").lower()\n print(f\"ans = {ans}\")\n if ans == \"back\" or ans == \"bac\" or ans == \"exit\" or ans == \"leave\" or ans == \"return\" or ans == \"menu\":\n menu()\n if ans == \"1\" or \"map\" in ans:\n print(\"Type 'buy' to buy an item or 'back' to return to the shop\")\n for item, cost in map_designs.items():\n print(f\"{item}: {cost} coins\")\n ans = input(\"\")\n elif ans == \"2\" or \"character\" in ans:\n print(\"Type 'buy' to buy an item or 'back' to return to the shop\")\n for item, cost in characters.items():\n print(f\"{item}: {cost} coins\")\n ans = input(\"\")\n elif ans == \"3\" or \"bomb\" in ans:\n print(\"Type 'buy' to buy an item or 'back' to return to the shop\")\n for item, cost in bombs.items():\n print(f\"{item}: {cost} coins\")\n ans = input(\"\")\n\n if \"buy\" in ans:\n ans = ans.replace(\"buy\", \"\").strip()\n if ans in me.inventory:\n print(\"You already own this item\")\n sleep(1.5)\n shop()\n if ans in map_designs:\n if me.coins >= map_designs[ans]:\n me.coins -= map_designs[ans]\n print(f\"You have bought {ans}! You now have {me.coins} coins left!\")\n me.inventory.append(ans)\n sleep(1.5)\n menu()\n else:\n print(\"You do not have enough coins to buy that item\")\n sleep(1.5)\n shop()\n \n elif ans in characters:\n if me.coins >= characters[ans]:\n me.coins -= characters[ans]\n print(f\"You have bought {ans}! You now have {me.coins} coins left!\")\n me.inventory.append(ans)\n sleep(1.5)\n menu()\n else:\n print(\"You do not have enough coins to buy that item\")\n sleep(1.5)\n shop()\n elif ans in bombs:\n if me.coins >= bombs[ans]:\n me.coins -= bombs[ans]\n print(f\"You have bought {ans}! You now have {me.coins} coins left!\")\n me.inventory.append(ans)\n sleep(1.5)\n menu()\n else:\n print(\"You do not have enough coins to buy that item\")\n sleep(1.5)\n shop()\n\n else:\n print(\"That is not a valid item\")\n sleep(1.5)\n shop()\n else:\n print(\"Type 'buy' followed by the item you're purchasing\")\n sleep(1.5)\n shop()\n print(\"Fail\")\n menu()\n # add some stuff that lets the user leave the shop one page at a time\n # or to just go back to the main menu directly.\n # Also add a way to buy stuff\n\n\nme = player(1, 500, [0, 0], \"-\") # Creates a player\n#me.add_item(\"Sword\")\nprint(\"Welcome to the bomb game!\\n\")\nmenu()\n\"\"\"\nCustom controls\nLevels and xp\nHarder modes\nnxn boards\ncustomize character (name, color, etc)\ncustomize board\nmaps\nBlind character and only allow them to see a certain area around them\nChain bombs\n\"\"\"\n","repo_name":"SkellXC/Bomb-Game","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"41626748331","text":"import struct\nimport time\n\nfrom oppy.cell.cell import Cell\nimport oppy.cell.definitions as DEF\n\nfrom oppy.cell.exceptions import BadCellHeader, BadPayloadData\nfrom oppy.cell.util import TLVTriple\n\n\nclass FixedLenCell(Cell):\n '''A container class for representing fixed-length cells.'''\n\n _subclass_map = None\n\n def __init__(self, header, payload=None):\n '''Create a :class:`~oppy.cell.fixedlen.FixedLenCell` using the\n given *header*.\n\n :param :class:`oppy.cell.fixedlen.FixedLenCell.Header` header: The\n cell header information to be used.\n :param str payload: Cell payload to be used.\n '''\n if not isinstance(header, FixedLenCell.Header):\n msg = 'Expected cell header type FixedLenCell.Header, but '\n msg += 'received header of type: {}'.format(type(header))\n raise BadCellHeader(msg)\n\n self.header = header\n self.payload = payload\n\n @staticmethod\n def padCellBytes(cell_bytes, link_version=3):\n '''Pad cell_bytes to uniform length.\n\n Length depends on the Link Protocol version in use.\n\n :param str cell_bytes: byte string to pad\n :param int link_version: Link Protocol version in use\n :returns: **str** cell_bytes padded to a fixed-length\n '''\n PAD_BYTE = '\\x00'\n if link_version <= 3:\n pad_len = DEF.FIXED_LEN_V3_LEN\n else:\n pad_len = DEF.FIXED_LEN_V4_LEN\n return cell_bytes + PAD_BYTE * (pad_len - len(cell_bytes))\n\n def getBytes(self, trimmed=False):\n '''Build and return the raw bytes this cell represents.\n\n :param bool trimmed: ignored\n :returns: **str** byte representation of this cell.\n '''\n return self.header.getBytes() + self.payload\n\n def payloadRange(self):\n '''Return a two-tuple representing the (start, end) positions of this\n cell's payload data (based on Link Protocol version in use).\n\n :returns: **tuple, int** (start, end) indices of payload.\n '''\n if 1 <= self.header.link_version <= 3:\n return DEF.PAYLOAD_START_V3, DEF.FIXED_LEN_V3_LEN\n elif self.header.link_version <= 4:\n return DEF.PAYLOAD_START_V4, DEF.FIXED_LEN_V4_LEN\n else:\n fmt = \"The cell's link version is invalid: {}\"\n raise ValueError(fmt.format(self.header.link_version))\n\n def _parseHeader(self, data):\n # This check is only useful for debugging purposes. 
For fixed-length\n # cells, this method actually doesn't need to do anything, because\n # all header fields have already been parsed.\n already_parsed = (self.header.circ_id,\n self.header.cmd,\n self.header.link_version)\n for field in already_parsed:\n assert field is not None\n\n def _parsePayload(self, data):\n start, end = self.payloadRange()\n self.payload = data[start:end]\n\n @staticmethod\n def _initSubclassMap():\n FixedLenCell._subclass_map = {\n DEF.PADDING_CMD : PaddingCell,\n DEF.CREATE_CMD : CreateCell,\n DEF.CREATED_CMD : CreatedCell,\n DEF.RELAY_CMD : EncryptedCell,\n DEF.DESTROY_CMD : DestroyCell,\n DEF.CREATE_FAST_CMD : CreateFastCell,\n DEF.CREATED_FAST_CMD: CreatedFastCell,\n DEF.NETINFO_CMD : NetInfoCell,\n DEF.RELAY_EARLY_CMD : EncryptedCell,\n DEF.CREATE2_CMD : Create2Cell,\n DEF.CREATED2_CMD : Created2Cell\n }\n\n @staticmethod\n def _extractCmd(data, header):\n return header.cmd\n\n class Header(object):\n '''A simple container class for representing the header information of\n a fixed-length cell.'''\n\n def __init__(self, circ_id=None, cmd=None, link_version=3):\n self.circ_id = circ_id\n self.cmd = cmd\n self.link_version = link_version\n\n def getBytes(self):\n '''Return the raw bytes of this header.\n\n :returns: **str** byte representation of this header\n '''\n fmt = \"!HB\" if self.link_version <= 3 else \"!IB\"\n return struct.pack(fmt, self.circ_id, self.cmd)\n\n def __repr__(self):\n fmt = \"FixedLenCell.Header(circ_id={}, cmd={}, link_version={})\"\n return fmt.format(self.circ_id, self.cmd, self.link_version)\n\n def __eq__(self, other):\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n\nHTYPE_LEN = 2\nHLEN_LEN = 2\n\n\nclass Create2Cell(FixedLenCell):\n '''.. note:: tor-spec, Section 5.1'''\n\n def __init__(self, header, htype=None, hlen=None, hdata=None):\n '''\n :param :class:`~oppy.cell.fixedlen.FixedLenCell.Header` header:\n initialized header to use with this cell\n :param int htype: Handshake type in use\n :param int hlen: Length of the handshake data\n :param str hdata: Actual handshake data to use (onion skin)\n '''\n self.header = header\n self.htype = htype\n self.hlen = hlen\n self.hdata = hdata\n\n @staticmethod\n def make(circ_id, htype=DEF.NTOR_HTYPE, hlen=DEF.NTOR_HLEN, hdata='',\n link_version=3):\n '''Build and return a Create2 cell, using default values where\n possible.\n\n Automatically create and use an appropriate FixedLenCell.Header.\n\n .. 
note: oppy only supports the NTor handshake, so *make()* will\n currently reject any *htype*'s or *hlen*'s that are not\n recognized as used in the NTor handshake.\n\n :param int circ_id: Circuit ID to use for this cell\n :param int hlen: Length of **hdata** segment\n :param str hdata: Actual handshake data to use (an *onion skin*)\n :param int link_version: Link Protocol version in use\n :returns: :class:`~oppy.cell.fixedlen.Create2Cell`\n '''\n if htype != DEF.NTOR_HTYPE:\n msg = 'htype was {}, but we currently only can do '\n msg += '{} (NTor)'\n raise BadPayloadData(msg.format(htype, DEF.NTOR_HTYPE))\n\n if hlen != DEF.NTOR_HLEN:\n msg = 'htype was NTor but hlen was {}, expected {}.'\n raise BadPayloadData(msg.format(hlen, DEF.NTOR_HLEN))\n\n if hlen != len(hdata):\n msg = 'hlen was {}, but len(hdata) was {}.'\n raise BadPayloadData(msg.format(hlen, len(hdata)))\n\n h = FixedLenCell.Header(circ_id=circ_id,\n cmd=DEF.CREATE2_CMD,\n link_version=link_version)\n\n return Create2Cell(h, htype=htype, hlen=hlen, hdata=hdata)\n\n def getBytes(self, trimmed=False):\n '''Construct and return the byte string represented by this cell.\n\n :param bool trimmed: If **True**, return just the cell bytes with no\n padding. Otherwise, pad cell bytes out to fixed-length size\n according to Link Protocol version in use.\n :returns: **str** formatted byte string represented by this cell\n '''\n ret = self.header.getBytes()\n ret += struct.pack('!H', self.htype) + struct.pack('!H', self.hlen)\n ret += self.hdata\n if trimmed is True:\n return ret\n else:\n return FixedLenCell.padCellBytes(ret, self.header.link_version)\n\n def _parsePayload(self, data):\n '''Parse the string *data* and extract cell fields.\n\n Set this cell's attributes from extracted values.\n\n :param str data: string to parse\n '''\n start, end = self.payloadRange()\n offset = start\n\n if end - start < HTYPE_LEN + HLEN_LEN:\n msg = \"Create2Cell payload was not enough bytes to construct \"\n msg += \"a valid Create2Cell.\"\n raise BadPayloadData(msg)\n\n self.htype = struct.unpack('!H', data[offset:offset + HTYPE_LEN])[0]\n\n if self.htype != DEF.NTOR_HTYPE:\n msg = \"Create2 got htype: {}, but oppy only supports ntor: {}.\"\n raise BadPayloadData(msg.format(self.htype, DEF.NTOR_HTYPE))\n\n offset += HTYPE_LEN\n\n self.hlen = struct.unpack('!H', data[offset:offset + HLEN_LEN])[0]\n\n if self.hlen != DEF.NTOR_HLEN:\n msg = \"Create2 got hlen: {}, but oppy only supports ntor hlen: {}.\"\n raise BadPayloadData(msg.format(self.hlen, DEF.NTOR_HLEN))\n\n offset += HLEN_LEN\n\n try:\n self.hdata = data[offset:offset + self.hlen]\n except IndexError:\n msg = \"Create2 hlen was specified to be {} bytes, but actual \"\n msg += \"hdata was {} bytes.\"\n raise BadPayloadData(msg.format(self.hlen, len(data) - offset))\n\n def __repr__(self):\n fmt = '{}, htype={}, hlen={}, hdata={}'\n fmt = 'Create2Cell({})'.format(fmt)\n return fmt.format(repr(self.header), repr(self.htype),\n repr(self.hlen), repr(self.hdata))\n\n\nclass Created2Cell(FixedLenCell):\n '''.. 
note:: tor-spec, Section 5.1'''\n\n def __init__(self, header, hlen=None, hdata=None):\n '''\n :param :class:`~oppy.cell.fixedlen.FixedLenCell.Header` header:\n Initialized header to use in this cell.\n :param int hlen: Length of this cell's hdata field\n :param str hdata: Actual handshake data (*onion skin*)\n '''\n self.header = header\n self.hlen = hlen\n self.hdata = hdata\n\n def getBytes(self, trimmed=False):\n '''Construct and return the byte string represented by this cell.\n\n :param bool trimmed: If **True**, return just the bytes without\n padding. Otherwise, pad length out to fixed-length cell size\n according to Link Protocol version in use.\n :returns: **str** raw byte string this cell represents.\n '''\n ret = self.header.getBytes()\n ret += struct.pack('!H', self.hlen)\n ret += self.hdata\n if trimmed is True:\n return ret\n else:\n return FixedLenCell.padCellBytes(ret, self.header.link_version)\n\n @staticmethod\n def make(circ_id, hlen=DEF.NTOR_HLEN, hdata='', link_version=3):\n '''Build and return a Created2 cell, using default values where\n possible.\n\n Automatically create and use an appropriate FixedLenCell.Header.\n\n .. note: oppy only supports the NTor handshake, so *make()* will\n currently reject any *htype*'s or *hlen*'s that are not\n recognized as used in the NTor handshake.\n\n :param int circ_id: Circuit ID to use for this cell\n :param int hlen: Length of **hdata** segment\n :param str hdata: Actual handshake data to use (an *onion skin*)\n :param int link_version: Link Protocol version in use\n :returns: :class:`~oppy.cell.fixedlen.Created2Cell`\n '''\n if hlen != DEF.NTOR_HLEN:\n msg = 'hlen was {}, expected {}.'\n raise BadPayloadData(msg.format(hlen, DEF.NTOR_HLEN))\n\n if hlen != len(hdata):\n msg = 'hlen was {}, but len(hdata) was {}.'\n raise BadPayloadData(msg.format(hlen, len(hdata)))\n\n h = FixedLenCell.Header(circ_id=circ_id,\n cmd=DEF.CREATED2_CMD,\n link_version=link_version)\n\n return Created2Cell(h, hlen=hlen, hdata=hdata)\n\n def _parsePayload(self, data):\n '''Parse the string *data* and extract cell fields.\n\n Set the attributes of this cell.\n\n :param str data: string to parse\n '''\n start, _ = self.payloadRange()\n offset = start\n\n self.hlen = struct.unpack('!H', data[offset:offset + HLEN_LEN])[0]\n offset += HLEN_LEN\n\n self.hdata = data[offset:offset + self.hlen]\n\n def __repr__(self):\n fmt = '{}, hlen={}, hdata={}'\n fmt = 'Created2Cell({})'.format(fmt)\n return fmt.format(repr(self.header), repr(self.hlen),\n repr(self.hdata))\n\n\nclass CreatedFastCell(FixedLenCell):\n '''.. note:: Not Implemented.'''\n def __init__(self, header):\n raise NotImplementedError(\"Can't make CreatedFastCell yet.\")\n\n\nclass CreatedCell(FixedLenCell):\n '''.. note:: Not Implemented.'''\n def __init__(self, header):\n raise NotImplementedError(\"Can't make CreatedCell yet.\")\n\n\nclass CreateFastCell(FixedLenCell):\n '''.. note:: Not Implemented.'''\n def __init__(self, header):\n raise NotImplementedError(\"Can't make CreateFastCell yet.\")\n\n\nclass CreateCell(FixedLenCell):\n '''.. note:: Not Implemented.'''\n def __init__(self, header):\n raise NotImplementedError(\"Can't make CreateCell yet.\")\n\n\nREASON_LEN = 1\n\n\nclass DestroyCell(FixedLenCell):\n '''.. 
note:: tor-spec, Section 5.4'''\n\n def __init__(self, header, reason=None):\n '''\n :param :class:`~oppy.cell.fixedlen.FixedLenCell.Header` header:\n Initialized header to use in this cell\n :param int reason: Reason this DestroyCell is being sent.\n '''\n self.header = header\n self.reason = reason\n\n # DESTROY_NONE should always be sent forward to avoid leaking version\n @staticmethod\n def make(circ_id, reason=DEF.DESTROY_NONE, link_version=3):\n '''Build and return a Destroy cell, using default values where\n possible.\n\n Automatically create and use an appropriate FixedLenCell.Header.\n\n .. warning: reason 0 (DESTROY_NONE in oppy.cell.definitions)\n should always be sent forward to avoid leaking version\n information.\n\n :param int circ_id: Circuit ID to use for this cell\n :param int reason: Reason this DESTROY cell is being sent\n :param int link_version: Link Protocol version in use\n :returns: :class:`~oppy.cell.fixedlen.DestroyCell`\n '''\n h = FixedLenCell.Header(circ_id=circ_id,\n cmd=DEF.DESTROY_CMD,\n link_version=link_version)\n\n if reason not in DEF.DESTROY_TRUNCATE_REASONS:\n msg = 'Unrecognized DESTROY reason: {}'.format(reason)\n raise BadPayloadData(msg)\n\n return DestroyCell(h, reason=reason)\n\n def getBytes(self, trimmed=False):\n '''Construct and return the byte string represented by this cell.\n\n :param bool trimmed: If **True**, return just the bytes without\n padding. Otherwise, pad length out to fixed-length cell size\n according to Link Protocol version in use.\n :returns: **str** raw byte string this cell represents\n '''\n ret = self.header.getBytes()\n ret += struct.pack('!B', self.reason)\n if trimmed is True:\n return ret\n else:\n return FixedLenCell.padCellBytes(ret, self.header.link_version)\n\n def _parsePayload(self, data):\n '''Parse the string *data* and extract cell fields.\n\n Set this cell's attributes.\n\n :param str data: string to parse\n '''\n start, _ = self.payloadRange()\n self.reason = struct.unpack('!B', data[start:start + REASON_LEN])[0]\n if self.reason not in DEF.DESTROY_TRUNCATE_REASONS:\n msg = 'Unrecognized DESTROY reason: {}'.format(self.reason)\n raise BadPayloadData(msg)\n\n def __repr__(self):\n fmt = '{}, reason={}'\n fmt = 'DestroyCell({})'.format(fmt)\n return fmt.format(repr(self.header), repr(self.reason))\n\n\nclass EncryptedCell(FixedLenCell):\n '''\n .. note::\n EncryptedCell is not a defined cell type in tor-spec, but\n we use it as a convenient way to represent RELAY cells or\n RELAY_EARLY cells that have either been encrypted by oppy or\n received from the network and have not been decrypted yet.\n '''\n\n def __init__(self, header, enc_payload=None):\n '''\n :param :class:`~oppy.cell.fixedlen.FixedLenCell.Header` header:\n header to use with this cell\n :param str enc_payload: encrypted payload for use with this cell\n '''\n self.header = header\n self.enc_payload = enc_payload\n\n @staticmethod\n def make(circ_id, payload, link_version=3, early=False):\n '''Build and return an encrypted RELAY or RELAY_EARLY cell, using\n default values where possible.\n\n Automatically create and use an appropriate FixedLenCell.Header. The\n *early* parameter specifies whether we should send a RELAY cell or\n a RELAY_EARLY cell.\n\n .. warning::\n\n RELAY_EARLY cells should always be used during circuit creation\n to avoid certain classes of attacks. That is, whenever oppy\n sends a relay EXTEND2 cell, it is sent as a RELAY_EARLY\n cell instead of a RELAY cell.\n\n Reference: tor-spec, Section 5.6\n\n .. note: *payload* field should be fully padded and equal to\n maximum relay cell payload length (498).\n\n :param int circ_id: Circuit ID to use for this cell\n :param str payload: Payload bytes to use in this cell\n :param int link_version: Link Protocol version in use\n :param bool early: Dictate whether or not to use a RELAY_EARLY cell\n :returns: :class:`~oppy.cell.fixedlen.EncryptedCell`\n '''\n if len(payload) != DEF.MAX_PAYLOAD_LEN:\n msg = 'EncryptedCell enc_payload should be padded to length {}; '\n msg += 'found enc_payload length {} instead.'\n msg = msg.format(DEF.MAX_PAYLOAD_LEN, len(payload))\n raise BadPayloadData(msg)\n\n cmd = DEF.RELAY_EARLY_CMD if early is True else DEF.RELAY_CMD\n\n h = FixedLenCell.Header(circ_id=circ_id, cmd=cmd,\n link_version=link_version)\n\n return EncryptedCell(h, enc_payload=payload)\n\n def getBytes(self, trimmed=False):\n '''Construct and return the byte string represented by this cell.\n\n :param bool trimmed: ignored, encrypted cells don't know anything\n about their payload or its length\n :returns: **str** raw byte string this cell represents\n '''\n return self.header.getBytes() + self.enc_payload\n\n def _parsePayload(self, data):\n '''Parse the string *data* and extract cell fields.\n\n .. note::\n\n EncryptedCell does not try to interpret the payload,\n assuming that it is encrypted and unreadable and will be\n decrypted and parsed somewhere else.\n\n :param str data: string to parse\n '''\n start, end = self.payloadRange()\n self.enc_payload = data[start:end]\n\n def __repr__(self):\n fmt = \"EncryptedCell({}, enc_payload={})\"\n return fmt.format(repr(self.header), repr(self.enc_payload))\n\n\nTIMESTAMP_LEN = 4\nNUM_ADDRESSES_LEN = 1\nMAX_THIS_OR_ADDRESSES = 5\n\n\nclass NetInfoCell(FixedLenCell):\n '''.. note:: tor-spec.txt, Section 4.5'''\n\n def __init__(self, header, timestamp=None, other_or_address=None,\n num_addresses=None, this_or_addresses=None):\n '''\n .. note: Addresses here are represented as type/length/value\n structures, defined in :class:`~oppy.cell.util.TLVTriple`.\n\n Reference: tor-spec.txt, Section 6.4\n\n :param :class:`~oppy.cell.fixedlen.FixedLenCell.Header` header:\n header to use with this cell\n :param str timestamp: Time this NetInfoCell was created. Big-endian\n unsigned integer of seconds since the Unix epoch (in packed\n byte format).\n :param :class:`~oppy.cell.util.TLVTriple` other_or_address: Remote\n address associated with this NetInfoCell. If we are the\n initiator of this cell, this is the relay's address that we're\n communicating with. If we are the recipient, this is our public\n IP address.\n :param int num_addresses: the number of this_or_addresses included\n in this NetInfoCell.\n :param list, `~oppy.cell.util.TLVTriple` this_or_addresses: List of\n originating public IP addresses of this NetInfoCell.\n '''\n self.header = header\n self.timestamp = timestamp\n self.other_or_address = other_or_address\n self.num_addresses = num_addresses\n self.this_or_addresses = this_or_addresses\n\n @staticmethod\n def make(circ_id, other_or_address, this_or_addresses, timestamp=None,\n link_version=3):\n '''Build and return a NetInfo cell, using default values where\n possible.\n\n Automatically create and use an appropriate FixedLenCell.Header.\n\n :param int circ_id: Circuit ID to use for this cell\n :param str timestamp: Time this `NetInfoCell` was created. 
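When omitted, *make()* fills this in with the current time via struct.pack('!I', int(time.time())), as the code below does. 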
Big-endian\n unsigned integer of seconds since the Unix epoch (packed\n format).\n :param oppy.cell.util.TLVTriple other_or_address: Public IP\n address of the recipient of this NetInfoCell.\n :param list, oppy.cell.util.TLVTriple this_or_addresses: List\n of the public IP address(es) of the originator of this\n NetInfoCell.\n :returns: :class:`~oppy.cell.fixedlen.NetInfoCell`\n '''\n h = FixedLenCell.Header(circ_id=circ_id,\n cmd=DEF.NETINFO_CMD,\n link_version=link_version)\n\n if timestamp is None:\n timestamp = struct.pack('!I', int(time.time()))\n\n if len(this_or_addresses) > MAX_THIS_OR_ADDRESSES:\n msg = \"oppy only supports up to 5 'this_or_addresses' in a \"\n msg += \"NetInfoCell, received: {}.\"\n msg = msg.format(len(this_or_addresses))\n raise BadPayloadData(msg)\n\n return NetInfoCell(h, timestamp=timestamp,\n other_or_address=other_or_address,\n num_addresses=len(this_or_addresses),\n this_or_addresses=this_or_addresses)\n\n def getBytes(self, trimmed=False):\n '''Construct and return the byte string represented by this cell.\n\n :param bool trimmed: Whether or not we should pad this cell's bytes.\n If **False**, pad based on Link Protocol version in use.\n :returns: **str** raw bytes this cell represents\n '''\n ret = self.header.getBytes()\n ret += self.timestamp\n ret += self.other_or_address.getBytes()\n ret += struct.pack('!B', self.num_addresses)\n\n for TLVaddr in self.this_or_addresses:\n ret += TLVaddr.getBytes()\n\n if trimmed is True:\n return ret\n else:\n return FixedLenCell.padCellBytes(ret, self.header.link_version)\n\n def _parsePayload(self, data):\n '''Parse the string data and extract cell fields.\n\n :param str data: string to parse\n '''\n start, _ = self.payloadRange()\n offset = start\n\n self.timestamp = data[offset:offset + TIMESTAMP_LEN]\n offset += TIMESTAMP_LEN\n\n self.other_or_address = TLVTriple.parse(data, offset)\n offset += len(self.other_or_address)\n\n self.num_addresses = data[offset:offset + NUM_ADDRESSES_LEN]\n self.num_addresses = struct.unpack('!B', self.num_addresses)[0]\n offset += NUM_ADDRESSES_LEN\n\n if self.num_addresses > MAX_THIS_OR_ADDRESSES:\n msg = \"oppy only supports up to 5 'this_or_addresses' in a \"\n msg += \"NetInfoCell, received: {}.\"\n msg = msg.format(self.num_addresses)\n raise BadPayloadData(msg)\n\n self.this_or_addresses = []\n for i in xrange(self.num_addresses):\n t = TLVTriple.parse(data, offset)\n self.this_or_addresses.append(t)\n offset += len(t)\n\n def __repr__(self):\n fmt = '{}, timestamp={}, other_or_address={}, '\n fmt += 'num_addresses={}, this_or_addresses={}'\n fmt = 'NetInfoCell({})'.format(fmt)\n return fmt.format(repr(self.header), repr(self.timestamp),\n repr(self.other_or_address),\n repr(self.num_addresses),\n repr(self.this_or_addresses))\n\n\nclass PaddingCell(FixedLenCell):\n '''.. note:: tor-spec, Section 3, 7.2.\n\n .. 
note:: Padding has no cell payload fields, so we just use the inherited\n fields.\n\n '''\n @staticmethod\n def make(circ_id, link_version=3):\n '''Build and return a Padding cell, using default values where\n possible.\n\n Automatically create and use an appropriate FixedLenCell.Header.\n\n :param int circ_id: Circuit ID to use for this cell\n :param int link_version: Link Protocol version in use.\n :returns: :class:`~oppy.cell.fixedlen.PaddingCell`\n '''\n h = FixedLenCell.Header(circ_id=circ_id,\n cmd=DEF.PADDING_CMD,\n link_version=link_version)\n PAD_BYTE = \"\\x00\"\n return PaddingCell(h, PAD_BYTE * DEF.MAX_PAYLOAD_LEN)\n","repo_name":"nskinkel/oppy","sub_path":"oppy/cell/fixedlen.py","file_name":"fixedlen.py","file_ext":"py","file_size_in_byte":25088,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"62"} +{"seq_id":"22771844242","text":"# Head Notes\n# Tensorflow doesn't like numpy 1.17 and gives a lot of warnings; to remove them, use the following command:\n# pip install \"numpy<1.17\"\n\n# Python API\nimport os\nimport random\nimport time\nimport warnings\n# 3rd party API\nimport pickle\nimport numpy as np\nimport cv2\nfrom PIL import Image\nfrom skimage.transform import resize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer, StandardScaler, Normalizer\nfrom keras import Model\nfrom keras.layers.core import Dense\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras.preprocessing.image import img_to_array, ImageDataGenerator\nfrom keras.applications.mobilenet_v2 import MobileNetV2\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau\nfrom keras.utils import plot_model\nfrom matplotlib import pyplot as plt\n\n#To remove warnings from the system\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# Variables\npath = 'Trashnet'\npathTrainResult = 'TrainResult'\nbatch_size = 12\nepochs = 500\nWIDTH = 192 # (224, 192, 160, 128, and 96).\nHEIGHT = WIDTH\nlista_imagens, x, y = [], [], []\nlb = LabelBinarizer()\n\n\nfor category in os.listdir(path):\n path_category = path + '/' + category\n folder = os.path.join(path_category)\n images = os.listdir(folder)\n print('{0} - {1}'.format(category, len(images)))\n for j in images:\n imagePath = os.path.join(folder + '/' + j)\n lista_imagens.append((imagePath, category))\nprint('Total: {0} images'.format(len(lista_imagens)))\n\nstart = time.time()\n\n# loop over the input images\nfor imagePath, category in lista_imagens:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, (WIDTH, HEIGHT))\n image = img_to_array(image)\n x.append(image)\n y.append(category)\n\n\nend = time.time()\nprint('Resized images in {0} seconds'.format(round(end-start,0)))\n\n# transform multi-class labels to binary labels\ny = np.array(y)\ny = lb.fit_transform(y)\n\n# Generate test dataset\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n# Generate validation dataset by splitting the training set (not the full data, so the test set stays unseen)\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2)\n\nx_l = []\nfor i in X_train:\n x_l.append(i.reshape(-1))\nX_train = np.array(x_l)\nX_train.reshape(-1)\n\nx_l = []\nfor i in X_val:\n x_l.append(i.reshape(-1))\nX_val = np.array(x_l)\n\nx_l = []\nfor i in X_test:\n x_l.append(i.reshape(-1))\nX_test = np.array(x_l)\n\n\n# Image Standardization\nscaler = 
StandardScaler()\nscaler.fit(X_train)\nX_train = scaler.transform(X_train)\nX_val = scaler.transform(X_val)\nX_test = scaler.transform(X_test)\nprint('Standardized images')\n\n# Image Normalization\nscaler = Normalizer()\nscaler.fit(X_train)\nX_train = scaler.transform(X_train)\nX_val = scaler.transform(X_val)\nX_test = scaler.transform(X_test)\nprint('Normalized Images')\n\nX_trein = []\nfor i in X_train:\n X_trein.append(i.reshape((WIDTH,HEIGHT, 3)))\nX_train = np.array(X_trein)\n\nX_vali = []\nfor i in X_val:\n X_vali.append(i.reshape((WIDTH,HEIGHT, 3)))\nX_val = np.array(X_vali)\n\nX_teste = []\nfor i in X_test:\n X_teste.append(i.reshape((WIDTH,HEIGHT, 3)))\nX_test = np.array(X_teste)\n\n\nprint('Defining classifier')\nmobilenet = MobileNetV2(input_shape=(WIDTH, HEIGHT, 3), include_top=False, weights='imagenet')\n\nx = mobilenet.output\nx = GlobalAveragePooling2D()(x)\n\nx=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.\nx=Dense(1024,activation='relu')(x) #dense layer 2\nx=Dense(512,activation='relu')(x) #dense layer 3\n\npredictions = Dense(6, activation='softmax')(x)\nclassifier = Model(inputs= mobilenet.input, outputs=predictions)\nclassifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\nclassifier.summary()\nprint('Finished defining classifier')\n\nif not os.path.exists(pathTrainResult):\n os.makedirs(pathTrainResult)\n\n# construct the training image generator for data augmentation\naug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,\n\twidth_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,\n\thorizontal_flip=True, fill_mode=\"nearest\")\n\n\nfile_name = 'batch_{0}_shape_{1}'.format(batch_size, WIDTH)\n\n# to save best model\nbestcheckpoint = ModelCheckpoint(pathTrainResult + '/batch_'+ str(batch_size) +'_epochs_'+ str(epochs) +'_shape_'+ str(WIDTH) +'.h5', save_best_only=True, monitor='val_loss', mode='min')\ncallback = EarlyStopping(monitor='val_loss', min_delta=0, patience=30, mode='auto')\ncsv_logger = CSVLogger('PlotResults/batch_'+ str(batch_size) +'_epochs_'+ str(epochs) +'_shape_'+ str(WIDTH) +'_training.log')\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001)\nsteps = int( np.ceil(X_train.shape[0] / batch_size) )\n\n# fit() should be used for small datasets, loads everything into memory\n# fit_generator() should be used for larger datasets, which loads into memory only small batches of data.\nH = classifier.fit_generator(aug.flow(X_train, y_train, batch_size=batch_size), validation_data = (X_val, y_val),steps_per_epoch=steps, epochs = epochs, verbose = 1, callbacks=[bestcheckpoint, csv_logger, reduce_lr])\n\n# Plot training & validation accuracy values\nplt.figure()\nplt.plot(H.history['acc'])\nplt.plot(H.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Val'], loc='upper left')\nplt.savefig('PlotResults/{0}_accplot.png'.format(file_name)) \n\n# Plot training & validation loss values\nplt.figure()\nplt.plot(H.history['loss'])\nplt.plot(H.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Val'], loc='upper left')\nplt.savefig('PlotResults/{0}_lossplot.png'.format(file_name))\n\n# save One Hot Encoding\nf = open('{0}/{1}_one_hot_encoding.txt'.format(pathTrainResult, file_name), \"wb\") \nf.write(pickle.dumps(lb))\nf.close()\nprint(\"Saved One Hot Encoding to 
disk\")\n\n# save X_test\nf = open('{0}/{1}_X_test.txt'.format(pathTrainResult, file_name), \"wb\") \nf.write(pickle.dumps(X_test))\nf.close()\nprint(\"Saved X_test to disk\")\n\n# save y_test\nf = open('{0}/{1}_y_test.txt'.format(pathTrainResult, file_name), \"wb\") \nf.write(pickle.dumps(y_test))\nf.close()\nprint(\"Saved y_test to disk\")\n","repo_name":"antoniosequeira/trainer_mobilenet_v2","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15704450282","text":"# Logic and definitions used from multiple files.\n\nfrom collections import namedtuple\nimport datetime\nimport logging\nimport os\nimport pathlib\nimport pickle\nimport sys\n\nfrom annoy import AnnoyIndex\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom progress.bar import Bar\n\n# Silence chatty TF.\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\n# Embedding model metadata.\n_MODEL_DIR = 'imagenet_mobilenet_v2_140_224_feature_vector'\n_MODEL_INPUT_DIMS = (224, 224)\n_EMBED_IMAGE = hub.KerasLayer(_MODEL_DIR)\n\n_IMG_BORDER_THRESHOLD = 10\n_IMG_MIN_WIDTH = 224\n_IMG_MIN_HEIGHT = 224\n_IMG_MIN_RATIO = 1.30\n_IMG_MAX_RATIO = 2.30\n\n_CLOSE_BUF_SIZE = 10\n_CLOSE_TIME = datetime.timedelta(seconds=5.5)\n_CLOSE_TIME_FRACTION = 3\n_CLOSE_DISTANCE = 0.40\n_MAX_OUTLIERS = 3\n\n_SQUASH_EXP_FACTOR = 5.0\n\n_DIST_METRIC = 'angular'\n\n_SAMPLE_HZ = 4.0\n\nFEATURE_VECTOR_LEN = 1792\nRECORD_COMPRESSION = 'ZLIB'\n\nEmbeddedFrame = namedtuple('EmbeddedFrame',\n ['title', 'date', 'length', 'timestamp', 'features'])\n\n\n# Returns the \"min:sec\" formatted version of the given\n# duration.\ndef _format_duration(d):\n mins, secs = divmod(d.seconds, 60)\n return f'{mins}:{secs:02}'\n\n\n# Accepts a sorted list and a delta value, and returns the (inclusive) start\n# and end indicies of a maximal sublist whose range does not exceed the delta.\ndef _max_close_window(ts, delta):\n e = len(ts) - 1\n u, v = 0, 0\n best_u, best_v = 0, 0\n while v < e or ts[v] - ts[u] > delta:\n while ts[v] - ts[u] > delta:\n u += 1\n\n if v - u > best_v - best_u:\n best_u, best_v = u, v\n\n if v < e:\n v += 1\n\n return (best_u, best_v)\n\n\n# Searches the database for the datapoints nearest to the given query.\ndef _find_nearest_frames(mds, nn_db, query):\n # Get metadata indices of closest points. Stored as:\n # [index list, distance list].\n query_results = nn_db.get_nns_by_vector(query,\n _CLOSE_BUF_SIZE,\n include_distances=True)\n\n # Reconstruct records of closest points. 
Stored as (dist, datapoint) for\n # sorting purposes.\n return sorted((d, mds[i]) for i, d in zip(*query_results))\n\n\n# Accepts a list of candidate datapoints and synthesises them into one frame\n# selection.\ndef _choose_nearest_frame(close_pts):\n # Calculate frequency of individual titles.\n freq = {}\n for t in close_pts:\n title = t[1].title\n freq[title] = freq.get(title, []) + [t]\n\n # A title is better if it is more frequent than the others, or else if its\n # closest distance is less than the others.\n chosen_pts = max(freq.items(), key=lambda t: (len(t[1]), -t[1][0][0]))[1]\n chosen_frames = [c for (_, c) in chosen_pts]\n chosen_frames.sort(key=lambda c: c.timestamp)\n\n # Only a small number of titles may deviate from our modal title.\n outlier_limit = min((len(close_pts) + 1) // 2, _MAX_OUTLIERS)\n if len(chosen_frames) < len(close_pts) - outlier_limit:\n return None\n\n # Return the median close timestamp if many of the timestamps are close.\n chosen_ts = None\n dense_ts_u, dense_ts_v = _max_close_window(\n [c.timestamp for c in chosen_frames], _CLOSE_TIME)\n if 1 + dense_ts_v - dense_ts_u > len(chosen_pts) // _CLOSE_TIME_FRACTION:\n chosen_ts = chosen_frames[(dense_ts_u + dense_ts_v) // 2].timestamp\n\n chosen_frame = chosen_frames[0]\n return EmbeddedFrame(title=chosen_frame.title,\n date=chosen_frame.date,\n length=chosen_frame.length,\n timestamp=chosen_ts,\n features=None)\n\n\n# Converts a candidate title into an ad-hoc confidence score.\ndef _score_title(close_pts, title):\n # Squash distances into scores in (0, 1).\n scores = [1 - d / _CLOSE_DISTANCE for d, c in close_pts if c.title == title]\n\n # Pull scores towards 1.\n return (sum(scores) / _CLOSE_BUF_SIZE)**(1 / _SQUASH_EXP_FACTOR)\n\n\n# Accepts a PIL image and returns a version of the image with any black border\n# removed. 
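The crop box is the bounding box of the remaining bright pixels, taken from the row and column indices that numpy's nonzero() reports. 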
A pixel is considered \"black\" if all of its channels have magnitude\n# less than a small threshold.\ndef _crop_image_border(image):\n y_light, x_light, _ = (np.asarray(image) > _IMG_BORDER_THRESHOLD).nonzero()\n\n # All black.\n if len(y_light) == 0:\n return Image.new('RGB', (0, 0))\n\n return image.crop(\n (np.min(x_light), np.min(y_light), np.max(x_light), np.max(y_light)))\n\n\n# Accepts a PIL image and returns the image in a tensor of the format needed by\n# the emebdding model (or None if the image isn't suitable for input).\ndef _image_to_tensor(image):\n cropped_image = _crop_image_border(image.convert('RGB'))\n image_w, image_h = cropped_image.size\n\n # Can't scale the image if it would mutate it too much.\n if image_w < _IMG_MIN_WIDTH or image_h < _IMG_MIN_HEIGHT or \\\n not (_IMG_MIN_RATIO < image_w / image_h < _IMG_MAX_RATIO):\n return None\n\n formatted_image = cropped_image.resize(_MODEL_INPUT_DIMS, Image.NEAREST)\n image_array = tf.keras.utils.img_to_array(formatted_image)\n return tf.expand_dims(image_array, 0)\n\n\n# Accepts a PIL image and returns its corresponding vector in the embedding\n# space, if it is viable to embed.\ndef embed_image(image):\n image_tensor = _image_to_tensor(image)\n if image_tensor is None:\n return None\n\n return _EMBED_IMAGE(image_tensor).numpy()[0]\n\n\n# Returns a string representing the given datapoint.\ndef format_datapoint(d):\n date_str = d.date.strftime('%-m %b %Y')\n\n if d.timestamp:\n length_str = _format_duration(d.length)\n timestamp_str = _format_duration(d.timestamp)\n return f'**{d.title}** [{timestamp_str}/{length_str}] ({date_str})'\n\n return f'**{d.title}** ({date_str})'\n\n\n# Accepts a database and image contents and returns either:\n# a) a guess of the frame that the image most closely resembles and an ad-hoc\n# confidence value for the match, or\n# b) None, if there is no close match.\ndef find_nearest_frame(mds, nn_db, image_bytes):\n # Generate query datapoint.\n query_features = embed_image(Image.open(image_bytes))\n if query_features is None:\n return None\n\n # Search the database.\n closest = _find_nearest_frames(mds, nn_db, query_features)\n\n ## Debug output.\n #print()\n #for d, c in closest:\n # print(f'{format_datapoint(c)} ')\n\n # Only consider close datapoints.\n close_enough = [t for t in closest if t[0] <= _CLOSE_DISTANCE]\n close_enough.sort()\n if not close_enough:\n return None\n\n # Synthesise close points into one selection.\n chosen = _choose_nearest_frame(close_enough)\n if not chosen:\n return None\n\n # Include a confidence score.\n return chosen, _score_title(close_enough, chosen.title)\n\n\n# Initializes a NN database with the parameters needed for our embedding.\ndef init_nn_db():\n return AnnoyIndex(FEATURE_VECTOR_LEN, _DIST_METRIC)\n\n\n# Parse date and video title from full path, with stem in the\n# in the format:\n# YYYY-MM-DD NAME\n# Returns (name, date) tuple.\ndef parse_video_path(path):\n path_stem = pathlib.Path(path).stem\n sep_index = path_stem.find(' ')\n date = datetime.datetime.strptime(path_stem[:sep_index], '%Y-%m-%d')\n return (date, path_stem[sep_index + 1:])\n\n\n# Embeds a sample of video frames and runs the specified function on the result.\ndef embed_video_frames(video_path, process_fn, bar):\n date, title = parse_video_path(video_path)\n\n # Read video.\n cap = cv2.VideoCapture(video_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n if fps == 0.0:\n print()\n print(f'Error can\\'t read FPS for \"{title}\".')\n return\n\n frame_stride = fps / _SAMPLE_HZ\n frame_count = 
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Process frames for this video.\n for frame_index in np.arange(0.0, frame_count, frame_stride):\n if bar:\n bar.next(frame_stride / frame_count)\n\n # Jump to and read frame.\n cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_index))\n ret, frame = cap.read()\n if not ret:\n print()\n print(f'Failed to read \"{title}\" frame {frame_index}.')\n continue\n\n # Massage frame into format required by TF.\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n embedded = embed_image(Image.fromarray(frame))\n if embedded is None:\n continue\n\n # Pass data to processor function.\n record = EmbeddedFrame(\n title=title,\n date=date,\n length=datetime.timedelta(seconds=frame_count / fps),\n timestamp=datetime.timedelta(seconds=frame_index / fps),\n features=embedded)\n process_fn(record)\n\n\n# Print a pretty progress bar with max value 1.0.\ndef progress_bar(title):\n bar = Bar(f'{title:40.40}', max=1.0, suffix='%(percent)d%%')\n bar.bar_prefix = ' ['\n bar.bar_suffix = '] '\n return bar\n","repo_name":"mjmartis/gully-bot","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":9083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23653015520","text":"\"\"\"A database encapsulating collections of near-Earth objects.\"\"\"\n\n\nclass NEODatabase:\n \"\"\"A database of near-Earth objects and their close approaches.\"\"\"\n\n def __init__(self, neos, approaches):\n \"\"\"Create a new `NEODatabase`.\"\"\"\n self._neos = neos\n self._approaches = approaches\n self.neos_dict_des = dict()\n self.neos_dict_name = dict()\n for neo in self._neos:\n self.neos_dict_des[neo.designation] = neo\n if neo.name:\n self.neos_dict_name[neo.name] = neo\n for approach in self._approaches:\n approach.neo = self.neos_dict_des[approach._designation]\n self.neos_dict_des[approach._designation].approaches.append(\n approach\n )\n\n def get_neo_by_designation(self, designation):\n \"\"\"Find and return an NEO by its primary designation.\"\"\"\n return self.neos_dict_des.get(designation, None)\n\n def get_neo_by_name(self, name_neo):\n \"\"\"Find and return an NEO by its name.\"\"\"\n return self.neos_dict_name.get(name_neo, None)\n\n def query(self, filters=()):\n \"\"\"Query close approaches.\"\"\"\n if len(filters) == 0:\n yield from self._approaches\n return\n else:\n for approach in self._approaches:\n flg = True\n for filter_func in filters:\n if not filter_func(approach):\n flg = False\n break\n if flg:\n yield approach\n","repo_name":"namvt002/Udacity_python","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5888621444","text":"\"\"\"\nThis module defines test cases for index rest API\n\"\"\"\n# pylint: disable=cyclic-import\nimport unittest\nfrom library_app import app\nfrom .test_base import Base\n\n\nclass IndexAPITest(Base):\n \"\"\"\n Class for index rest API test cases\n \"\"\"\n def test_index(self):\n \"\"\"\n Test if /api is working, test for 200 status code\n \"\"\"\n tester = app.test_client()\n response = tester.get('/api/', follow_redirects=True)\n statuscode = response.status_code\n self.assertEqual(statuscode, 200)\n\n def test_index_content_type(self):\n \"\"\"\n Test if /api returns the correct content type\n \"\"\"\n tester = app.test_client()\n response = tester.get('/api', follow_redirects=True)\n content_type = 
response.content_type\n self.assertEqual(content_type, \"application/json\")\n\n def test_index_data(self):\n \"\"\"\n Test for correct content in response\n \"\"\"\n tester = app.test_client()\n response = tester.get('/api', follow_redirects=True)\n self.assertTrue(b'resources' in response.data)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"lordwerneo/epam-library","sub_path":"library_app/tests/test_index_rest.py","file_name":"test_index_rest.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30035161562","text":"#! /usr/bin/python\n#\n# Sensorino smarthome server\n#\n# Author: Andrew Zaborowski \n#\n# This software is provided under the 3-clause BSD license.\n#\n# This script simulates a trivial sensor node setup so that the server\n# can be dry-run tested easily. When the server is running, start this\n# script to make it connect to localhost port 8888 and make it appear as\n# if a Base node was present and able to communicate over-the-air with\n# a single Sensorino node at address 10. That remote node has two\n# services:\n# * a light switch, which someone stubbornly keeps flipping\n# every 20 seconds indefinitely.\n# * a relay that controls the light in the room.\n\nimport time\nimport sys\n\nimport base_lib\n\nnode_addr = 10\n\nrelay_svcs = [ 2 ]\nswitch_svcs = [ 3 ]\n\nclass relay_channel(base_lib.base_channel):\n\tdef set_value(self, val):\n\t\tbase_lib.base_channel.set_value(self, val)\n\t\tif val:\n\t\t\tsys.stderr.write('It\\'s bright again.\\n')\n\t\telse:\n\t\t\tsys.stderr.write('It\\'s dark now.\\n')\n\nnode = base_lib.base_create_node(node_addr)\n\nswitches = [ node.create_service(svc_id).create_channel('switch', False, True) \\\n\tfor svc_id in switch_svcs ]\n\nfor svc_id in relay_svcs:\n\tnode.create_service(svc_id).add_channel(relay_channel('switch', \\\n\t\t\tTrue, True))\n\ndef handle_20s_timeout():\n\tswitches[0].publish_value(not switches[0].get_value())\n\nbase_lib.base_init('base-test')\nlast_ts = time.time()\nwhile 1:\n\tnow = time.time()\n\ttimeout = 20.0 - (now - last_ts)\n\tif timeout < 0:\n\t\thandle_20s_timeout()\n\t\tlast_ts = now\n\t\tcontinue\n\n\ttry:\n\t\tbase_lib.base_run(timeout)\n\texcept KeyboardInterrupt:\n\t\tbreak\n\texcept base_lib.BaseDisconnect:\n\t\tbreak\n\nbase_lib.base_done()\n","repo_name":"Sensorino/sensorino-smarthome","sub_path":"base-test.py","file_name":"base-test.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"15273210999","text":"from flask import Flask, render_template, request, redirect, session\nfrom random import randint\napp = Flask(__name__)\napp.secret_key = 'totally a secret'\n\n\nmoves = {0: 'rock', 1: 'paper', 2: 'scissors'}\noutcomes = {\"0,0\": \"tied\", \"0,1\": \"lose\", \"0,2\": \"won\", \"1,0\": \"won\", \"1,1\": \"tied\", \"1,2\": \"lose\", \"2,0\": \"lose\", \"2,1\": \"won\", \"2,2\": \"tied\"}\n\n\ndef play_rps(user_move, cpu_move):\n return outcomes[f\"{user_move},{cpu_move}\"]\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/choice', methods=['POST'])\ndef choice():\n session[\"cpu\"] = randint(0, 2)\n session[\"user\"] = request.form[\"choice\"]\n return redirect('/result')\n\n\n@app.route('/result')\ndef result():\n if \"cpu\" in session.keys() and \"user\" in session.keys():\n return render_template('result.html', 
result=play_rps(session[\"user\"], session[\"cpu\"]), comp_move=moves[session[\"cpu\"]], your_move=moves[int(session[\"user\"])])\n else:\n return redirect('/')\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)\n","repo_name":"GoatDragon/CodingDojo","sub_path":"python_stack/flask/fundamentals/rock_paper_scissors/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"11459291249","text":"import numpy as np\nfrom function import ObjectiveFunction\n\n\nclass Solver:\n def __init__(self, function: ObjectiveFunction):\n self.objective_function = function\n self.init_w = function.init_w\n self.opt_w = function.opt_w\n self.f = function.f\n self.df = function.df\n self.ddf = function.ddf\n\n def steepest_descent_method(self, lam, max_iter=20):\n w = self.init_w\n step_size = 1 / np.abs(np.max(np.linalg.eig(self.ddf(w, lam))[0]))\n error = []\n y = []\n for k in range(max_iter + 1):\n error.append(self.calc_error(w))\n y.append(self.f(w, lam))\n w = w - self.df(w, lam) * step_size\n return error, y\n\n def steepest_descent_method_with_armijo_rule(self, lam, max_iter=20, alpha=1, xi=1e-3, tau=0.5):\n w = self.init_w\n step_size = alpha\n error = []\n y = []\n for k in range(max_iter + 1):\n while self.f(w - step_size * self.df(w, lam), lam) > \\\n self.f(w, lam) - xi * step_size * self.df(w, lam).T @ \\\n self.df(w, lam):\n step_size *= tau\n error.append(self.calc_error(w))\n y.append(self.f(w, lam))\n w = w - self.df(w, lam) * step_size\n return error, y\n\n def nesterovs_accelerated_gradient_algorithm(self, lam, max_iter=20):\n w = self.init_w\n w_bf = self.init_w\n step_size = 1 / np.abs(np.max(np.linalg.eig(self.ddf(w, lam))[0]))\n beta = 0\n error = []\n y = []\n for k in range(max_iter + 1):\n error.append(self.calc_error(w))\n y.append(self.f(w, lam))\n (w, w_bf) = (w + beta * (w - w_bf) - step_size * self.df(w + beta * (w - w_bf), lam), w.copy())\n beta = k / (k + 3)\n return error, y\n\n def calc_error(self, w):\n if self.opt_w is not None:\n return np.linalg.norm(w - self.opt_w)\n else:\n return None\n","repo_name":"onioni0202/mathematical_optimization_2022S_Q1","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8226696891","text":"import sys\nsys.stdin=open('input.txt', 'r') # Replace input with the values loaded from input.txt\n\nt=int(input()) # t = number of test cases = the single-digit integer read first\n\nfor p in range(1, t+1): # Loop over the test cases; p starts at 1 (range up to t+1) because p is printed in the output\n s=input() # Read a 4-character string s of uppercase letters\n s1=list(set(s)) # Turn the deduplicated characters of s into a list\n if len(s1) == 2: # Condition: s1 must contain exactly 2 distinct characters\n if s.count(s1[0]) == s.count(s1[1]) == 2: # and each of the two characters must occur exactly twice in s\n print(f'#{p}', 'Yes') # print '#p Yes' using an f-string\n else:\n print(f'#{p}', 'No') # print '#p No' when the counts are not both 2\n else:\n print(f'#{p}', 'No') # print '#p No' when the deduplicated list does not have length 2\n\n# Swapping the order of the if statements raises an index-out-of-range error.\n# Probably because some examples have len(s1)=1, so the count lookup goes out of range","repo_name":"nihelv/algorithm","sub_path":"Examples/반반.py","file_name":"반반.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29854598174","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 7 13:00:29 2020\n\n@author: ricardo\n\"\"\"\n\n\nimport pandas as pd\nfrom func import sdss_spec\n#test_size = 20, 25, 30\n\nsavein = True\n\n \ntfp_tfn_splus_r = pd.read_csv('tfp_tfn_splus_r_v3.csv')\nlista_pmf_splus_r = sdss_spec(tfp_tfn_splus_r, 'specs S-PLUS r', save = savein)\n\ntfp_tfn_gaia = pd.read_csv('tfp_tfn_gaia_v3.csv')\nlista_pmf_gaia = sdss_spec(tfp_tfn_gaia, 'specs GAIA', save = savein)\n\ntfp_tfn_gaia_splus = pd.read_csv('tfp_tfn_gaia_splus_r_v3.csv')\nlista_pmf_gaia_splus = sdss_spec(tfp_tfn_gaia_splus, 'specs GAIA + S-PLUS r',\\\n save = savein)\n","repo_name":"Zericardos/IC-2020-final","sub_path":"sdss_spectra_v3.py","file_name":"sdss_spectra_v3.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35110586483","text":"class Node: \n def __init__(self, data): \n self.data = data \n self.next = None\n \n\nclass Queue: \n \n def __init__(self): \n self.top = self.bottom = None\n \n def isEmpty(self): \n return self.top == None\n\n def EnQueue(self, item): \n newNode = Node(item) \n \n if self.bottom == None: \n self.top = self.bottom = newNode \n return\n self.bottom.next = newNode \n self.bottom = newNode \n \n def DeQueue(self): \n \n if self.isEmpty(): \n return\n newNode = self.top \n self.top = newNode.next\n \n if(self.top == None): \n self.bottom = None\n \n\nq = Queue() \nq.EnQueue(10) \nq.EnQueue(20) \nq.EnQueue(30) \nq.EnQueue(40) \nq.EnQueue(50) \nq.DeQueue() \nprint(\"Queue top \" + str(q.top.data)) \nprint(\"Queue bottom \" + str(q.bottom.data)) \n ","repo_name":"clouds16/data-structures","sub_path":"week4/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40641829043","text":"
ParentalKey\nfrom taggit.models import TaggedItemBase, Tag as TaggitTag\nfrom wagtail.admin.edit_handlers import StreamFieldPanel, MultiFieldPanel\nfrom wagtail.core.blocks import StreamBlock\nfrom wagtail.core.fields import StreamField\nfrom wagtail.core.models import Page\nfrom wagtail.snippets.edit_handlers import SnippetChooserPanel\nfrom wagtailmetadata.models import MetadataPageMixin\n\nfrom .blocks import *\nfrom .categories.models import *\nfrom .articles.blocks import *\n\n\nclass HomePage(Page):\n subpage_types = [\n 'home.ArticlesCategoryPage'\n ]\n\n\nclass ArticlesCategoryPage(MetadataPageMixin, Page):\n category = models.ForeignKey(\n ArticleCategory, blank=False, on_delete=models.SET_NULL, null=True\n )\n\n content_panels = [\n SnippetChooserPanel('category')\n ]\n\n def get_home_page(self):\n return self.get_parent().specific\n\n promote_panels = []\n settings_panels = []\n\n def clean(self):\n super().clean()\n self.title = self.category.name\n self.slug = slugify(self.title, allow_unicode=True)\n\n parent_page_types = [\n 'home.HomePage'\n ]\n subpage_types = [\n 'home.ArticlePage'\n ]\n\n\nclass ArticleTag(TaggedItemBase):\n content_object = ParentalKey(\n 'ArticlePage', related_name='article_tags', on_delete=models.CASCADE\n )\n\n\nclass ArticlePage(MetadataPageMixin, HitCountMixin, Page):\n image = models.ForeignKey(\n 'wagtailimages.Image',\n help_text='square image',\n null=True, blank=True, on_delete=models.SET_NULL, related_name='+'\n )\n categories = ParentalManyToManyField(ArticleCategory, blank=True)\n tags = ClusterTaggableManager(\n through=ArticleTag, blank=True\n )\n\n summary = StreamField(\n StreamBlock(\n [\n ('summary', SummaryBlock())\n ], min_num=1, max_num=1, required=True\n ), blank=False, null=True\n )\n introduction = StreamField(\n StreamBlock(\n [\n ('introduction', IntroductionBlock())\n ], min_num=1, max_num=1, required=True\n ), blank=False, null=True\n )\n conclusion = StreamField(\n StreamBlock(\n [\n ('conclusion', ConclusionBlock())\n ], min_num=0, max_num=1, required=True\n ), blank=False, null=True\n )\n\n paragraphs = StreamField([\n ('paragraph', ParagraphBlock()),\n ('image_paragraph', ImageParagraphBlock()),\n ('linkable_image_paragraph', LinkableImageParagraph()),\n ('linkable_paragraph', LinkableParagraph())\n ], blank=True)\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n ImageChooserPanel('image'),\n FieldPanel('categories', widget=forms.CheckboxSelectMultiple),\n FieldPanel('tags'),\n ], heading='Details', classname=\"collapsible collapsed\"),\n MultiFieldPanel([\n StreamFieldPanel('summary'),\n StreamFieldPanel('introduction'),\n StreamFieldPanel('paragraphs'),\n StreamFieldPanel('conclusion'),\n ], heading='Paragraphs', classname=\"collapsible collapsed\"),\n ]\n\n promote_panels = Page.promote_panels\n settings_panels = []\n\n def get_summary(self):\n return self.summary[0].value['paragraph']\n\n def get_introduction(self):\n return self.introduction[0].value['paragraph']\n\n def get_conclusion(self):\n return self.conclusion[0].value['paragraph']\n\n def get_context(self, request):\n linkable_paragraphs = []\n for paragraph in self.paragraphs:\n block_type = paragraph.block_type\n if 'linkable' in block_type:\n linkable_paragraphs.append(paragraph)\n context = super().get_context(request)\n context['linkable_paragraphs'] = linkable_paragraphs\n return context\n\n def get_home_page(self):\n return self.get_parent().specific.get_home_page()\n\n parent_page_types = [\n 'home.ArticlesCategoryPage'\n ]\n 
subpage_types = []\n","repo_name":"VahediRepositories/DoctorBit","sub_path":"doctorhub/home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72448679236","text":"# from time import sleep\nfrom time import sleep\nimport RPi.GPIO as GPIO \nimport pigpio\n\nbase = 18\nelbow = 19\npwm = pigpio.pi()\npwm.set_mode(base, pigpio.OUTPUT)\npwm.set_mode(elbow, pigpio.OUTPUT)\n\npositions = [700, 1000, 1500, 2000, 2300, 2000, 1500, 1000, 700]\n\ntry:\n for i in positions:\n pwm.set_servo_pulsewidth(base, i)\n for j in positions:\n pwm.set_servo_pulsewidth(elbow, j)\n print(f'{i}, {j}')\n sleep(1)\n\nexcept KeyboardInterrupt:\n # turning off servo\n pwm.set_PWM_dutycycle(base, 0)\n pwm.set_PWM_frequency(base, 0 )\n pwm.set_PWM_dutycycle(elbow, 0)\n pwm.set_PWM_frequency(elbow, 0 )\n GPIO.cleanup()\n print(\"GPIO cleanup complete.\")\nfinally:\n pwm.set_PWM_dutycycle(base, 0)\n pwm.set_PWM_frequency(base, 0 )\n pwm.set_PWM_dutycycle(elbow, 0)\n pwm.set_PWM_frequency(elbow, 0 )\n GPIO.cleanup()\n print(\"GPIO cleanup complete.\")\n\n# GPIO.setmode(GPIO.BCM)\n\n# BASE_SERVO = 18\n# # ELBOW_SERVO = 22\n\n# GPIO.setup(BASE_SERVO, GPIO.OUT)\n# # GPIO.setup(ELBOW_SERVO, GPIO.OUT)\n\n# base_handler = GPIO.PWM(BASE_SERVO, 1000)\n# # elbow_handler = GPIO.PWM(ELBOW_SERVO, 1000)\n\n# base_handler.start(0)\n\n# try: \n# while True:\n# for i in range(100):\n# print(i)\n# base_handler.ChangeDutyCycle(i)\n# sleep(0.2)\n\n# except KeyboardInterrupt:\n# base_handler.stop()\n# # elbow_handler.stop()\n# GPIO.cleanup()\n# print(\"GPIO cleanup complete.\")\n","repo_name":"fuzzygreenblurs/3dof_control","sub_path":"rough/servos.py","file_name":"servos.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31964730936","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom .managers import VocabManager\n\n\nclass Vocabulary(models.Model):\n word = models.CharField(max_length=100)\n translation = models.TextField()\n review_count = models.PositiveIntegerField(blank=True, default=0)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n objects = VocabManager()\n\n def review(self):\n self.review_count += 1\n self.save()\n return self.review_count\n \n def __str__(self):\n return self.user.username\n\n\nclass Sentence(models.Model):\n text = models.TextField()\n translation = models.TextField()\n vocabulary = models.ForeignKey(Vocabulary, on_delete=models.CASCADE, \n related_name='sentences')\n\n def __str__(self):\n return f'{self.text[:20]}...'","repo_name":"alirahmnicode/learnewgu","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3751381389","text":"import json\nfrom typing import Any\nfrom unittest.mock import AsyncMock, Mock, patch\n\nimport pytest\nfrom application import nats_error_response\nfrom application.repositories import hawkeye_repository as hawkeye_repository_module\nfrom application.repositories.hawkeye_repository import HawkeyeRepository\nfrom config import testconfig\nfrom nats.aio.msg import Msg\nfrom shortuuid import uuid\n\nuuid_ = uuid()\nuuid_mock = 
patch.object(hawkeye_repository_module, \"uuid\", return_value=uuid_)\n\n\n@pytest.fixture(scope=\"function\")\ndef hawkeye_repository():\n return HawkeyeRepository(\n nats_client=Mock(),\n notifications_repository=Mock(),\n )\n\n\ndef to_json_bytes(message: dict[str, Any]):\n return json.dumps(message, default=str, separators=(\",\", \":\")).encode()\n\n\nclass TestHawkeyeRepository:\n def instance_test(self):\n nats_client = Mock()\n notifications_repository = Mock()\n\n hawkeye_repository = HawkeyeRepository(nats_client, notifications_repository)\n\n assert hawkeye_repository._nats_client is nats_client\n assert hawkeye_repository._notifications_repository is notifications_repository\n\n @pytest.mark.asyncio\n async def get_probes_ok_test(self, hawkeye_repository):\n request = {\n \"request_id\": uuid_,\n \"body\": {},\n }\n response = {\n \"request_id\": uuid_,\n \"body\": [\n {\n \"probeId\": \"27\",\n \"uid\": \"b8:27:eb:76:a8:de\",\n \"os\": \"Linux ARM\",\n \"name\": \"FIS_Demo_XrPi\",\n \"testIp\": \"none\",\n \"managementIp\": \"none\",\n \"active\": \"1\",\n \"type\": \"8\",\n \"mode\": \"Automatic\",\n \"n2nMode\": \"1\",\n \"rsMode\": \"1\",\n \"typeName\": \"xr_pi\",\n \"serialNumber\": \"B827EB76A8DE\",\n \"probeGroup\": \"FIS\",\n \"location\": \"\",\n \"latitude\": \"0\",\n \"longitude\": \"0\",\n \"endpointVersion\": \"9.6 SP1 build 121\",\n \"xrVersion\": \"4.2.2.10681008\",\n \"defaultInterface\": \"eth0\",\n \"defaultGateway\": \"192.168.90.99\",\n \"availableForMesh\": \"1\",\n \"lastRestart\": \"2020-10-15T02:13:24Z\",\n \"availability\": {\"from\": 1, \"to\": 1, \"mesh\": \"1\"},\n \"ips\": [\"192.168.90.102\", \"192.226.111.211\"],\n \"userGroups\": [\"1\", \"10\"],\n \"wifi\": {\n \"available\": 0,\n \"associated\": 0,\n \"bssid\": \"\",\n \"ssid\": \"\",\n \"frequency\": \"\",\n \"level\": \"0\",\n \"bitrate\": \"\",\n },\n \"nodetonode\": {\"status\": 0, \"lastUpdate\": \"2020-11-06T10:38:07Z\"},\n \"realservice\": {\"status\": 0, \"lastUpdate\": \"2020-10-15T02:18:28Z\"},\n }\n ],\n \"status\": 200,\n }\n NATS_AIO_MSG = Msg(_client=\"NATS\", data=to_json_bytes(response))\n hawkeye_repository._nats_client.request = AsyncMock(return_value=NATS_AIO_MSG)\n\n with uuid_mock:\n result = await hawkeye_repository.get_probes()\n\n hawkeye_repository._nats_client.request.assert_awaited_once_with(\n \"hawkeye.probe.request\", to_json_bytes(request), timeout=120\n )\n assert result == response\n\n @pytest.mark.asyncio\n async def get_probes_with_request_failing_test(self, hawkeye_repository):\n request = {\n \"request_id\": uuid_,\n \"body\": {},\n }\n\n hawkeye_repository._nats_client.request = AsyncMock(side_effect=Exception)\n\n hawkeye_repository._notifications_repository.send_slack_message = AsyncMock()\n\n with uuid_mock:\n result = await hawkeye_repository.get_probes()\n\n hawkeye_repository._nats_client.request.assert_awaited_once_with(\n \"hawkeye.probe.request\", to_json_bytes(request), timeout=120\n )\n hawkeye_repository._notifications_repository.send_slack_message.assert_awaited_once()\n assert result == nats_error_response\n\n @pytest.mark.asyncio\n async def get_probes_with_request_returning_non_2xx_status_test(self, hawkeye_repository):\n request = {\n \"request_id\": uuid_,\n \"body\": {},\n }\n response = {\n \"request_id\": uuid_,\n \"body\": \"Got internal error from Hawkeye\",\n \"status\": 500,\n }\n NATS_AIO_MSG = Msg(_client=\"NATS\", data=to_json_bytes(response))\n hawkeye_repository._nats_client.request = AsyncMock(return_value=NATS_AIO_MSG)\n\n 
hawkeye_repository._notifications_repository.send_slack_message = AsyncMock()\n\n with uuid_mock:\n result = await hawkeye_repository.get_probes()\n\n hawkeye_repository._nats_client.request.assert_awaited_once_with(\n \"hawkeye.probe.request\", to_json_bytes(request), timeout=120\n )\n hawkeye_repository._notifications_repository.send_slack_message.assert_awaited_once()\n assert result == response\n","repo_name":"Bruin-Dev/Intelygenz","sub_path":"services/hawkeye-outage-monitor/src/tests/repositories/hawkeye_repository_test.py","file_name":"hawkeye_repository_test.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"29898383140","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\n#<---------------definition of the da/de function-------------->\ndef dotx(x,t):\n a = x[0]\n e = x[1]\n return [-(16/(5*a**3))*(1+(73/24)*e**2+(37/96)*e**4)/((1-e**2)**(7/2)),\n -(76/(15*a**4))*e*(1+(121/304)*e**2)/((1-e**2)**(5/2))]\n#<-----------------semi-major axis from the orbital period------------------->\ndef a(T_s):\n return (m_sol*M*(c*T_s/(2*np.pi))**2)**(1/3)\n#<..................orbital period from the semi-major axis----------------->\ndef T(a_m):\n return (2*np.pi/c)*(a_m**3/(M*m_sol))**(1/2)\n#<---------------function that computes the solution of the differential equation-------------->\ndef solucion(x0,tt_int):\n #<------------Solution of the differential equation---------------------->\n sol = odeint(dotx,x0,tt_int)\n at_todos = sol[:,0]\n # check whether at reaches 2; if so, truncate the solution array\n restriccion = np.where(at_todos<2)[0]\n if len(restriccion)!=0:\n #<------------------Determine the time at which at=2------------->\n pos_ttmax = restriccion[0]\n print('Shortening interval to tt_max = '+str(tt_int[pos_ttmax]))\n else: \n pos_ttmax = len(tt_int)\n tt = tt_int[:pos_ttmax]\n #<------------time in years--------------->\n t_a = tt*R_ast/c/31557600\n at = sol[:pos_ttmax,0]\n e = sol[:pos_ttmax,1]\n #<-------------solution for a in meters------------------>\n a_m = at*R_ast\n #<--------------solution for T in seconds-------------------->\n T_s = T(a_m)\n return tt,t_a,at,e,a_m,T_s\n#<-----------Definition of the function g(e)------------------>\ndef g(e):\n return e**(12/19)*(1+121*e**2/304)**(870/2299)/(1-e**2)\ndir_graphics=\"../images/\"\n#<--------------Initial period in days---------------->\nT0_d = 0.322997448911\n#<-----------------Initial eccentricity------------------->\ne0 = 0.6171334\n#<--------Mass of the orbiting body in solar masses---------->\nM_c = 1.3886\n#<--------------------------Pulsar mass in solar masses------------->\nM_p = 1.4398\n#---------------------Speed of light in m/s---------------------->\nc = 299792458\n#<------------------- MG/C^3------------------------>\nMGcm3 = 4.925490947E-6\n#<-----Solar-mass parameter in meters, m=GM/c^2---------------------->\nm_sol = MGcm3*c\n#<-------------Total mass in solar masses------------------------>\nM = M_c+M_p\n#<-----------------Reduced mass in solar masses------------------>\nmu = (M_c*M_p)/M\n#<--------------R* in meters------------------------->\nR_ast = m_sol*(4*mu*M**2)**(1/3)\n#<-------------Initial period in seconds------------------>\nT0_s = T0_d*86400\n#<----------------Semi-major axis a in meters----------------->\na0_m = a(T0_s)\n#<-------initial dimensionless a----------------------------->\nat0 = 
a0_m/R_ast\n#<----------Dimensionless time------------------->\ntt_int_max = 10**22\n#<-------------Computation times-------------------------->\ntt_int = np.linspace(0,tt_int_max,100000)\n#<--------------Integration time in years-------------------->\nt_max_a = 30\n#<------------------------Initial values---------------------->\nx0 = [at0,e0]\n#<---------------------Computing the solution---------------------->\ntt,t_a,at,e,a_m,T_s = solucion(x0,tt_int) \n#<--------------------Rescaled values---------------------->\na_m=a_m*1e-9\nt_a=t_a*1e-8\n#<---------Plot of the semi-major axis and the eccentricity over time----------->\nplt.plot(t_a,a_m,color=\"#006466\",lw=3,label=\"semi-major axis\")\nplt.plot(t_a,e,color=\"#52b788\",lw=3,label=\"eccentricity\")\nplt.xticks(np.arange(0,3+0.25,0.25));plt.yticks(np.arange(0,2+0.2,0.2))\nplt.xlim(0,3);plt.ylim(0,2)\n#<-------------Axis labels--------------->\nplt.xlabel('time ($10^8$)');plt.ylabel('Distance ($10^9$m) / eccentricity')\nplt.grid(ls=\"--\",color=\"grey\")\nplt.legend(frameon=False,ncol=2,bbox_to_anchor=(0, 1,1,0.02),mode=\"expand\")\nplt.subplots_adjust(left=0.124,bottom=0.14,right=0.964,top=0.894)\nplt.savefig(dir_graphics+\"a_adim.png\",dpi=200)\nplt.clf()\n#<--------------Plot of the period evolution--------------->\nT_h = T_s/3600\nplt.plot(t_a,T_h,lw=3,color=\"#006466\")\nplt.xlim(0,3);plt.ylim(0,8)\nplt.xticks(np.arange(0,3+0.25,0.25))\nplt.yticks(np.arange(0,8+0.5,0.5))\nplt.xlabel('time ($10^{8}$ years)');plt.ylabel('Period (hours)')\nplt.grid(ls=\"--\",color=\"grey\")\nplt.subplots_adjust(left=0.098,bottom=0.11,right=0.957,top=0.948)\nplt.savefig(dir_graphics+\"periodo.png\",dpi=200)\nplt.clf()\n#<------------------Plot of the relation between g and e----------------->\nee=np.linspace(0,1,100)\nplt.plot(ee,g(ee),color=\"#16db93\",lw=3)\nplt.yscale('log');plt.xticks(np.arange(0,1+0.1,0.1));plt.xlim(0,1)\nplt.ylim(0,100)\nplt.xlabel('eccentricity $(e)$',fontsize=14);plt.ylabel('function $g(e)$',fontsize=14)\nplt.grid(ls=\"--\",color=\"grey\")\nplt.savefig(dir_graphics+\"gvse.png\",dpi=200)\nplt.clf()\n#<-----------Plot of the analytic and numerical solutions------------>\nplt.plot(e,at*R_ast*1e-9,color=\"#0582ca\",label='numerical sol.')\nee = np.linspace(min(e),e0,10)\na_an = a0_m*g(ee)*1e-9/g(e0)\nplt.plot(ee,a_an,'o',color=\"#051923\",label='analytic sol.')\nplt.xticks(np.arange(0,0.65+0.05,0.05));plt.yticks(np.arange(0,2+0.2,0.2))\nplt.xlim(0,0.65);plt.ylim(0,2)\nplt.xlabel('eccentricity $(e)$');plt.ylabel('semi-major axis $(10^{9})$')\nplt.legend(frameon=False,ncol=2,bbox_to_anchor=(0, 1,1,0.02),mode=\"expand\",fontsize=13)\nplt.grid(ls=\"--\",color=\"grey\")\nplt.subplots_adjust(left=0.09,bottom=0.117,right=0.971,top=0.883)\nplt.savefig(dir_graphics+\"solana_solnum.png\",dpi=200)\nplt.clf()\n#<-----------------------Solution for the observational data---------------------->\n#<--------------Maximum dimensionless integration time-------------------->\ntt_int_max = 31557600*c*t_max_a/R_ast\ntt_int = np.linspace(0,tt_int_max,100000)\ntt,t_a,at,e,a_m,T_s = solucion(x0,tt_int)\ndota = dotx(x0,0)[0]\ndotT = (3/2)*(c/R_ast)*(T0_s/at0)*dota\ndata = np.loadtxt(\"data-HW.csv\",delimiter=\",\")\nt_exp = data[:,0]-data[0,0]\nDelta_t_exp = data[:,1]\nn = np.arange(40000)\nt_n = (n*T0_s+dotT*T0_s*n*(n-1)/2.)/31557600. 
# time, in years\nDelta_t_n = dotT*T0_s*n*(n-1)/2 # accumulated delay, in seconds\nplt.plot(t_n,Delta_t_n,color=\"#3fc1c0\",label='General relativity',lw=3)\nplt.hlines(0,0,40, color='#4f772d',label='Newtonian',lw=3)\nplt.xlabel('time (years)');plt.ylabel('Accumulated delay (s)')\nplt.xlim(0,33);plt.ylim(-45,5)\nplt.xticks(np.arange(0,33+3,3));plt.yticks(np.arange(-45,5+5,5))\nplt.plot(t_exp,Delta_t_exp,'o',color=\"#1d4e89\",label='Observational data')\nplt.legend(loc=3,frameon=False,ncol=3,bbox_to_anchor=(0, 1,1,0.02),mode=\"expand\")\nplt.grid(ls=\"--\",color=\"grey\")\nplt.subplots_adjust(left=0.09,bottom=0.095,right=0.967,top=0.91)\nplt.savefig(dir_graphics+\"exp.png\",dpi=200)\nplt.clf()","repo_name":"giovannilopez9808/Notas_Agosto_2020","sub_path":"RG/Proyecto_final/Documento/Scripts/Sistema_Binario-Evolucion_Temporal_y_observaciones_HyT.py","file_name":"Sistema_Binario-Evolucion_Temporal_y_observaciones_HyT.py","file_ext":"py","file_size_in_byte":6780,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"73893338438","text":"import os\nimport sys\nimport datetime\nimport time\n\nimport math\nimport json\nfrom collections import OrderedDict\nfrom pathlib import Path\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport utils\nfrom augs.augs import IMAGE_AUGMENTATIONS, EMBED_AUGMENTATIONS, AugWrapper\nimport loaders\n\nfrom torchvision import models as torchvision_models\n\nimport losses\nfrom main_args import get_args_parser, process_args\nfrom model_builders import load_model\n\n\nclass TeacherStudentCombo(nn.Module):\n\n def __init__(self, student, teacher, args):\n super().__init__()\n # synchronize batch norms (if any)\n if utils.has_batchnorms(student) and not args.disable_ddp:\n student = nn.SyncBatchNorm.convert_sync_batchnorm(student)\n teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)\n # teacher and student start with the same weights\n teacher.load_state_dict(student.state_dict())\n # Hacky\n if not args.train_backbone:\n student.backbone = teacher.backbone\n elif not args.req_grad:\n print('WARNING: args.train_backbone=True, but args.req_grad=False. 
'\n 'This is probably not what you want.')\n # there is no backpropagation through the teacher, so no need for gradients\n for p in teacher.parameters():\n p.requires_grad = False\n print(f\"Student and Teacher are built: they are both {args.arch} network.\")\n\n self.args = args\n self.student = student\n self.teacher = teacher\n\n def forward(self, images):\n if self.args.train_backbone:\n return self.teacher(images), self.student(images)\n embed = self.teacher.backbone_embed(images)\n return self.teacher.apply_head(embed), self.student.apply_head(embed)\n\n @property\n def module(self):\n return self\n\n def student_dict(self):\n if self.args.train_backbone:\n return self.student.state_dict()\n return OrderedDict([(k, v) for k, v in self.student.state_dict().items() if \"backbone\" not in k])\n\n @property\n def trainable_student(self):\n if self.args.train_backbone:\n return self.student\n return self.student.head\n\n def teacher_dict(self):\n if self.args.train_backbone:\n return self.teacher.state_dict()\n return OrderedDict([(k, v) for k, v in self.teacher.state_dict().items() if \"backbone\" not in k])\n\n @property\n def trainable_teacher(self):\n if self.args.train_backbone:\n return self.teacher\n return self.teacher.head\n\n\ndef train_dino(args, writer):\n if not args.disable_ddp:\n utils.init_distributed_mode(args)\n if args.batch_size is not None:\n args.batch_size_per_gpu = args.batch_size // utils.get_world_size()\n utils.fix_random_seeds(args.seed)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n print(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n cudnn.benchmark = True\n\n student, _, normalize = load_model(args, split_preprocess=True)\n teacher, _ = load_model(args)\n\n if not args.precomputed:\n aug = IMAGE_AUGMENTATIONS[args.image_aug](num_augs=args.num_augs, **args.aug_args)\n transform = AugWrapper(\n vit_image_size=args.vit_image_size,\n aug_image_size=args.aug_image_size,\n global_augs=aug,\n normalize=normalize,\n image_size=args.image_size\n )\n else:\n aug = EMBED_AUGMENTATIONS[args.embed_aug](num_augs=args.num_augs, **args.aug_args)\n transform = AugWrapper(\n global_augs=aug\n )\n\n dataset = getattr(loaders, args.loader)(\n knn_path=args.knn_path,\n datapath=args.datapath,\n k=args.knn,\n transform=transform, dataset=args.dataset,\n precompute_arch=args.arch if args.precomputed else None,\n **args.loader_args)\n\n if not args.disable_ddp:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)\n else:\n sampler = None\n data_loader = torch.utils.data.DataLoader(\n dataset,\n shuffle=(sampler is None),\n sampler=sampler,\n batch_size=args.batch_size_per_gpu,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=True,\n )\n print(f\"In-distribution Data loaded: there are {len(dataset)} images.\")\n print(\"len dataloader\", len(data_loader))\n\n student_teacher_model = TeacherStudentCombo(teacher=teacher, student=student, args=args)\n # move networks to gpu\n student_teacher_model = student_teacher_model.cuda()\n if not args.disable_ddp:\n student_teacher_model = nn.parallel.DistributedDataParallel(student_teacher_model, device_ids=[args.gpu])\n\n\n # ============ preparing loss ... 
============\n loss_class = getattr(losses, args.loss)\n dino_loss_args = dict(\n out_dim=args.out_dim,\n batchsize=args.batch_size_per_gpu,\n warmup_teacher_temp=args.warmup_teacher_temp,\n teacher_temp=args.teacher_temp,\n warmup_teacher_temp_epochs=args.warmup_teacher_temp_epochs,\n nepochs=args.epochs,\n **args.loss_args)\n if losses.is_multihead(loss_class):\n dino_loss_args.update(num_heads=args.num_heads)\n dino_loss = loss_class(**dino_loss_args).cuda()\n elif args.num_heads == 1:\n dino_loss = loss_class(**dino_loss_args).cuda()\n else:\n dino_loss = nn.ModuleList([loss_class(**dino_loss_args) for _ in range(args.num_heads)]).cuda()\n\n # ============ preparing optimizer ... ============\n params_groups = utils.get_params_groups(student_teacher_model.module.trainable_student)\n if args.optimizer == \"adamw\":\n optimizer = torch.optim.AdamW(params_groups) # to use with ViTs\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9) # lr is set by scheduler\n elif args.optimizer == \"lars\":\n optimizer = utils.LARS(params_groups) # to use with convnet and large batches\n else:\n raise ValueError(\"Unknown optimizer: {}\".format(args.optimizer))\n # for mixed precision training\n fp16_scaler = None\n if args.use_fp16:\n fp16_scaler = torch.cuda.amp.GradScaler()\n\n # ============ init schedulers ... ============\n bs_factor = (args.batch_size_per_gpu * utils.get_world_size()) / 256.\n lr_schedule = utils.cosine_scheduler(\n args.lr * bs_factor, # linear scaling rule\n args.min_lr * bs_factor,\n args.epochs, len(data_loader),\n warmup_epochs=args.warmup_epochs,\n )\n wd_schedule = utils.cosine_scheduler(\n args.weight_decay,\n args.weight_decay_end,\n args.epochs, len(data_loader),\n )\n # momentum parameter is increased to 1. during training with a cosine schedule\n momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, args.max_momentum_teacher,\n args.epochs, len(data_loader))\n print(f\"Loss, optimizer and schedulers ready.\")\n\n # ============ optionally resume training ... ============\n to_restore = {\"epoch\": 0}\n utils.restart_from_checkpoint(\n os.path.join(args.output_dir, \"checkpoint.pth\"),\n run_variables=to_restore,\n student=student_teacher_model.module.student,\n teacher=student_teacher_model.module.teacher,\n optimizer=optimizer,\n fp16_scaler=fp16_scaler,\n dino_loss=dino_loss,\n )\n start_epoch = to_restore[\"epoch\"]\n\n start_time = time.time()\n print(\"Starting DINO training !\")\n for epoch in range(start_epoch, args.epochs):\n if not args.disable_ddp:\n data_loader.sampler.set_epoch(epoch)\n # ============ training one epoch of DINO ... ============\n train_stats = train_one_epoch(student_teacher_model, dino_loss,\n data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,\n epoch, fp16_scaler, args, writer)\n\n # ============ writing logs ... 
============\n save_dict = {\n 'student': student_teacher_model.module.student_dict(),\n 'teacher': student_teacher_model.module.teacher_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch + 1,\n 'args': args,\n 'dino_loss': dino_loss.state_dict(),\n }\n if fp16_scaler is not None:\n save_dict['fp16_scaler'] = fp16_scaler.state_dict()\n utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))\n if args.saveckp_freq and epoch % args.saveckp_freq == 0:\n utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n 'epoch': epoch}\n if utils.is_main_process():\n with (Path(args.output_dir) / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n try: \n torch.set_printoptions(profile=\"full\")\n if epoch % 10 == 0:\n d_loss = dino_loss[0] if hasattr(dino_loss, \"__getitem__\") else dino_loss\n print(\"highest probs:\", torch.topk(d_loss.probs_pos * 100, 50)[0])\n print(\"lowest probs:\", torch.topk(d_loss.probs_pos * 100, 50, largest=False)[0])\n torch.set_printoptions(profile=\"default\")\n except:\n print(\" \")\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\ndef train_one_epoch(student_teacher_model, dino_loss, data_loader,\n optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch,\n fp16_scaler, args, writer):\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)\n for it, data in enumerate(metric_logger.log_every(data_loader, 10, header)):\n images, _ = data\n \n # update weight decay and learning rate according to their schedule\n it = len(data_loader) * epoch + it # global training iteration \n for i, param_group in enumerate(optimizer.param_groups):\n param_group[\"lr\"] = lr_schedule[it]\n if i == 0: # only the first group is regularized\n param_group[\"weight_decay\"] = wd_schedule[it]\n # move images to gpu\n images = [im.cuda(non_blocking=True) for im in images]\n \n # teacher and student forward passes + compute dino loss\n with torch.cuda.amp.autocast(fp16_scaler is not None):\n teacher_out, student_out = student_teacher_model(images)\n if losses.is_multihead(dino_loss) or args.num_heads == 1:\n head_losses = dino_loss(student_out, teacher_out, epoch=epoch)\n else:\n head_losses = torch.stack([d(s, t, epoch=epoch) for d, s, t in zip(dino_loss, student_out, teacher_out)])\n loss = head_losses.mean()\n\n if not math.isfinite(loss.item()):\n print(\"Loss is {}, stopping training\".format(loss.item()), flush=True)\n sys.exit(1)\n\n # student update\n optimizer.zero_grad()\n param_norms = None\n if fp16_scaler is None:\n loss.backward()\n if args.clip_grad:\n param_norms = utils.clip_gradients(student_teacher_model, args.clip_grad)\n utils.cancel_gradients_last_layer(epoch, student_teacher_model,\n args.freeze_last_layer)\n optimizer.step()\n else:\n fp16_scaler.scale(loss).backward()\n if args.clip_grad:\n fp16_scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n param_norms = utils.clip_gradients(student_teacher_model, args.clip_grad)\n utils.cancel_gradients_last_layer(epoch, student_teacher_model,\n args.freeze_last_layer)\n fp16_scaler.step(optimizer)\n fp16_scaler.update()\n\n # EMA update for the teacher\n with torch.no_grad():\n m = momentum_schedule[it] # momentum parameter\n s_head_params = 
student_teacher_model.module.trainable_student.parameters()\n t_head_params = student_teacher_model.module.trainable_teacher.parameters()\n for param_q, param_k in zip(s_head_params, t_head_params):\n param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)\n\n # logging\n torch.cuda.synchronize()\n metric_logger.update(loss=loss.item())\n metric_logger.update_raw(head_losses=head_losses)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n metric_logger.update(wd=optimizer.param_groups[0][\"weight_decay\"])\n if utils.is_main_process():\n writer.add_scalar(\"Train loss step\", loss, it)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n\n if utils.is_main_process() and args.num_heads > 1:\n avg_loss = metric_logger.meters['head_losses'].global_avg\n student_teacher_model.module.teacher.head.set_losses(avg_loss)\n student_teacher_model.module.student.head.set_losses(avg_loss)\n\n if utils.is_main_process():\n if args.num_heads == 1:\n writer.add_scalar(\"Train loss epoch\", torch.Tensor([metric_logger.meters['loss'].global_avg]), epoch)\n else:\n avg_loss = metric_logger.meters['head_losses'].global_avg\n writer.add_scalars(\"Train loss epoch\",\n {f\"head{i}\": loss for i, loss in enumerate(avg_loss)},\n epoch)\n\n d_loss = dino_loss[0] if hasattr(dino_loss, \"__getitem__\") else dino_loss\n if hasattr(d_loss, 'probs_pos'):\n writer.add_histogram(\"p(k) over Epochs\", d_loss.probs_pos, epoch)\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.scalar_meters.items()}\n\n\ndef default_out_dir(loss, dset):\n return f\"./experiments/{loss}-{dset}\"\n\n\ndef make_out_dir(args):\n if args.output_dir is None:\n args.output_dir = default_out_dir(args.loss, args.loader)\n if args.new_run:\n n = 1\n dir_name = args.output_dir\n while Path(args.output_dir).is_dir():\n n += 1\n args.output_dir = f\"{dir_name}{n}\"\n Path(args.output_dir).mkdir(parents=True, exist_ok=not args.new_run)\n\n\ndef main():\n parser = get_args_parser()\n args = parser.parse_args()\n args = process_args(args)\n\n make_out_dir(args)\n writer = None\n if utils.is_main_process():\n writer = SummaryWriter(args.output_dir)\n with open(os.path.join(args.output_dir, \"hp.json\"), 'wt') as f:\n json.dump(vars(args), f, indent=4, default=str)\n train_dino(args, writer)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HHU-MMBS/TEMI-official-BMVC2023","sub_path":"train_main.py","file_name":"train_main.py","file_ext":"py","file_size_in_byte":15110,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"1644361038","text":"import requests\nimport json\n\naid = 45936507\n\nurl=f\"https://api.bilibili.com/x/web-interface/view?aid={aid}\"\nheader={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\nr=requests.get(url,headers=header)\nsdw=r.content.decode('utf-8')\nlks=json.loads(sdw)\nprint(lks)\n","repo_name":"ChenHaolinOlym/bilibili-subtitle-downloader","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"69992088837","text":"import os\nimport unittest\nfrom tests import ParserTest\nimport monstermash\n\nfiles = filter(lambda x: x.endswith(\".txt\"), os.listdir(\"tests/\"))\nfiles = map(lambda x: x[:-4], files)\n\n\nclass 
TestMonsterMashParser(ParserTest):\n name = \"MonsterMash\"\n parsers = None\n tests = files\n\n def genericParserFunction(self, tarr):\n return monstermash.parse(tarr)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"urzumph/monster-mash","sub_path":"monstermash_test.py","file_name":"monstermash_test.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"23791424176","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time : 2020/8/21\n@Author : jim\n@File : BloomFilter\n@Description : \n\"\"\"\n\nfrom bitarray import bitarray\nimport mmh3\n\nclass BloomFilter:\n def __init__(self,size,hash_num):\n # constructor arguments: the size of the bit array and the number of hash functions\n self.size = size\n self.hash_num = hash_num\n # initialize the bit array\n self.bit_array = bitarray(size)\n self.bit_array.setall(0)\n\n def add(self,s):\n for seed in range(self.hash_num):\n result = mmh3.hash(s, seed) % self.size\n self.bit_array[result] = 1\n\n def lookup(self,s):\n for seed in range(self.hash_num):\n if self.bit_array[mmh3.hash(s, seed) % self.size] == 0: return 'Nope'\n return 'Probably'\n\nbf = BloomFilter(10,3)\nbf.add('hello')\nbf.add('world')\nprint(bf.lookup('hello'))\nprint(bf.lookup('bye'))","repo_name":"DanJimm/AlgorithmStudy","sub_path":"BloomFilter.py","file_name":"BloomFilter.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"72858086024","text":"import matplotlib as mpl\nfrom matplotlib import cm\nimport folium\nimport pandas as pd\nimport numpy as np\nimport os\nimport webbrowser\nfrom PIL import Image\n\ndef calcColors(values : pd.Series):\n \"\"\"\n For a given series this function calculates an RGB color for each value\n in the series. Larger numbers will be allocated a darker color.\n\n Parameters\n --------\n values : a series containing integer or float values\n\n Returns\n --------\n colors : a list of RGB colors, list\n \"\"\"\n # Convert series to list\n values = values.tolist()\n # Create color for each value in values\n colors = [\"#%02x%02x%02x\" % (int(r), int(g), int(b))\n for r, g, b, _ in\n 255*mpl.cm.OrRd(mpl.colors.Normalize()(values))\n ]\n\n return colors\n\ndef calcSize(values : pd.Series):\n \"\"\"\n For each value in a given series the float or integer provided is converted\n into a number that can be used to determine the size of a point in a plot.\n Parameters\n --------\n values : a series containing integer or float values\n\n Returns\n --------\n sizes : a list of point sizes, list\n\n \"\"\"\n # Calculate size of bubble\n sizes = np.where(values < 16, ((values**3)**0.5)*1.3, ((17**3)**0.5)*1.3)\n\n return sizes.tolist()\n\ndef rotateCustomIcon(rotation : int):\n output_image = os.path.abspath('../arrow/arrow_temp.png')\n # Load arrow\n image = Image.open(os.path.abspath('../arrow/arrow.png'))\n # Rotate\n image = image.rotate(rotation, expand=True)\n # Save image\n image.save(output_image)\n # Create folium custom icon\n icon = folium.features.CustomIcon(output_image, icon_size=(50, 50))\n\n return icon\n\ndef drawSurfMap(df : pd.DataFrame):\n \"\"\"\n Using folium a html map is created with a circle marker for each surf spot.\n The circle's size is determined by the max breaking wave height and the\n color intensity by the number of MSW solid stars of the swell. 
A popup marker\n is included providing further information: spot name, swell height, period\n and number of MSW solid stars.\n\n Parameters\n --------\n df : DataFrame with the following columns: spot, solidRating, maxBreakingHeight,\n longitude and latitude, pd.DataFrame\n\n Returns\n --------\n m : folium html map, folium.map\n \"\"\"\n # Calculate color values for color map\n colors = calcColors(df['solidRating'])\n # Calculate sizes\n sizes = calcSize(df['maxBreakingHeight'])\n\n # Intialise folium\n m = folium.Map([26, -40], zoom_start=3.5, tiles='cartodbpositron')\n\n # Add a cirlce marker for each spot\n for i, row in df.iterrows():\n # Add an arrow to map pointing in direction of the wind\n# folium.Marker(location=[row.longitude, row.latitude],\n# icon=rotateCustomIcon(rotation=row.direction),\n# ).add_to(m)\n # Define popup text\n popup = (row['spot'] +\n '
<br>    msw stars: ' + str(row.solidRating) +\n '
<br>    wave height: ' + str(row.maxBreakingHeight) + 'ft' +\n ' @ ' + str(row.period) + 's' +\n '
<br>    wind speed: ' + str(row.speed) + 'mph')\n # Add CircleMarker to map for each spot\n folium.CircleMarker(location=[row.longitude, row.latitude],\n radius=sizes[i],\n fill=True,\n popup=popup,\n fill_color=colors[i],\n color=None,\n fill_opacity=0.7\n ).add_to(m)\n\n # Save map as html\n m.save('../index.html')\n # Open html file in web browser\n webbrowser.open('file://' + os.path.realpath('../index.html'))\n","repo_name":"jcconnell/msw","sub_path":"msw/geoVisual.py","file_name":"geoVisual.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"15683791716","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @File : test_v1_acquiring_open_account_status.py\n# @Author: zy\n# @Date : 2020/4/13\nimport pytest\n\nfrom ProductApi.StoreWeb import api\nfrom test_cases.store_web.data import account_data\n\n\ndef get_resp(params, params1):\n username = account_data.data()[\"username\"]\n password = account_data.data()[\"password\"]\n api1 = api.StoreWebApi(username=username, password=password, trading_entity=params, Minor_Version=params1,\n print_results=True)\n resp = api1.v1_acquiring_open_account_status()\n # resp.encoding = 'utf-8'\n return resp\n\n\n# Case 1 -- ledger \"FF3\": account-opening documents were submitted but rejected, i.e. the review failed (ledger id 3675196); a Minor-Version value of 4 marks a Web request\ndef test_1():\n resp = get_resp(params=\"3675196\", params1=\"4\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 0\n\n\n# Case 2 -- ledger \"dd\": account-opening documents were submitted and are pending review, i.e. the merchant has not been reviewed yet.\n# That is, \"Edit\" was never clicked under merchant management in the business-ledger backend, so it never entered the in-review state; a Minor-Version value of \"4\" marks a Web request\ndef test_2():\n resp = get_resp(params=\"3672790\", params1=\"4\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 3\n\n\n# Case 3 -- ledger \"零售勿删3604098\": this ledger already has an account opened\ndef test_3():\n resp = get_resp(params=\"3604098\", params1=\"4\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 2\n\n\n# Case 4 -- ledger \"12345\": the review failed for this ledger\ndef test_4():\n resp = get_resp(params=\"370059118\", params1=\"\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 0\n\n\n# Case 5 -- ledger \"FF3\": documents were submitted and the review failed\ndef test_5():\n resp = get_resp(params=\"3675196\", params1=\"\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 0\n\n\n# Case 6 -- ledger \"零售勿删3604098\": this ledger already has an account opened\ndef test_6():\n resp = get_resp(params=\"3604098\", params1=\"\")\n assert resp.status_code == 200\n # dict_text = json.loads(resp.text)\n dict_text = resp.json()\n assert dict_text[\"total_status\"] == 2\n\n\nif __name__ == '__main__':\n pytest.main()\n\n# Queries the ledger's account-opening status; the new Minor-Version=4 was added, and this checks that the older version remains compatible\n\n# total_status values: -1: no account opened, 0: review failed, 1: in review, 2: review passed, 3: pending review\n\n# Ledger 3604098 already has an account opened; ledger 3672790 is named dd, and 3675196 is named FF3\n","repo_name":"zbw3/SuiBusiness","sub_path":"test_cases/store_web/MMDS_6141/test_v1_acquiring_open_account_status.py","file_name":"test_v1_acquiring_open_account_status.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4201878934","text":"import logging\nimport time\nfrom typing import Any\n\nimport fastapi\n\n\nclass CacheBase:\n def __init__(self, gc_size=50):\n self.cache_storage: dict[Any, tuple[Any, int]] = 
{}\n self.gc_size = gc_size\n\n def set(self, key, data, expire):\n if len(self.cache_storage) > self.gc_size:\n self.gc()\n self.cache_storage[key] = (data, int(expire))\n\n def get(self, key):\n if len(self.cache_storage) > self.gc_size:\n self.gc()\n try:\n o = self.cache_storage[key]\n if int(time.time()) > o[1]:\n del self.cache_storage[key]\n return None\n else:\n return o[0]\n except KeyError:\n return None\n\n def gc(self):\n now = int(time.time())\n for i in list(self.cache_storage.keys()):\n try:\n if now > self.cache_storage[i][1]:\n del self.cache_storage[i]\n except KeyError:\n continue\n\n\ndef _rate_limiter(request: fastapi.Request):\n cache: CacheBase = request.app.state.cache\n uid = request.state.uid\n now = int(time.time())\n if cache.get(uid) is None:\n cache.set(uid, True, now + 10)\n return\n else:\n logging.info(f\"user {uid} request too fast\")\n raise fastapi.HTTPException(status_code=429, detail=\"request too fast\")\n\n\nrate_limiter = fastapi.Depends(_rate_limiter)\n\n\ndef inject_rate_limiter(app: fastapi.FastAPI):\n app.state.cache = CacheBase()\n","repo_name":"HSwift/challenged","sub_path":"app/limiter.py","file_name":"limiter.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31392181216","text":"class Solution:\n def removeKdigits(self, num: str, k: int) -> str:\n v, s = list(), 0\n for ch in num:\n while len(v)>0 and k>0 and v[-1]>ch:\n v.pop(); k-=1\n v.append(ch)\n while k>0:\n k-=1 ; v.pop()\n while s < len(v):\n if v[s] != '0':\n break\n s += 1\n return ''.join(v[s:]) if s < len(v) else '0'","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/remove-k-digits.py","file_name":"remove-k-digits.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30536938664","text":"arr = [0, 0, 1, 1, 0, 0, 1, 0, 1]\n\n\ndef sort_2_color():\n li = 0\n hi = len(arr) - 1\n while li <= hi:\n if arr[li] == 0:\n li += 1\n else:\n arr[li], arr[hi] = arr[hi], arr[li]\n hi -= 1\n return arr\n\n\nprint(sort_2_color())\n\narr = [1, 1, 2, 0, 1, 2, 0, 1, 0, 2, 1]\n\n\ndef sort_3_color():\n li = 0\n mi = 0\n hi = len(arr) - 1\n\n while mi <= hi:\n if arr[mi] == 0:\n arr[mi], arr[li] = arr[li], arr[mi]\n mi += 1\n li += 1\n elif arr[mi] == 1:\n mi += 1\n elif arr[mi] == 2:\n arr[mi], arr[hi] = arr[hi], arr[mi]\n hi -= 1\n return arr\n\n\nprint(sort_3_color())\n\n\n\"\"\"\n[0, 0, 0, 0, 0, 1, 1, 1, 1]\n[0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2]\n\"\"\"","repo_name":"AjayKrP/GoogleInterview","sub_path":"DS_Practice/Array/dutch-national-flag-problem.py","file_name":"dutch-national-flag-problem.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"45326222416","text":"import json\r\nimport sys\r\nimport requests\r\nimport getpass\r\nimport urllib3\r\n\r\n#Prevent SSL certificate warnings\r\nurllib3.disable_warnings()\r\n\r\n#Option menu\r\nmenu = {}\r\nmenu[\"1\"]=\"-- List all FMC host objects with IP and Object ID\" \r\nmenu[\"2\"]=\"-- List all FMC host objects full JSON output\"\r\n\r\nwhile True: \r\n options=menu.keys()\r\n print(\"\\n\" * 100)\r\n print(\"\\n\\n\")\r\n print(\"GET Hosts from FMC\")\r\n print(\"\\n\\n\")\r\n for entry in options: \r\n print(entry, menu[entry])\r\n selection=input(\"\\nSelect an option: \")\r\n if selection in menu:\r\n break\r\n\r\n#Get FMC Server 
\r\nserver_start = \"https://\"\r\nserver_main = input(\"\\nEnter the IP or FQDN of your FMC: https://\")\r\nserver = server_start + server_main\r\n\r\n#Get FMC Credentials\r\nusername = input(\"Username: \")\r\npassword = getpass.getpass(\"Password: \")\r\n\r\n#Update to console\r\nprint(\"\\nAccessing FMC API...\", end =\"\")\r\n\r\n#Enable output to be logged and specify outputfile\r\norig_stdout = sys.stdout\r\nsys.stdout = open('GEToutput.txt', 'w')\r\n\r\n#Define authentication elements\r\nr = None\r\nheaders = {'Content-Type': 'application/json'}\r\napi_auth_path = \"/api/fmc_platform/v1/auth/generatetoken\"\r\nauth_url = server + api_auth_path\r\n\r\n#Generate insecure auth token for API operation\r\ntry:\r\n r = requests.post(auth_url, headers=headers, auth=requests.auth.HTTPBasicAuth(username,password), verify=False)\r\n auth_headers = r.headers\r\n auth_token = auth_headers.get('X-auth-access-token', default=None)\r\n auth_domain = auth_headers.get('DOMAIN_UUID', default=None)\r\n if auth_token == None:\r\n print(\"auth_token not found. Exiting...\")\r\n sys.exit()\r\nexcept Exception as err:\r\n print (\"Error getting auth token : \"+str(err))\r\n sys.exit()\r\n \r\n#Create authenticated url for API operation\r\nheaders['X-auth-access-token']=auth_token \r\napi_path = \"/api/fmc_config/v1/domain/\" + auth_domain + \"/object/hosts?expanded=true&limit=1000\"\r\nurl = server + api_path\r\nif (url[-1] == '/'):\r\n url = url[:-1]\r\n\r\n#Send the GET request for the host objects \r\ntry:\r\n r = requests.get(url, headers=headers, verify=False)\r\n status_code = r.status_code\r\n resp = r.text\r\n if (status_code == 200):\r\n json_resp = json.loads(resp)\r\n else:\r\n r.raise_for_status()\r\n print(\"GET error : \"+resp)\r\nexcept requests.exceptions.HTTPError as err:\r\n print (\"Connection error : \"+str(err)) \r\n\r\nfinally:\r\n if r : r.close()\r\n\r\n#GET host objects in text format \r\nif selection == \"1\":\r\n print(\"hostname,ipaddress,objectid\")\r\n for HOST in json_resp['items']:\r\n print(HOST['name'] + \",\" + HOST['value'] + \",\" + HOST['id'])\r\n#GET host objects in JSON format \r\nelif selection == \"2\":\r\n print(json.dumps(json_resp['items'],indent=3, separators=(',', ': ')))\r\nelse:\r\n sys.exit()\r\n#Disable output logging\r\nsys.stdout = orig_stdout\r\n\r\n#Update to console\r\nprint(\"Completed\\n\\nLocate GEToutput.txt in the script execution directory\\n\")\r\n#End script mode\r\ninput(\"Press <Enter> to close this window.\")","repo_name":"Maneck-UK/FMC-API-Scripts","sub_path":"get-hosts-for-csv-070620.py","file_name":"get-hosts-for-csv-070620.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"18483893330","text":"from db import *\nfrom util import *\nfrom config import *\nimport pandas as pd\n\n\ndef system_stats(system_name):\n\n total_click = 0\n win = 0\n loss = 0\n tie = 0\n\n system = systems.select(systems.c.name==system_name).execute().first()\n\n if system_name not in BASELINE_SYSTEMS:\n system_sessions = sessions.select(sessions.c.system_ranking == system.id).execute().fetchall()\n if not len(system_sessions):\n system_sessions = sessions.select(sessions.c.system_recommendation == system.id).execute().fetchall()\n\n else:\n type = 'RANK' if system_name == 'livivo_base' else 'REC'\n system_ids = [system.id for system in systems.select(systems.c.type == type).execute().fetchall()]\n if type == 'RANK':\n system_sessions = 
sessions.select(sessions.c.system_ranking.in_(system_ids)).execute().fetchall()\n else:\n system_sessions = sessions.select(sessions.c.system_recommendation.in_(system_ids)).execute().fetchall()\n\n session_ids = [s.id for s in system_sessions]\n system_feedbacks = feedbacks.select(feedbacks.c.session_id.in_(session_ids)).execute().fetchall()\n\n for sys_feed in system_feedbacks:\n base_click_cnt = 0\n exp_click_cnt = 0\n for rank, doc in sys_feed.clicks.items():\n if doc.get('type') == 'EXP' and doc.get('clicked'):\n exp_click_cnt += 1\n if system_name not in BASELINE_SYSTEMS:\n total_click += 1\n\n if doc.get('type') == 'BASE' and doc.get('clicked'):\n base_click_cnt += 1\n if system_name in BASELINE_SYSTEMS:\n total_click += 1\n\n if base_click_cnt == exp_click_cnt and base_click_cnt + exp_click_cnt > 0:\n tie += 1\n\n if base_click_cnt > exp_click_cnt:\n if system_name not in BASELINE_SYSTEMS:\n loss += 1\n else:\n win += 1\n\n if base_click_cnt < exp_click_cnt:\n if system_name not in BASELINE_SYSTEMS:\n win += 1\n else:\n loss += 1\n\n num_sessions = len(system_sessions)\n impressions = len(system_feedbacks)\n outcome = win / (win + loss) if win + loss > 0 else 0\n ctr = total_click / impressions if impressions > 0 else 0\n\n return {'Win': win,\n 'Loss': loss,\n 'Tie': tie,\n 'Outcome': outcome,\n 'Sessions': num_sessions,\n 'Impressions': impressions,\n 'Clicks': total_click,\n 'CTR': ctr}\n\n\ndef main():\n mkdir(RESULT_DIR)\n system_names = [system.name for system in systems.select().execute().fetchall() if system.name not in NOT_PARTICIPATED]\n overall_stats = {system_name: system_stats(system_name) for system_name in system_names}\n pd.DataFrame.from_dict(overall_stats).transpose().to_csv(os.path.join(RESULT_DIR, 'overall_stats.csv'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stella-project/stella-evaluations","sub_path":"scripts/overall_stats.py","file_name":"overall_stats.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9028707336","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport math\n\ndef plotL(u, v, xmin, xmax, ymin, ymax, title, filename):\n origin = np.array([[0, 0], [0, 0]])\n V = np.array([u, v])\n\n fig, ax = plt.subplots()\n ax.quiver(*origin, V[:,0], V[:,1], color=['blue'], angles='xy', scale_units='xy', scale=1)\n\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.grid()\n \n plt.title(title,fontsize=10)\n plt.savefig(filename, bbox_inches='tight')\n # plot.show()\n\ndef main():\n U = np.array([0, 2])\n V = np.array([1, 0])\n plotL(U, V, -2, 2, -2, 2, \"The letter L\", \"ch2sec2fig0.png\")\n angle = math.pi/4\n angle2 = math.pi/2\n angle3 = -1 * math.pi / 2\n transforms = [\n np.array([[0.5, 0], [0, 0.5]]),\n np.array([[0, -1], [1, 0]]),\n np.array([[-1, 0], [0, 1]]),\n np.array([[1, 0], [0, -1]]),\n np.array([[math.cos(angle), -1 * math.sin(angle)], [math.sin(angle), math.cos(angle)]]),\n np.array([[math.cos(angle2), -1 * math.sin(angle2)], [math.sin(angle2), math.cos(angle2)]]),\n np.array([[math.cos(angle3), -1 * math.sin(angle3)], [math.sin(angle3), math.cos(angle3)]])\n ]\n for i in range(len(transforms)):\n transform = transforms[i]\n plotL(np.matmul(transform, U), np.matmul(transform, V), -2, 2, -2, 2, \"Transformation %d\" %(i+1), \"ch2sec2fig%d.png\" %(i+1))\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ataylor89/LinearAlgebraWithApplications","sub_path":"_ch2sec2.py","file_name":"_ch2sec2.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27530995398","text":"from collections import deque\r\nn = int(input())\r\ngraph = [list(map(int, input().split())) for _ in range(n)]\r\ncnt = 0\r\nmax_h = 0\r\nresult = 0\r\nfor i in range(n):\r\n tmp = max(graph[i])\r\n max_h = max(tmp, max_h)\r\ndx = [0, -1, 0, 1]\r\ndy = [-1, 0, 1, 0]\r\ndef bfs(a, b, visited, h):\r\n q = deque()\r\n q.append((a,b))\r\n visited[a][b] = 1\r\n while q:\r\n x, y = q.popleft()\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n if 0<=nx<n and 0<=ny<n and graph[nx][ny] > h and not visited[nx][ny]:\r\n q.append((nx,ny))\r\n visited[nx][ny] = 1\r\nfor h in range(max_h):\r\n visited = [[0]*n for _ in range(n)]\r\n cnt = 0\r\n for i in range(n):\r\n for j in range(n):\r\n if not visited[i][j] and graph[i][j] > h:\r\n bfs(i, j, visited, h)\r\n cnt += 1\r\n if result < cnt : \r\n result = cnt\r\nprint(result)","repo_name":"sshee0123/Baekjoon","sub_path":"백준/Silver/2468. 안전 영역/안전 영역.py","file_name":"안전 영역.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"37226185881","text":"#\n# @lc app=leetcode.cn id=62 lang=python3\n#\n# [62] Unique Paths\n#\n\n# @lc code=start\nclass Solution:\n # Key points of dynamic programming:\n # 1. Define the state\n # 2. Initialize the state\n # 3. Write the state transition equation\n def uniquePaths(self, m: int, n: int) -> int:\n if m == 0 or n==0:\n return 0\n matrix = [[0 for i in range(n)] for j in range(m)]\n for i in range(m):\n matrix[i][0] = 1\n for i in range(n):\n matrix[0][i] = 1\n for i in range(1,m):\n for j in range(1,n):\n matrix[i][j] = sum([matrix[i-1][j],matrix[i][j-1]])\n return matrix[m-1][n-1]\n \n# @lc code=end\n\n","repo_name":"LianShuaiLong/Codebook","sub_path":"leetcode/62.不同路径.py","file_name":"62.不同路径.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
+{"seq_id":"2642101265","text":"#!/usr/bin/env python3\n# coding = utf-8\nimport sqlite3\nimport sys\nimport os\n\n# Create a default database path for connection.\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDB_PATH = os.path.join(BASE_DIR, r'register.sqlite3' )\n\ndef get_conn():\n con = None\n try:\n con = sqlite3.connect( DB_PATH,\n detect_types = sqlite3.PARSE_COLNAMES | sqlite3.PARSE_DECLTYPES )\n # print( con )\n print( 'Sqlite3 Version: -> ', sqlite3.version )\n print( 'Successful Connection!' )\n return( con )\n \n except sqlite3.Error as e:\n print( e )\n \ndef insert_registration( registration_information ):\n insert_registration_sql = '''\n INSERT OR IGNORE INTO registration(\n first_name,\n last_name,\n contact,\n email,\n question,\n answer,\n password )\n VALUES( ?,?,?,?,?,?,? 
); '''\n conn = get_conn()\n cursor = conn.cursor()\n cursor.executemany( insert_registration_sql, registration_information )\n conn.commit()\n cursor.execute( 'SELECT max( id ) FROM registration' )\n max_id = cursor.fetchone()[0]\n cursor.close()\n conn.close()\n print( 'Current Max ID -> ', max_id )\n return( max_id )\n \ndef check_email( user_email ):\n conn = get_conn()\n cursor = conn.cursor()\n cursor.execute( 'SELECT email FROM registration WHERE email = ?', ( user_email, ))\n query_result = cursor.fetchone()\n cursor.close()\n conn.close()\n return( query_result )\n\n \ndef create_tables():\n setup_pragma_sql = '''\n PRAGMA count_changes = True;\n PRAGMA foreign_keys = True;\n PRAGMA full_column_names = True;\n PRAGMA locking_mode = EXCLUSIVE;\n PRAGMA secure_delete = True;\n PRAGMA auto_vacuum = Full; '''\n \n setup_registration_table_sql = '''\n CREATE TABLE IF NOT EXISTS registration(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n first_name NVARCHAR( 100 ) NOT NULL,\n last_name NVARCHAR( 100 ) NOT NULL,\n contact NVARCHAR( 100 ) NOT NULL,\n email NVARCHAR( 100 ) NOT NULL,\n question NVARCHAR( 100 ) NOT NULL,\n answer NVARCHAR( 100 ) NOT NULL,\n password NVARCHAR( 100 ) NOT NULL,\n modified DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP );'''\n \n conn = get_conn()\n cursor = conn.cursor()\n cursor.executescript( setup_pragma_sql )\n cursor.execute( setup_registration_table_sql )\n conn.commit()\n cursor.close()\n conn.close()\n \n print( 'Pragma and create registration table complete' )\n\nif __name__ == '__main__':\n pass\n ### create_tables() # Execute only once.\n \n","repo_name":"cmiles69/Register","sub_path":"register_db.py","file_name":"register_db.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23291272948","text":"from __future__ import unicode_literals\n# -*- coding: utf8 -*-\nfrom django.conf import settings as d_settings\n\n# Scrapy settings for crawler project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'ecommerce_crawling.crawler'\n\nSPIDER_MODULES = ['ecommerce_crawling.crawler.spiders']\nNEWSPIDER_MODULE = 'ecommerce_crawling.crawler.spiders'\n\nSECRET_KEY = \"secret key value\"\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'crawler (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n#DOWNLOAD_DELAY = 3\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN = 16\n#CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\nCOOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'crawler.middlewares.MyCustomSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'crawler.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\n#ITEM_PIPELINES = {\n# 'crawler.pipelines.SomePipeline': 300,\n#}\n\nAUTOTHROTTLE_ENABLED = True\nAUTOTHROTTLE_START_DELAY = 10.0\nAUTOTHROTTLE_MAX_DELAY = 39.0\nAUTOTHROTTLE_DEBUG = True # enables display of additional throttling statistics\n\nCONCURRENT_REQUESTS = 1\n\nREDIRECT_MAX_TIMES = 3\n\nDOWNLOAD_TIMEOUT = 180\n\n\nEXTENSIONS = {\n 'scrapy.contrib.corestats.CoreStats': 500,\n 'scrapy.contrib.logstats.LogStats': 500,\n # 'util.statsToDb.statsToDb': 800,\n 'scrapy.contrib.throttle.AutoThrottle': 900,\n # 'util.EndMiddleware.VacuumJobdir':900\n}\n\n# DOWNLOADER_MIDDLEWARES = {\n# 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,\n# # rotateUserAgents would be enabled here; it assigns a different user agent to each request.\n# }\n\n\nRETRY_ENABLED = False # Boolean controlling whether to retry a page that could not be reached (disabled here)\n # If it were True, we would retry indefinitely\n\nROBOTSTXT_OBEY = False # Our bot doesn't follow robots.txt recommendations 
(this overrides the ROBOTSTXT_OBEY = True set earlier in this file).\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\n#HTTPCACHE_DIR = 'httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nINSTALLED_APPS=(\n 'ecommerce_crawling.crawler',\n)\n\nDATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'ecom',\n 'USER': 'ecom',\n 'PASSWORD': 'ecom',\n 'HOST': 'localhost',\n }\n }\n\nd_settings.configure(\n DATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'ecom',\n 'USER': 'ecom',\n 'PASSWORD': 'ecom',\n 'HOST': 'localhost',\n }\n },\n INSTALLED_APPS=(\n 'crawler',\n )\n )\n","repo_name":"Korriliam/ecommerce-crawling","sub_path":"ecommerce_crawling/crawler/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"12913815240","text":"import pdb\n\nimport numpy as np\nimport torch\nfrom filter import kalman_filter\nimport matplotlib.pyplot as plt\n\n\n# Lane information of the map\nlane_heading = {}\nlane_heading['gneE05a_0'] = [\n np.arctan((-1.75 + 1.98) / 20.75),\n np.arctan((-1.34 + 1.75) / (49.11 - 20.75)),\n np.arctan((-0.53 + 1.34) / (85.43 - 49.11)),\n np.arctan((1.40 + 0.53) / (132.38 - 85.43)),\n np.arctan((2.52 - 1.40) / (165.1 - 132.38)),\n np.arctan((2.57 - 2.52) / (180 - 165.1))\n]\nlane_heading['gneE05b_0'] = 0\nlane_heading['gneE51_0'] = 0\nlane_heading['gneE01_0'] = 0\nlane_heading['gneE01_1'] = 0\nlane_heading['gneE01_2'] = 0\nlane_heading['gneE01_3'] = 0\nlane_heading['gneE01_4'] = 0\n\nlane_width = {\n 'gneE05a_0': 4.25,\n 'gneE05b_0': 7.4738,\n 'gneE51_0': 6.21,\n 'gneE01_0': 3.6576,\n 'gneE01_1': 3.6576,\n 'gneE01_2': 3.6576,\n 'gneE01_3': 3.6576,\n 'gneE01_4': 3.6576,\n}\n\n\n\ndef clip(x, low, high):\n if x < low:\n return low\n if x > high:\n return high\n return x\n\n\ndef get_lane_info(lane_id, x):\n if lane_id == 'gneE05a_0':\n if 0 <= x < 20.75:\n heading = lane_heading[lane_id][0]\n y = -1.98 + (x-0) * np.tan(heading)\n elif 20.75 <= x < 49.11:\n heading = lane_heading[lane_id][1]\n y = -1.75 + (x-20.75) * np.tan(heading)\n elif 49.11 <= x < 85.43:\n heading = lane_heading[lane_id][2]\n y = -1.34 + (x-49.11) * np.tan(heading)\n elif 85.43 <= x < 132.38:\n heading = lane_heading[lane_id][3]\n y = -0.53 + (x-85.43) * np.tan(heading)\n elif 132.38 <= x < 165.1:\n heading = lane_heading[lane_id][4]\n y = 1.40 + (x-132.38) * np.tan(heading)\n else:\n heading = lane_heading[lane_id][5]\n y = 2.52 + (x-165.1) * np.tan(heading)\n else:\n heading = 0\n if lane_id == 'gneE05b_0':\n y = 2.87\n elif lane_id == 'gneE51_0':\n y = 3.50\n elif lane_id == 'gneE01_0':\n y = 8.41\n elif lane_id == 'gneE01_1':\n y = 12.10\n elif lane_id == 'gneE01_2':\n y = 15.79\n elif lane_id == 'gneE01_3':\n y = 19.48\n else:\n y = 23.18\n return heading, y\n\n\ndef conjugate_gradient(Av_func, b, max_iter=50, residual_tol=1e-10):\n x = torch.zeros_like(b)\n r = b - Av_func(x)\n p = r\n rsold = r.norm() ** 2 # squared 2-norm\n\n for _ in range(max_iter):\n Ap = Av_func(p)\n alpha = rsold / torch.dot(p, Ap)\n x = x + alpha * p\n r = r - alpha * Ap\n rsnew = r.norm() ** 2\n if torch.sqrt(rsnew) < residual_tol:\n break\n p = r + (rsnew / rsold) * p\n rsold = rsnew\n\n return x\n\n\n# 
only available for gaussian distributions\n# refer https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#\n# As the cov mat is diag, so some operations like trace and inverse is achieved by sumation\ndef kl_divergence(dist, old_dist, action_dim, require_grad=False):\n old_mean = old_dist.mean.detach()\n old_cov = old_dist.covariance_matrix.sum(-1).detach()\n if require_grad:\n mean = dist.mean\n cov = dist.covariance_matrix.sum(-1)\n else:\n mean = dist.mean.detach()\n cov = dist.covariance_matrix.sum(-1).detach()\n return 0.5 * ((old_cov / cov).sum(-1)\n + (((old_mean - mean) ** 2) / cov).sum(-1)\n - action_dim\n + torch.log(cov).sum(-1)\n - torch.log(old_cov).sum(-1)).mean()\n\n\ndef get_flat_grads(f, net):\n flat_grads = torch.cat([\n grad.view(-1)\n for grad in torch.autograd.grad(f, net.parameters(), create_graph=True)\n ])\n\n return flat_grads\n\n\ndef get_flat_params(net):\n return torch.cat([param.view(-1) for param in net.parameters()])\n\n\ndef get_l2_norm(net):\n param = get_flat_params(net)\n norm = torch.mean(param ** 2)\n return norm\n\n\ndef compute_discounted_rewards(rewards, gamma):\n rewards = np.array(rewards)\n discounted_rewards = np.zeros_like(rewards)\n discounted_rewards[-1] = rewards[-1]\n for i in reversed(range(len(rewards)-1)):\n discounted_rewards[i] = gamma * discounted_rewards[i+1] + rewards[i]\n return discounted_rewards.tolist()\n\n\ndef set_params(net, new_flat_params):\n start_idx = 0\n for param in net.parameters():\n end_idx = start_idx + np.prod(list(param.shape))\n param.data = torch.reshape(\n new_flat_params[start_idx:end_idx], param.shape\n )\n\n start_idx = end_idx\n\n\ndef expert_collector3(obs):\n _obs = []\n ego_state = obs.ego_vehicle_state\n neighbors = obs.neighborhood_vehicle_states\n neighbor_idx = np.arange(len(neighbors))\n x, y = ego_state.position[:2]\n l = ego_state.bounding_box.length\n w = ego_state.bounding_box.width\n heading = ego_state.heading.real\n speed = ego_state.speed\n _obs = [ego_state]\n neighbor_position = np.array([n.position[:2] for n in neighbors])\n if len(neighbor_position) > 0:\n dist = np.sqrt((neighbor_position[:, 0] - x) ** 2 + (neighbor_position[:, 1] - y) ** 2)\n nei_dist = [(neighbor_idx[i], dist[i]) for i in neighbor_idx]\n nei_dist.sort(key=lambda x: x[1])\n # closest 16 neighbors\n neighbor_num = 16\n l = min(len(neighbor_position), neighbor_num)\n for i in range(l):\n n = neighbors[nei_dist[i][0]]\n n_x, n_y = n.position[:2]\n n_l = n.bounding_box.length\n n_w = n.bounding_box.width\n n_heading = n.heading\n n_speed = n.speed\n _obs += [n_x, n_y, n_l, n_w, n_heading, n_speed]\n for i in range(l, neighbor_num):\n _obs += [0, 0, 0, 0, 0, 0]\n events = [0] * 4\n Event = obs.events\n if len(Event.collisions) > 0:\n events[0] = 1\n if Event.off_road:\n events[1] = 1\n if ego_state.speed < 0:\n events[2] = 1\n if np.pi/2 < normalize(ego_state.heading + np.pi/2, 0, np.pi*2, np.pi*2) < np.pi/2*3:\n events[3] = 1\n _obs += events\n return _obs\n\n\ndef feature15(obs):\n ego_state = obs[0]\n vehicles = []\n for i in range(16):\n vehicle_state = obs[6 * i + 1: 6 * i + 7]\n vehicles.append(vehicle_state)\n\n # ego info\n e_x, e_y = ego_state.position[:2]\n e_h = ego_state.heading.real\n e_s = ego_state.speed\n e_l = ego_state.bounding_box.length\n e_w = ego_state.bounding_box.width\n e_h += np.pi / 2\n e_h = normalize(e_h, 0, 2*np.pi, 2*np.pi)\n e_sx = e_s * np.cos(e_h)\n e_sy = e_s * np.sin(e_h)\n\n # lane info\n lane_id = ego_state.lane_id\n lane_h, lane_y = get_lane_info(lane_id, e_x)\n lane_offset 
= (e_y - lane_y) * np.cos(lane_h)\n lane_relative_h = e_h - lane_h\n e_s_lane = e_s * np.cos(lane_relative_h)\n e_s_lateral = e_s * np.sin(lane_relative_h)\n marker_dist_l = 25.02 - e_y\n if e_x < 180:\n _h, _y = get_lane_info('gneE05a_0', e_x)\n marker_dist_r = e_y - (_y - lane_width['gneE05a_0'] / 2 / np.cos(_h))\n else:\n marker_dist_r = e_y - (3.49 - lane_width['gneE51_0'] / 2)\n\n # neighbor info\n radius_sample = 8\n radius = list(np.arange(0, 2 * np.pi, 2 * np.pi / radius_sample))\n # dx, dy, dsx, dxy, h, l, w\n neibor_info = np.zeros((radius_sample, 7))\n closest_neibor = {i:None for i in range(radius_sample)}\n max_dists = 50\n closest_neibor_dist = np.ones(radius_sample) * max_dists\n for vehicle_state in vehicles:\n if not np.any(vehicle_state):\n continue\n x, y, l, w, heading, speed = vehicle_state\n dist = np.sqrt((x-e_x)**2 + (y-e_y)**2)\n if dist >= max_dists:\n continue\n r_h = np.arctan((y-e_y)/(x-e_x))\n if x-e_x<0:\n r_h += np.pi\n r_h = normalize(r_h, 0, 2*np.pi, 2*np.pi)\n for i in reversed(range(radius_sample)):\n if r_h >= radius[i]:\n if dist < closest_neibor_dist[i]:\n closest_neibor_dist[i] = dist\n closest_neibor[i] = vehicle_state\n break\n for i in range(radius_sample):\n if closest_neibor[i] is None:\n continue\n vehicle_state = closest_neibor[i]\n x, y, l, w, heading, speed = vehicle_state\n heading += np.pi / 2\n heading = normalize(heading, 0, 2*np.pi, 2 * np.pi)\n s_x = speed * np.cos(heading)\n s_y = speed * np.sin(heading)\n neibor_info[i][0] = x - e_x\n neibor_info[i][1] = y - e_y\n neibor_info[i][2] = s_x - e_sx\n neibor_info[i][3] = s_y - e_sy\n neibor_info[i][4] = np.sin(heading)\n neibor_info[i][5] = l\n neibor_info[i][6] = w\n\n ego_info = np.array([\n e_x, e_y, np.sin(lane_relative_h), lane_offset, e_l, e_w, e_s_lane, e_s_lateral,\n e_s, marker_dist_l, marker_dist_r\n ])\n\n # all info\n feature = np.concatenate((ego_info, neibor_info.reshape(-1)))\n return feature\n\n\ndef feature15_descriptor(obs):\n return obs[2:]\n\n\ndef feature15_descriptor1(obs):\n return obs[1:]\n\n\ndef feature15_descriptor4(obs):\n e_x, e_y, sin_h, lane_offset, e_l, e_w, e_s_lane, e_s_lateral, \\\n e_s, marker_dist_l, marker_dist_r = obs[:-56]\n _obs = [sin_h, lane_offset, marker_dist_r, marker_dist_l]\n front1 = obs[11:18]\n front2 = obs[-7:]\n lside1 = obs[18:25]\n lside2 = obs[25:32]\n back1 = obs[32:39]\n back2 = obs[39:46]\n rside1 = obs[46:53]\n rside2 = obs[53:60]\n frontinfo = get_info(front1, front2, 1, e_l, e_w)\n lsideinfo = get_info(lside1, lside2, 2, e_l, e_w)\n backinfo = get_info(back1, back2, 3, e_l, e_w)\n rsideinfo = get_info(rside1, rside2, 4, e_l, e_w)\n _obs = _obs + frontinfo + lsideinfo + backinfo + rsideinfo\n\n return _obs\n\n\ndef get_info(neighbor1, neighbor2, flag, el, ew):\n if neighbor1[0] == 0 and neighbor2[0] == 0:\n return [0, 0, 0, 0]\n if neighbor1[0] == 0:\n n = 2\n if neighbor2[0] == 0:\n n = 1\n d1 = neighbor1[0]**2 + neighbor1[1]**2\n d2 = neighbor2[0]**2 + neighbor2[1]**2\n if d1 < d2:\n n = 1\n else:\n n = 2\n if n == 2:\n l, w = neighbor2[-2:]\n dx = neighbor2[0]\n dy = neighbor2[1]\n dsx, dsy = neighbor2[2:4]\n if n == 1:\n l, w = neighbor1[-2:]\n dx = neighbor1[0]\n dy = neighbor1[1]\n dsx, dsy = neighbor1[2:4]\n if flag == 1:\n dx -= (l + el) / 2\n if flag == 2:\n dy -= (w + ew) / 2\n if flag == 3:\n dx += (l + el) / 2\n if flag == 4:\n dy += (w + ew) / 2\n return [dx, dy, dsx, dsy]\n\n\ndef filter(actions):\n action = np.array(actions)\n action[1:-1] = 0.5 * action[1:-1] + 0.25 * (action[:-2] + action[2:])\n return 
action.tolist()\n\n\ndef get_corners(x, y, l, w, heading):\n corner_pass = [(l / 2, w / 2), (l / 2, -w / 2), (-l / 2, -w / 2), (-l / 2, w / 2)]\n corners = []\n for i in range(4):\n p = corner_pass[i]\n xc, yc = x, y\n xc += (p[0] * np.cos(heading) + p[1] * np.sin(heading))\n yc += (p[0] * np.sin(heading) - p[1] * np.cos(heading))\n corners.append((xc, yc))\n return corners\n\n\ndef normalize(angle, low, high, T):\n _angle = angle\n while _angle < low:\n _angle += T\n while _angle >= high:\n _angle -= T\n return _angle\n\n\ndef get_cross_point_dist(e_x, e_y, real_r, corners, n_h):\n _real_r = normalize(real_r, -np.pi/2, np.pi/2, np.pi)\n # pdb.set_trace()\n corner_h = [n_h+np.pi/2, n_h, n_h+np.pi/2, n_h]\n candidate_x = []\n candidate = []\n for i in range(4):\n if abs(_real_r - corner_h[i]) < 1e-4:\n continue\n if abs(_real_r - np.pi/2) < 1e-5 or abs(_real_r + np.pi/2) < 1e-5:\n if abs(corner_h[i] - np.pi / 2) < 1e-5 or abs(corner_h[i] + np.pi / 2) < 1e-5:\n continue\n x = e_x\n elif abs(corner_h[i] - np.pi/2) < 1e-5 or abs(corner_h[i] + np.pi/2) < 1e-5:\n x = corners[i][0]\n else:\n x = (corners[i][1]-e_y+e_x*np.tan(_real_r)-corners[i][0]*np.tan(corner_h[i])) \\\n / (np.tan(_real_r) - np.tan(corner_h[i] + 1e-8))\n if abs(x) > 1e4:\n continue\n if np.abs(np.tan(_real_r)) > 1e4:\n y = corners[i][1] + (x-corners[i][0])*np.tan(corner_h[i])\n else:\n y = e_y + (x-e_x) * np.tan(_real_r)\n if abs(y) > 1e3:\n continue\n if (corners[i][0]-x)*(corners[(i+1)%4][0]-x)<=0 or (corners[i][1]-y)*(corners[(i+1)%4][1]-y)<=0:\n candidate_x.append(x)\n candidate.append((x, y))\n # pdb.set_trace()\n dist = [np.sqrt((p[0]-e_x)**2 + (p[1]-e_y)**2) for p in candidate]\n return np.min(dist)\n\n\ndef KalmanFilter(z):\n n_iter = len(z)\n # assumes A = 1 and H = 1 here\n\n # initial parameters\n\n sz = (n_iter,) # size of array\n\n # Q = 1e-5 # process variance\n Q = 1e-6 # process variance\n # allocate space for arrays\n xhat = np.zeros(sz) # a posteriori estimate of x\n P = np.zeros(sz) # a posteriori error estimate\n xhatminus = np.zeros(sz) # a priori estimate of x\n Pminus = np.zeros(sz) # a priori error estimate\n K = np.zeros(sz) # gain or blending factor\n\n R = 0.1 ** 2 # estimate of measurement variance, change to see effect\n\n # initial guesses\n xhat[0] = z[0]\n P[0] = 1.0\n A = 1\n H = 1\n\n for k in range(1, n_iter):\n # time update\n xhatminus[k] = A * xhat[k - 1] # X(k|k-1) = AX(k-1|k-1) + BU(k) + W(k),A=1,BU(k) = 0\n Pminus[k] = A * P[k - 1] + Q # P(k|k-1) = AP(k-1|k-1)A' + Q(k) ,A=1\n\n # measurement update\n K[k] = Pminus[k] / (Pminus[k] + R) # Kg(k)=P(k|k-1)H'/[HP(k|k-1)H' + R],H=1\n xhat[k] = xhatminus[k] + K[k] * (z[k] - H * xhatminus[k]) # X(k|k) = X(k|k-1) + Kg(k)[Z(k) - HX(k|k-1)], H=1\n P[k] = (1 - K[k] * H) * Pminus[k] # P(k|k) = (1 - Kg(k)H)P(k|k-1), H=1\n return xhat\n\n\ndef get_x(x, y):\n virtual_x = np.zeros_like(x)\n dx = np.sqrt((y[1:]-y[:-1])**2 + (x[1:]-x[:-1])**2)\n for i in range(len(x)-1):\n virtual_x[i+1] = virtual_x[i]+dx[i]\n return virtual_x\n\n\ndef show_filter(v, a):\n vf, af, _ = kalman_filter(v, a, a, 0.1)\n plt.plot(range(len(v)), v)\n plt.plot(range(len(v)), vf)\n plt.savefig('test/vf.png')\n plt.close()\n plt.plot(range(len(v)), a)\n plt.plot(range(len(v)), af)\n plt.savefig('test/af.png')\n plt.close()","repo_name":"RUOKUNH/gail-auto-driving","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14306,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"5712104289","text":"from django.urls import 
path\nfrom . import views\n\nurlpatterns = [\n path('', views.homepage),\n path('search_trail', views.search_trail),\n path('trail_profile/', views.trail_profile),\n path('favorite_trail', views.favorite_trail),\n path('user_profile/', views.user_profile),\n\n]","repo_name":"Enrique-ibarra1/on_the_trail","sub_path":"on_the_trail/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37207928848","text":"'''\n@File : first_log_to_db.py\n@Time : 2021/11/17 20:11:35\n@Author : Chengze Zhang\n@Contact : 929160190@qq.com\n'''\n\nimport logging as log\nfrom typing import List, Tuple\n\nfrom base.log_entry import extractLogEntry\nfrom base.url_entry import UrlEntry\nfrom base.url_match import UrlMatcher\nfrom sql.db import DB as db\nfrom sql.db import (ORIGIN_TABLE_LABELS, ORIGIN_TABLE_NAME,\n URL_ENTRY_TABLE_LABELS, URL_ENTRY_TABLE_NAME)\nfrom utils.file_utils import getFilesByPath\nfrom utils.logger_utils import logInit\n\nORIGIN_LOG_LOCAL_PATH = '../../origindata'\n# local directory where the raw logs are stored\n\nLOG_FILE_SUFFIX = '.log'\n# format/suffix of the raw log files\n\n\nURL_ENTRY_LOCAL_PATH = \"data/urls\"\n# local path of the urlEntry files\n\nURL_ENTRY_FILE_SUFFIX = '.urls'\n# suffix of the local urlEntry files\n\n\ndef uploadUrlEntryToDB():\n \"\"\"\n uploadUrlEntryToDB is only called the first time, to upload the urlEntry records from the local urls files to the database\n \"\"\"\n files = getFilesByPath(\n URL_ENTRY_LOCAL_PATH, URL_ENTRY_FILE_SUFFIX, [], [])\n for filename in files:\n log.info('Start processing %s' % filename)\n successCount, failCount = 0, 0\n with open(filename, 'r') as f:\n values = []\n for line in f:\n try:\n entry = UrlEntry(line)\n values.append(entry.getSet())\n successCount += 1\n except Exception as e:\n log.debug(e)\n failCount += 1\n log.info('Found %d rules, parsed: %d, failed: %d' %\n (successCount+failCount, successCount, failCount))\n try:\n db.insert(URL_ENTRY_TABLE_NAME, URL_ENTRY_TABLE_LABELS, values)\n except Exception as e:\n log.error(e)\n\n\ndef downloadUrlEntryFromDB() -> List[UrlEntry]:\n \"\"\"\n downloadUrlEntryFromDB fetches all entries from the database\n \"\"\"\n try:\n entrySets = db.query(URL_ENTRY_TABLE_NAME, URL_ENTRY_TABLE_LABELS)\n entrys = []\n for entry in entrySets:\n entrys.append(UrlEntry(paraSet=entry))\n return entrys\n except Exception:\n return []\n\n\ndef getUrlEntryFromLocal() -> List[UrlEntry]:\n \"\"\"\n getUrlEntryFromLocal loads the UrlEntry records from local files\n Returns:\n List[UrlEntry]: all parsed urlEntry records\n \"\"\"\n files = getFilesByPath(\n URL_ENTRY_LOCAL_PATH, URL_ENTRY_FILE_SUFFIX, [], [])\n entrys = []\n for filename in files:\n log.info('Start processing %s' % filename)\n successCount, failCount = 0, 0\n with open(filename, 'r') as f:\n for line in f:\n try:\n entry = UrlEntry(line)\n entrys.append(entry)\n successCount += 1\n except Exception as e:\n log.debug(e)\n failCount += 1\n\n log.info('Found %d rules, parsed: %d, failed: %d' %\n (successCount+failCount, successCount, failCount))\n return entrys\n\n\ndef uploadOriginLogsToDB():\n \"\"\"\n uploadOriginLogsToDB reads the local log files and uploads them to the database\n \"\"\"\n files = getFilesByPath(\n ORIGIN_LOG_LOCAL_PATH, LOG_FILE_SUFFIX, ['icaslog'], ['api', 'token'])\n\n # build the UrlMatcher used for URL normalization; if it raises, the run aborts automatically\n matcher = UrlMatcher(downloadUrlEntryFromDB())\n # url = '/fa/commoncore/assetCommonCore/remoteGetCurrenUserInfo'\n # print(matcher.convert(url, 'GET'))\n allSuccess, allFail = 0, 0\n for filename in files:\n log.info('Start processing %s' % filename)\n successCount, failCount = 0, 0\n with open(filename, 'r') as f:\n values = []\n for line in f:\n try:\n value = extractLogEntry(line)\n 
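# value[4] holds the raw request URL and value[5] the HTTP method (per the log.debug call below,\n # which labels value[4] as dirtyEntry and the appended value[15] as the normalized urlEntry)\n 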
value.append(matcher.convert(value[4], value[5]))\n log.debug(\"dirtyEntry: %s, urlEntry: %s\" %\n (value[4], value[15]))\n values.append(tuple(value))\n successCount += 1\n except Exception as e:\n log.debug(e)\n failCount += 1\n log.info('Found %d log lines, parsed: %d, failed: %d' %\n (successCount+failCount, successCount, failCount))\n allSuccess, allFail = allSuccess + successCount, allFail + failCount\n try:\n db.insert(ORIGIN_TABLE_NAME, ORIGIN_TABLE_LABELS, values)\n except Exception as e:\n log.error(e)\n\n log.info('Processed %d files with %d log lines in total: %d succeeded, %d failed' %\n (len(files), allSuccess + allFail, allSuccess, allFail))\n\n\nif __name__ == '__main__':\n logInit(__file__, log.INFO, isStreaming=True)\n uploadOriginLogsToDB()\n","repo_name":"chasezcz/LogAnomalyIdentify","sub_path":"src/upload_data_to_db.py","file_name":"upload_data_to_db.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"654449585","text":"class Node(object):\n\n def __init__(self, value, *args, **kwargs):\n self.value = value\n self.next = None\n\n def __repr__(self):\n return str(self.value)\n\n\nclass LinkedList(object):\n\n def __init__(self, *args, **kwargs):\n self.head = None\n\n def __repr__(self):\n node = self.head\n if not node:\n # __repr__ must return a str, never None\n return ''\n\n values = [str(node.value)]\n while node.next:\n node = node.next\n values.append(str(node.value))\n\n return ' '.join(values)\n\n def size(self):\n node = self.front()\n if not node:\n return 0\n\n length = 1\n\n while node.next:\n length += 1\n node = node.next\n\n return length\n\n def empty(self):\n return self.head is None\n\n def value_at(self, index):\n node = self.front()\n if not node:\n return\n\n for i in range(index):\n if node.next is None:\n raise KeyError(\"Index does not exist.\")\n\n node = node.next\n\n return node.value\n\n def push_front(self, value):\n node = self.front()\n self.head = Node(value)\n self.head.next = node\n\n def pop_front(self):\n node = self.front()\n if not node:\n return None\n self.head = node.next\n value = node.value\n del node\n return value\n\n def push_back(self, value):\n node = self.back()\n if not node:\n self.head = Node(value)\n return\n\n node.next = Node(value)\n\n def pop_back(self):\n prev = None\n node = self.head\n if not node:\n return\n\n while node.next:\n prev = node\n node = node.next\n\n if prev:\n prev.next = None\n else:\n # single-element list: it is now empty\n self.head = None\n value = node.value\n del node\n\n return value\n\n def front(self):\n return self.head\n\n def back(self):\n if self.head is None:\n return None\n\n node = self.head\n while node.next:\n node = node.next\n\n return node\n\n def insert(self, index, value):\n node = self.front()\n if not node:\n return\n\n for i in range(index):\n if node.next is None:\n raise KeyError(\"Index does not exist.\")\n\n node = node.next\n\n new_node = Node(value)\n new_node.next = node.next\n node.next = new_node\n\n def erase(self, index):\n node = self.front()\n prev = None\n\n if not node:\n return\n\n for _ in range(index):\n if node.next is None:\n raise KeyError(\"Index does not exist.\")\n\n prev = node\n node = node.next\n\n if prev:\n prev.next = node.next\n else:\n self.head = node.next\n\n del node\n\n def value_n_from_end(self, n):\n p1 = self.front()\n p2 = self.front()\n\n length = 0\n while p2.next:\n p2 = p2.next\n length += 1\n\n for _ in range(length-n):\n p1 = p1.next\n\n return p1.value\n\n def reverse(self):\n prev = None\n nextNode = None\n node = self.head\n if not node:\n return\n\n while node:\n nextNode = node.next\n node.next = prev\n prev = node\n 
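# standard pointer-reversal step: advance to the saved successor; 'prev' now heads the reversed prefix\n 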
node = nextNode\n\n self.head = prev\n\n def remove_value(self, value):\n # remove the first node whose value matches; compare node.value, not the node object itself\n node = self.front()\n index = 0\n while node:\n if node.value == value:\n self.erase(index)\n return\n node = node.next\n index += 1\n\n\nlst = LinkedList()\nlst.push_back(1)\nlst.push_back(2)\nlst.push_back(3)\n\nassert (lst.size() == 3), \"%s is not 3\" % lst.size()\n\nlst.reverse()\nassert (str(lst) == \"3 2 1\"), \"%s is not list reversed\" % lst\n\nlst.erase(1)\n\nassert (str(lst) == \"3 1\"), \"%s index 1 is not removed.\" % lst\n","repo_name":"marcuslind90/python-challenges","sub_path":"datastructures/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41659795371","text":"import os\nimport yaml\nfrom collections import OrderedDict\n\ndef get_tutorial_settings(tutorial_slug, chapter_slug):\n tutorial_name = tutorial_slug\n chapter_name = chapter_slug\n\n MATERIALS_IGNORED_FILES = [\"SETTINGS.yaml\", \"images\"]\n\n material_path = os.path.join(os.getcwd(), 'materialy', tutorial_name)\n\n files_in_dir = os.listdir(material_path)\n folders_in_dir = []\n\n files_in_dir.sort()\n \n for _ignored_filename in MATERIALS_IGNORED_FILES:\n if _ignored_filename in files_in_dir:\n files_in_dir.remove(_ignored_filename)\n \n for _file in files_in_dir:\n if os.path.isdir(os.path.join(material_path, _file)):\n folders_in_dir.append(_file)\n\n with open(os.path.join('materialy', tutorial_name, 'SETTINGS.yaml'), encoding=\"utf8\") as file:\n material_settings = yaml.full_load(file)\n\n material_settings['content'] = OrderedDict()\n\n material_settings['chapter_id'] = \"0.\"\n\n material_settings['prev_chapter'] = None\n material_settings['next_chapter'] = None\n\n prev_chapter = None\n _next_chapter_flag = False\n\n for _folder in folders_in_dir:\n _chapter_slug = _folder.split(\"_\")[2:]\n _chapter_slug = \"_\".join(_chapter_slug)\n material_settings['content'][_chapter_slug] = {}\n material_settings['content'][_chapter_slug]['path'] = f\"{os.path.join(_folder, _chapter_slug)}.md\"\n\n if not chapter_name:\n material_settings['chapter_name'] = _chapter_slug\n chapter_name = _chapter_slug\n \n if _folder.split(\"_\")[1] == \"00\":\n material_settings['content'][_chapter_slug]['chapter'] = True\n material_settings['content'][_chapter_slug]['index'] = f\"{int(_folder.split('_')[0])}.\"\n else:\n material_settings['content'][_chapter_slug]['index'] = f\"{int(_folder.split('_')[0])}.{int(_folder.split('_')[1])}.\"\n\n if _next_chapter_flag:\n material_settings['next_chapter'] = _chapter_slug\n _next_chapter_flag = False\n\n if _chapter_slug == chapter_name:\n material_settings['chapter_id'] = material_settings['content'][_chapter_slug]['index']\n material_settings['chapter_folder'] = _folder\n material_settings['prev_chapter'] = prev_chapter\n _next_chapter_flag = True\n\n if os.path.exists(os.path.join(material_path, _folder, \"video.md\")):\n material_settings['content'][_chapter_slug]['video'] = True\n\n prev_chapter = _chapter_slug\n\n return material_settings","repo_name":"ucimeshardverom/ucimeshardverom-website-old","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"35582181533","text":"from time import perf_counter\nimport numpy as np\n\nALFA = '_ABCDEFGHIJKLMNO' # enbart for indexkonvertering. 
Ej goal i Borowski!\n\n# A B C D E F G H I J K L M N O\n# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\n#tileSubsets = [-1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] # ABCDEFGH IJKLMNO\n#tilePositions = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6]\n\n#tileSubsets = [-1, 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2] # ABCEF DGHKL IJMNO\n#tilePositions = [-1, 0, 1, 2, 0, 3, 4, 1, 2, 0, 1, 3, 4, 2, 3, 4]\n\n# A B C D E F G H I J K L M N O\n# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\n#tileSubsets = [-1, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 1, 0, 1, 1, 1] # GHL AEFIJM GHKLNO\n#tilePositions = [-1, 0, 1, 2, 3, 4, 5, 0, 1, 0, 1, 2, 2, 3, 4, 5] #\n\ntileSubsets = [-1, 1, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 2, 2] # BCD AEFIJM GHKLNO\ntilePositions = [-1, 0, 0, 1, 2, 1, 2, 0, 1, 3, 4, 2, 3, 5, 4, 5] #\n\nGOAL = 'ABCDEFGHIJKLMNO_'\n\nN = 4\nnodeCount = 0\nnodes = {}\ndb = {}\n\ncostTable0 = None\ncostTable1 = None\ncostTable2 = None\n\ndef loadCostTable(filename, n):\n\twith open(filename, \"rb\") as f: return f.read(n)\ncostTable0 = loadCostTable('db/15-puzzle-663-0.db',4096) # BCD\ncostTable1 = loadCostTable('db/15-puzzle-663-1.db',16777216) # AEFIJM\ncostTable2 = loadCostTable('db/15-puzzle-663-2.db',16777216) # GHKLNO\n\nclass Node:\n\n\tdef __init__(self,state,move=0):\n\t\tglobal nodeCount\n\t\tself.state = state\n\t\tself.move = move\n\t\tnodeCount += 1\n\n\tdef is_goal(self): return self.state == GOAL\n\n\tdef successors(self): # 0 moves\n\t\tdef swap(m):\n\t\t\tif self.move == -m: return\n\t\t\tsk = list(self.state)\n\t\t\tsk[loc], sk[loc+m] = sk[loc+m], sk[loc]\n\t\t\tret.append(Node(''.join(sk),m))\n\t\tret = []\n\t\tloc = self.state.index('_')\n\t\tif loc % N != 0: swap(-1) # Left\n\t\tif loc % N != N-1: swap(+1) # Right\n\t\tif loc//N != 0: swap(-N) # Up\n\t\tif loc//N != N-1: swap(+N) # Down\n\t\treturn ret\n\n\tdef succ(self): # builds pattern database\n\t\tdef swap(i,j):\n\t\t\tif self.state[j] == '.':\n\t\t\t\tsk = list(self.state)\n\t\t\t\tsk[i], sk[j] = sk[j], sk[i]\n\t\t\t\tstate = ''.join(sk)\n\t\t\t\tif state not in nodes: ret.append(Node(state))\n\t\tret = []\n\t\tfor i,tile in enumerate(self.state):\n\t\t\tif tile != '.':\n\t\t\t\tif i % N != 0: swap(i,i-1) # Left\n\t\t\t\tif i % N != N - 1: swap(i,i+1) # Right\n\t\t\t\tif i // N != 0: swap(i,i-N) # Up\n\t\t\t\tif i // N != N - 1: swap(i,i+N) # Down\n\t\treturn ret\n\n\tdef dbindex(self,sn): # builds pattern database\n\t\tresult = 0\n\t\tfor pos in range(16):\n\t\t\tif self.state[pos] == '.': continue\n\t\t\ttile = ALFA.index(self.state[pos])\n\t\t\tif tile != 0 and sn == tileSubsets[tile]:\n\t\t\t\tresult |= pos << (tilePositions[tile] << 2)\n\t\treturn result\n\n\tdef display(self):\n\t\tresult = ''\n\t\tfor i in range(N*N):\n\t\t\tif i % N == 0: result += \"\\n\"\n\t\t\tresult += ' ' + self.state[i]\n\t\tresult += ' ' + self.state + ' value=' + str(self.h())\n\t\tif self.state == GOAL: result += \" Solved!\"\n\t\treturn result\n\n\n\tdef h(self):\n\t\t# def manhattan(i, j):\n\t\t# \treturn abs(i // N - j // N) + abs(i % N - j % N)\n\t\t# def manhattanTotal():\n\t\t# \treturn 1.0 * sum([manhattan(i, GOAL.index(self.state[i])) for i in range(N * N) if self.state[i] != '_'])\n\t\t# return manhattanTotal()\n\n\t\tindex0 = 0\n\t\tindex1 = 0\n\t\tindex2 = 0\n\t\tfor pos in range(N*N-1, -1, -1):\n\t\t\ttile = ALFA.index(self.state[pos])\n\t\t\tif tile != 0:\n\t\t\t\tsubsetNumber = tileSubsets[tile]\n\t\t\t\tif subsetNumber == 0: index0 |= pos << (tilePositions[tile] << 2)\n\t\t\t\tif subsetNumber == 1: index1 |= pos << (tilePositions[tile] 
<< 2)\n\t\t\t\tif subsetNumber == 2: index2 |= pos << (tilePositions[tile] << 2)\n\t\treturn costTable0[index0] + costTable1[index1] + costTable2[index2]\n\ndef makeCostTable(sn,state,n): # builds pattern database\n\tprint('')\n\tglobal nodes\n\tresult = [255]*n\n\n\tnodes = {}\n\tnode = Node(state)\n\tq = [node]\n\tlevel = 0\n\twhile True:\n\t\tr = []\n\t\tif len(q)==0: break\n\t\tfor node in q:\n\t\t\tif node.state in nodes: continue\n\t\t\tindex = node.dbindex(sn)\n\t\t\tnodes[node.state] = True\n\t\t\tresult[index] = level\n\t\t\tfor child in node.succ():\n\t\t\t\tif child.state not in nodes: r.append(child)\n\t\tlevel += 1\n\t\tprint(f'moves : {level:2} {(perf_counter() - start):10.3}')\n\t\tq = r\n\treturn result\n\nstart = perf_counter()\n#costTable1 = makeCostTable(1,\"........IJKLMNO.\",16**7) # IJKLMNO\n# print(perf_counter() - start)\n# costTable0 = makeCostTable(0,\"ABCDEFGH........\",16**8) # ABCDEFGH\n# print(perf_counter() - start)\n\n# costTable0 = makeCostTable(0,\"ABC.EF..........\",16**5) # ABCEF 16 sec\n# costTable1 = makeCostTable(1,\"...D..GH..KL....\",16**5) # DGHKL 16 sec\n# costTable2 = makeCostTable(2,\"........IJ..MNO.\",16**5) # IJMNO 16 sec\n\n# costTable0 = makeCostTable(0,\"......GH...L....\",16**3) # GHL 0.059 sec\n# costTable1 = makeCostTable(0,\"ABCDEF..........\",16**6) # ABCDEF 180 sec\n# costTable2 = makeCostTable(0,\"........IJK.MNO.\",16**6) # IJKMNO 184 sec\n\n# costTable0 = makeCostTable(0,\".BCD............\",16**3) # BCD 0 sec\n# costTable1 = makeCostTable(1,\"A...EF..IJ..M...\",16**6) # AEFIJM 200 sec\n# costTable2 = makeCostTable(2,\"......GH..KL.NO.\",16**6) # GHKLNO 200 sec\nprint(perf_counter() - start)\n\ndef dfs(node, limit, path=[]):\n\tif node.state in nodes and len(path) >= nodes[node.state]: return []\n\tnodes[node.state] = len(path)\n\tif node.is_goal(): return path\n\tif len(path) == limit: return []\n\tfor child in node.successors():\n\t\tif len(path) + 1 + child.h() <= limit:\n\t\t\tp = dfs(child, limit, path + [child])\n\t\t\tif len(p) > 0: return p\n\treturn []\n\n# def skapaDB(node, limit, level = 0):\n# \tif node.state in db: return\n# \tdb[node.state] = level\n# \tif level == limit: return\n# \tfor child in node.successors():\n# \t\tp = skapaDB(child, limit, level+1)\n# \t\tif len(p) > 0: return\n# \treturn\n\n\ndef skapaDB(n=17):\n\tdb = {}\n\tnode = Node('ABCDEFGHIJKLMNO_')\n\tnode.parent = ''\n\tq1 = [node]\n\tfor i in range(n):\n\t\tq2 = []\n\t\tfor node in q1:\n\t\t\tif node.state not in db:\n\t\t\t\tdb[node.state] = [i,node.parent]\n\t\t\t\tfor child in node.successors():\n\t\t\t\t\tchild.parent = node.state\n\t\t\t\t\tq2.append(child)\n\t\tq1 = q2\n\treturn db\n\n# pga olika goals.\ndef boro(lst): return ''.join([ALFA[lst[i]] for i in range(16)])\ndef korf(lst):\n\tprint(''.join(['_ABCDEFGHIJKLMNO_'[16-lst[15-i]] for i in range(16)]))\n\tlst = [16 if lst[i]==0 else lst[i] for i in range(16)]\n\tprint([16-lst[15-i] for i in range(16)]) # to borowski\n\nstart = perf_counter()\n#db = skapaDB(17)\nprint(nodeCount, perf_counter() - start)\n# for node in db:\n# \tprint(node,db[node])\n\n\n# Two different goal states:\n\n# _ A B C Korf, Culberson, Gasser\n# D E F G\n# H I J K\n# L M N O\n\n# A B C D Borowski\n# E F G H\n# I J K L\n# M N O _\n\n#boro([9,8,3,6,0,13,1,4,5,2,15,7,10,11,12,14]) # IHCF_MADEBOGJKLN 41 OK! 0.065 sek (java 15ms)\n#korf([3,14,9,11,5,4,8,2,13,12,6,7,10,1,15,0]) # _AOFIJDCNHLKEGBM 46 OK! 0.298 sek (java 15ms)\n#boro([9,7,5,2,0,3,1,13,11,4,6,10,15,12,14,8]) # IGEB_CAMKDFJOLNH 51 OK! 
0.596 sek (java 31ms)\n # ONAFIKDL_JGCMHEB 52 PDB663 = 3.208 sek MD = 160.8 sek\n#boro([15,6,7,3,2,5,1,10,4,0,9,13,12,11,8,14]) # OFGCBEAJD_IMLKHN 53 OK! 1.232 sek (java 46ms)\n#boro([10,3,12,5,6,15,13,7,14,11,9,2,0,4,1,8]) # JCLEFOMGNKIB_DAH 57 OK! 1.981 sek (java 31ms)\n#boro([14,15,7,9,5,2,8,6,10,4,1,0,12,11,3,13]) # NOGIEBHFJDA_LKCM 61 OK! 76.166 sek (java 989ms)\n#boro([14,13,15,8,7,12,5,9,3,2,10,0,1,4,11,6]) # NMOHGLEICBJ_ADKF 63 OK! 67.353 sek (java 579ms)\n#korf([11,15,14,13,1,9,10,4,3,6,2,12,7,5,8,0]) # _HKIDNJMLFGOCBAE 64 NY! 8.867 sek (java 47ms)\n#boro([15,14,7,11,0,10,6,1,12,2,13,5,3,4,8,9]) # ONGK_JFALBMECDHI 65 OK! 33.505 sek (java 297ms)\n#korf([11,14,13,1,2,3,12,4,15,7,9,5,10,6,8,0]) # _HJFKGIALDMNOCBE 66 OK! 87.986 sek (java 703ms)\n#korf([15,14,8,12,10,11,9,13,2,6,5,1,3,7,4,0]) # _LIMOKJNCGEFDHBA 80 OK! 22 h (java 255sek) (Gasser)\n\n# def convert(state):\n# \tres = ''\n# \tfor ch in state:\n# \t\ti = '_ABCDEFGHIJKLMNO'.index(ch)\n# \t\tres += '0123456789ABCDEF'[i]\n# \treturn res\n\nstart = perf_counter()\n#ALFA = convert(ALFA)\n#GOAL = convert(GOAL)\nstartNode = Node('ONAFIKDL_JGCMHEB')\n\n#fe169b4c0a73d852\n#ONAFIKDL_JGCMHEB\n\nfor limit in range(99):\n\tnodes = {}\n\tprint(f\"Limit Search at level {limit}\",nodeCount,perf_counter() - start)\n\tpath = dfs(startNode, limit)\n\tif len(path) > 0:\n\t\tprint(f\"Finished {limit}\", nodeCount, perf_counter() - start)\n\t\tfor p in path: print(p.display())\n\t\tif path[-1].state in db:\n\t\t\tprint('\\nfinns i db!')\n\t\t\tstate = db[path[-1].state][1]\n\t\t\twhile state != '':\n\t\t\t\tnode = Node(state)\n\t\t\t\tprint(node.display())\n\t\t\t\tstate = db[state][1]\n\t\tprint(len(path)) # + db[path[-1].state][0])\n\t\tbreak\n\n# nodes = {}\n# limit = 10\n# skapaDB(Node('_ABCDEFGHIJKLMNO', 'ONHLJKIMBFEACGD_'), limit)\n# print(f\"Limit Search at level {limit}\", nodeCount, perf_counter() - start)\n\n\t# def h_MDLC(self):\n\t#\n\t# \tdef manhattan(i, j): return abs(i // N - j // N) + abs(i % N - j % N)\n\t# \tdef manhattanTotal(): return 1.0 * sum([manhattan(i, self.target.index(self.state[i])) for i in range(N*N) if self.state[i] != '_'])\n\t#\n\t# \tdef movesForConflicts(conflictCount):\n\t# \t\ta,b,c,d = conflictCount\n\t# \t\tif a == 4: return 0\n\t# \t\telif d == 4:return 6\n\t# \t\telif b == 2 and c != 2 or b == 3:return 2\n\t# \t\telse: return 4\n\t#\n\t# \treturn manhattanTotal()\n\t#\n\t# \t#if self.state in db:\n\t# \t\t#print('h',manh,self.state,db[self.state][0])\n\t# \t\t#return db[self.state][0]\n\t# \t#else:\n\t#\n\t# \treqMoves = 0\n\t# \tcp = self.target # correct positions\n\t#\n\t# \thConflicts = [0] * 16\n\t# \tvConflicts = [0] * 16\n\t#\n\t# \tboard = self.state\n\t#\n\t# \tfor i in range(4): # row\n\t# \t\tfor j in range(4): # col\n\t# \t\t\tij = 4*i+j\n\t# \t\t\ttileij = board[ij]\n\t# \t\t\tif tileij != '_':\n\t#\n\t# \t\t\t\tif cp.index(tileij) // 4 == i:\n\t# \t\t\t\t\tfor k in range(j + 1, 4):\n\t# \t\t\t\t\t\tik = 4*i+k\n\t# \t\t\t\t\t\ttileik = board[ik]\n\t# \t\t\t\t\t\tif tileik != '_' and cp.index(tileik) // 4 == i and cp.index(tileik) % 4 < cp.index(tileij) % 4:\n\t# \t\t\t\t\t\t\thConflicts[ik] += 1\n\t# \t\t\t\t\t\t\thConflicts[ij] += 1\n\t#\n\t# \t\t\t\tif cp.index(tileij) % 4 == j:\n\t# \t\t\t\t\tfor k in range(i + 1, 4):\n\t# \t\t\t\t\t\tkj = 4*k+j\n\t# \t\t\t\t\t\ttilekj = board[kj]\n\t# \t\t\t\t\t\tif tilekj != '_' and cp.index(tilekj) % 4 == j and cp.index(tilekj) // 4 < cp.index(tileij) // 4:\n\t# \t\t\t\t\t\t\tvConflicts[kj] += 1\n\t# \t\t\t\t\t\t\tvConflicts[ij] += 1\n\t#\n\t# \tfor i 
in range(4):\n\t# \t\tconflictCount = [0, 0, 0, 0]\n\t# \t\tfor j in range(4):\n\t# \t\t\tij = 4*i+j\n\t# \t\t\tconflictCount[hConflicts[ij]] += 1\n\t# \t\t#print('a',conflictCount,movesForConflicts(conflictCount))\n\t# \t\treqMoves += movesForConflicts(conflictCount)\n\t#\n\t# \tfor j in range(4):\n\t# \t\tconflictCount = [0, 0, 0, 0]\n\t# \t\tfor i in range(4):\n\t# \t\t\tij = 4*i+j\n\t# \t\t\tconflictCount[vConflicts[ij]] += 1\n\t# \t\t#print('b',conflictCount,movesForConflicts(conflictCount))\n\t# \t\treqMoves += movesForConflicts(conflictCount)\n\t#\n\t# \t#print('h',self.state, vConflicts, hConflicts, reqMoves,manhattanTotal())\n\t# \treturn reqMoves + manhattanTotal()\n","repo_name":"ChristerNilsson/Lab","sub_path":"2019/015B-PuzzleCS50/IDA4.py","file_name":"IDA4.py","file_ext":"py","file_size_in_byte":10758,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"7726997677","text":"# import packages\nimport requests, csv\nfrom bs4 import BeautifulSoup\n\n# output location\noutputLoc = 'shared-connections.csv'\n\n# import file\nfileName = open('shared-connections.html')\nsoup = BeautifulSoup(fileName, 'html.parser')\n\n# instantiate list\nmatches = []\n\nfor match in soup.select('match-entry'):\n # instantiate matchRow\n matchRow = []\n\n # identify user name and url\n matchUser = match.select('a.userCardTitle')[0]\n matchName = matchUser.getText()\n matchId = matchUser.get('href').split('/with/')[1]\n\n # identify shared dna\n matchDNA = match.select('span.dnaGrayDark')[0].getText().split(\": \")[1]\n\n matchRow = [matchName, matchId, matchDNA]\n\n matches.append(matchRow)\n\n# create results file\noutputFile = open(outputLoc, 'w')\noutputWriter = csv.writer(outputFile)\n\n# iterate through list and write to file\nfor match in matches:\n outputWriter.writerow(match)\n\n# close output file\noutputFile.close()\n","repo_name":"mackaltman/ancestrydna-sm-py-scrub","sub_path":"AncestryDNA-SharedMatches-Scrub.py","file_name":"AncestryDNA-SharedMatches-Scrub.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40644869305","text":"\"\"\"\nUS19 First cousins should not marry one another\n\nAuthor: Anas Alamoudui\n\nName this file: usXX.py and add it to the list in lib/__init__.py\nAdd usXX.check(individuals) in main.py\n\n\"\"\"\n\ndef check(individuals, families):\n indi = dict()\n for index,i in enumerate(individuals):\n tag = i['tag'] \n arg = i['arg'] \n\n if tag == 'INDI':\n id = arg\n indi[id] = arg\n if tag == 'NAME':\n indi[id] = arg\n \n married = dict()\n kids = dict()\n for index, i in enumerate(families):\n # get all married people in one list\n tag = i['tag'] \n arg = i['arg'] \n if tag == 'FAM':\n id = arg\n married[id] = list()\n kids[id] = list()\n if tag == 'HUSB' or tag =='WIFE':\n married[id].append(arg)\n # add children from each family in one list\n if tag == 'CHIL':\n kids[id].append(arg)\n # collect every married person who is also listed as a child of some family\n marrid_childern = list()\n for key,child in kids.items():\n for fam_key,couple in married.items():\n for i in child:\n if i in couple:\n marrid_childern.append(i)\n # flag families where more than one of these married people appears as a child\n for i,value in kids.items():\n a = set(marrid_childern) & set(value)\n if len(a) > 1:\n print('US19 First cousins should not marry one another')\n 
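# 'a' holds the married individuals who appear as children of the same family i\n 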
print('id:',i,'brothers',a)\n print(i,\">>\",a)\n\n \n\n","repo_name":"phunold/agile","sub_path":"lib/us19.py","file_name":"us19.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30749679926","text":"'''\nThis program demonstrates a simple process for UR robot based 3D printing.\n1) Extruder controller and temperature controller are not given here.\n2) Path input is read from a numpy.array txt file\n3) Connects to the UR robot via urx\n\n'''\nimport traceback\nimport urx\nimport numpy as np\nimport serial, time\nfrom estimate_plane import estimate_plane\nfrom Coordinate_transformation import Coordinate_transformation\n\nclass PathManager(object):\n def __init__(self):\n self.path = None\n pass\n \n def load(self, path_file):\n '''\n return the path as an n*3 numpy.array of [x, y, z] rows;\n converts units from millimeters to meters\n '''\n self.path = np.loadtxt(path_file) / 1000\n return self.path\n \n def get_homogeneous_coordinates(self):\n one_arr = np.ones([self.path.shape[0], 1])\n return np.hstack((self.path, one_arr)) \n \nclass URPrinter(object):\n def __init__(self):\n self.is_connected = False \n \n def connect(self, ip=\"169.254.204.33\"):\n try: \n self.rob = urx.Robot(ip)\n self.rob.set_tcp((0.00022,-0.00265,0.12321,0,0,0)) # pre measurement\n self.rob.set_payload(0.5, (0,0,0)) \n self.is_connected = True\n except Exception as e:\n print(traceback.format_exc())\n print(\"Begin simulation....\")\n \n def close(self): \n self.rob.close()\n self.is_connected = False\n \n def register_plane(self, N=3):\n '''\n return transform matrix R and tcp pose\n '''\n if not self.is_connected:\n self.R= [[ 0.67404229,-0.73868061,0.00423645,-0.21223078],\n [0.7386522,0.6740543,0.00661261,-0.30540021],\n [-0.0077402,-0.00132791,0.99996916,0.03895087],\n [0,0,0,1]] \n self.tcp_pose = [-3,-0.811891,-0.031571]\n return self.R, self.tcp_pose \n \n # get registered points\n points = np.ones((N,6))\n for i in range(N):\n input(\"Waiting to get coordinates:\") \n pose = self.rob.getl() \n points[i,:] = pose\n \n self.tcp_pose = points[0,3:].tolist()\n [a,b,c,d]=estimate_plane(points) \n self.R=Coordinate_transformation(points,a,b,c,d)\n return self.R, self.tcp_pose\n \n def prepare(self):\n pass\n \n def moveto(self, point):\n move = ','.join(map(str,point))\n programString = \"movej(p[\"+move+\"],a=1.4,v=1.04)\" \n if self.is_connected:\n self.rob.send_program(programString) \n else:\n print(programString)\n \n \n \nif __name__ == \"__main__\":\n pm = PathManager()\n pm.load(\"examples/standard.txt\")\n #init UR3 and TCP\n pr = URPrinter()\n pr.connect()\n #find and register print plane\n R, tcp_pose = pr.register_plane()\n \n pr.prepare()\n #send script to UR3\n homogeneous_coordinates = pm.get_homogeneous_coordinates()\n for p in homogeneous_coordinates:\n v = np.dot(R, p.T)\n v = v.tolist()\n v = v[0:3] + pr.tcp_pose\n \n pr.moveto(v) \n \n","repo_name":"fly2mars/robotPrinterDemo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71524311944","text":"class Constants:\n # logging\n LOGGING_FORMAT = \"%(asctime)s %(levelname)s: %(message)s\"\n\n # base\n BASE = \"https://trader.degiro.nl\"\n\n # login\n LOGIN = \"login/secure/login\"\n MFA = \"totp\"\n\n # account\n TRADER = \"trader/secure/profile\"\n ACCOUNT = \"pa/secure/client\"\n PF_DATA = \"trading/secure/v5/update/\"\n\n # 
product\n PRODUCT_INFO = \"product_search/secure/v5/products/info\"\n\n # requests client\n HEADERS = {\n \"access-control-allow-credentials\": \"true\",\n \"cache-control\": \"no-cache, no-store, must-revalidate\",\n \"content-encoding\": \"br\",\n \"content-security-policy\": \"block-all-mixed-content;\",\n \"content-type\": \"application/json\",\n \"date\": \"Thu, 09 Dec 2021 16:58:07 GMT\",\n \"expires\": \"0\",\n \"pragma\": \"no-cache\",\n \"server\": \"openresty\",\n \"strict-transport-security\": \"max-age=31536000; includeSubDomains\",\n \"vary\": \"Accept-Encoding\",\n }\n","repo_name":"danielsteman/depyro","sub_path":"src/depyro/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2620137587","text":"import random\nimport time\nimport asyncio\n\n\nclass BlackJack():\n\n def __init__(self):\n #self.msg = self.msg.split(\" \")[1]\n #self.bet2 = self.msg.split(\" \")[1]\n self.colors = [\"Karo\", \"Herz\", \"Pik\", \"Kreuz\"]\n self.numbers = [2, 3, 4, 5, 6, 7, 8, 9, 10, \"Bube\", \"Dame\", \"König\", \"Ass\"]\n self.hand = []\n self.werte = []\n self.possibilities = []\n for x in self.colors:\n for y in self.numbers:\n self.possibilities.append(f\"{x} {y}\")\n\n def shuffle_card(self):\n choice = random.choice(self.possibilities)\n choice_splitted = choice.split(\" \")\n card = f\"{choice_splitted[0]} {choice_splitted[1]}\"\n self.hand.append(card)\n self.possibilities.remove(str(choice))\n return card\n\n # print(shuffle_card().split())\n def get_wert(self,card):\n if card.split()[1] == \"Bube\" or card.split()[1] == \"Dame\" or card.split()[1] == \"König\":\n self.werte.append(10)\n return 10\n elif card.split()[1] == \"Ass\":\n if sum(self.werte) + 11 > 21:\n self.werte.append(1)\n return 1\n else:\n self.werte.append(11)\n return 11\n\n else:\n self.werte.append(int(card.split()[1]))\n return int(card.split()[1])\n\n async def play_spieler(self,wert,client):\n # print(wert)\n if wert == 21:\n await self.message.channel.send(f\"Du hast die Hand {self.hand} und B L A C K J A C K\")\n await self.message.channel.send(\"Herzlichen Glückwunsch!\")\n return\n elif wert > 21:\n await self.message.channel.send(f\"Du hast verloren mit der Hand {self.hand} und {wert} Punkten\")\n return\n else:\n await self.message.channel.send(\"Willst du noch eine Karte\")\n def is_correct(m):\n return m.author == self.message.author\n\n try:\n answer = await self.client.wait_for('message', check=is_correct, timeout=10.0)\n except asyncio.TimeoutError:\n return await self.message.channel.send(f'Sorry, so lang kann ja kein Mensch warten.')\n\n if answer.content == \"yes\":\n card = blackjack.shuffle_card()\n await self.message.channel.send(str(card))\n blackjack.get_wert(card)\n await blackjack.play_spieler(sum(self.werte),client)\n else:\n await self.message.channel.send(f\"du hast die Hand {self.hand} mit {wert} Punkten\")\n await self.message.channel.send('Jetzt spielt die Bank, bitte bestätigen')\n\n def is_correct(m):\n return m.author == self.message.author\n\n try:\n answer = await self.client.wait_for('message', check=is_correct, timeout=10.0)\n except asyncio.TimeoutError:\n return await self.message.channel.send(f'Sorry, so lang kann ja kein Mensch warten.')\n\n if answer.content == \"yes\":\n await self.message.channel.send(\"Jetzt spielt die Bank\")\n\n async def play_bank(self,wert,spielerpunktzahl):\n # print(wert)\n time.sleep(1)\n if wert > 
spielerpunktzahl and wert < 21:\n time.sleep(1)\n await self.message.channel.send(f\"Die Bank hat {self.hand} und {wert} Punkten und somit gewonnen\")\n elif wert > 21:\n time.sleep(1)\n await self.message.channel.send(f\"Die Bank hat die Hand {self.hand} und {wert} Punkten - somit verloren\")\n elif wert == 21:\n time.sleep(2)\n await self.message.channel.send(\"B L A C K J A C K - du hast leider verloren!\")\n return\n\n else:\n await self.message.channel.send(\"Die Bank nimmt noch eine Karte\")\n zufall = (21 - random.randint(2, 4))\n time.sleep(1)\n if wert < zufall and wert < spielerpunktzahl:\n card = blackjack.shuffle_card()\n await self.message.channel.send(str(card))\n blackjack.get_wert(card)\n await blackjack.play_bank(sum(self.werte),spielerpunktzahl)\n else:\n await self.message.channel.send(\n f\"Du hast {spielerpunktzahl} Punkte und die Bank hat die Hand {self.hand} mit {wert} Punkten und somit gewonnen\")\n\n async def startgame(self,msg,message,client):\n self.msg = msg\n self.message = message\n self.client = client\n card = blackjack.shuffle_card()\n blackjack.get_wert(card)\n await self.message.channel.send(f\"Deine erste Karte ist {card}\")\n await blackjack.play_spieler(sum(self.werte),client)\n spielerpunktzahl = sum(self.werte)\n await self.message.channel.send(f\"Du hast die Punktzahl {str(spielerpunktzahl)} und die die Karten {self.hand}\")\n self.werte = []\n self.hand = []\n card = blackjack.shuffle_card()\n blackjack.get_wert(card)\n await self.message.channel.send(f\"Die Bank zieht {card}\")\n await blackjack.play_bank(sum(self.werte),spielerpunktzahl)\n self.hand = []\n self.wert = []\n\nblackjack = BlackJack()","repo_name":"thom7e/Discordbot","sub_path":"discordblackjack.py","file_name":"discordblackjack.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17184784731","text":"file01 = open(\"/home/tarena/bbb.jpeg\", \"rb\")\nfile02 = open(\"/home/tarena/PycharmProjects/zhangmingyang/month02/day03/bbb.jpeg\", \"wb\")\n\nlist01 = []\nfor line in file01:\n list01.append(line)\n\nfile02.writelines(list01)\n\nfile01.close()\nfile02.close()","repo_name":"zmyg0321/AID2006","sub_path":"day16/FTP/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14220749048","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tab_interfaces.general_interface import Interface, get_folder_contents\n# from general_interface import Interface, get_folder_contents\n\nclass BWA(Interface):\n def __init__(self, notebook, parent, folder_path=\"/home/hhbach/gui/\"):\n self.notebook = notebook\n super().__init__(parent, folder_path)\n\n def create_input_panel(self, tab):\n # Create a frame for the input panel\n input_panel = ttk.Frame(tab)\n input_panel.pack(side=tk.RIGHT, fill=tk.BOTH, padx=10, pady=10)\n\n # Configure column weights\n input_panel.grid_columnconfigure(1, weight=1)\n\n # Create input fields\n num_threads_label = ttk.Label(input_panel, text=\"Number of Threads:\")\n num_threads_label.grid(row=0, column=0, sticky=tk.W)\n num_threads_entry = ttk.Entry(input_panel)\n num_threads_entry.grid(row=0, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n \n sra_id_label = ttk.Label(input_panel, text=\"SRA-ID:\")\n sra_id_label.grid(row=1, column=0, sticky=tk.W)\n sra_id_entry = ttk.Entry(input_panel)\n sra_id_entry.grid(row=1, column=1, padx=5, pady=5, 
sticky=tk.W+tk.E)\n\n platform_label = ttk.Label(input_panel, text=\"Platform:\")\n platform_label.grid(row=2, column=0, sticky=tk.W)\n platform_entry = ttk.Entry(input_panel)\n platform_entry.grid(row=2, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n\n ref_genome_label = ttk.Label(input_panel, text=\"Reference genome:\")\n ref_genome_label.grid(row=3, column=0, sticky=tk.W)\n ref_genome_entry = ttk.Entry(input_panel)\n ref_genome_entry.grid(row=3, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n \n fwd_label = ttk.Label(input_panel, text=\"Forward read:\")\n fwd_label.grid(row=4, column=0, sticky=tk.W)\n fwd_entry = ttk.Entry(input_panel)\n fwd_entry.grid(row=4, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n\n bwd_label = ttk.Label(input_panel, text=\"Backward reads:\")\n bwd_label.grid(row=5, column=0, sticky=tk.W)\n bwd_entry = ttk.Entry(input_panel)\n bwd_entry.grid(row=5, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n\n output_label = ttk.Label(input_panel, text=\"Output Directory:\")\n output_label.grid(row=6, column=0, sticky=tk.W)\n output_entry = ttk.Entry(input_panel)\n output_entry.grid(row=6, column=1, padx=5, pady=5, sticky=tk.W+tk.E)\n\n # Create a button to process the entered data\n process_button = ttk.Button(input_panel, text=\"Process\", \n command=lambda: self.process_data(num_threads_entry, sra_id_entry, platform_entry, ref_genome_entry, fwd_entry, bwd_entry, output_entry, result_text))\n process_button.grid(row=7, column=0, columnspan=2, pady=10)\n\n # Create a label for displaying the result\n result_text = tk.Text(input_panel, width=200, height=50, relief=\"sunken\")\n result_text.grid(row=8, column=0, columnspan=2, sticky=tk.W+tk.E+tk.N+tk.S)\n\n # Create a frame for the export button\n export_frame = ttk.Frame(input_panel)\n export_frame.grid(row=9, column=0, columnspan=2, pady=10, sticky=tk.E)\n\n # Create a button to export the result\n export_button = ttk.Button(export_frame, text=\"Export\", command=lambda: self.export_data(result_text))\n export_button.pack(padx=5, pady=5)\n\n # Return the result_text widget\n return result_text\n \n \n def process_data(self, num_threads_entry, sra_id_entry, platform_entry, ref_genome_entry, fwd_entry, bwd_entry, output_entry, result_text):\n num_threads = num_threads_entry.get()\n sra_id = sra_id_entry.get()\n platform = platform_entry.get()\n ref_genome = ref_genome_entry.get()\n fwd = fwd_entry.get()\n bwd = bwd_entry.get()\n output = output_entry.get()\n \n rg_info = '\\\"@RG\\\\tID:'+ sra_id + '\\\\tPL:' + platform + '\\\\tSM:'+ sra_id +'\\\"'\n \n result = f\"bwa mem -t {num_threads} {rg_info} {ref_genome} {fwd} {bwd} {output}\"\n\n # Clear previous text content\n result_text.delete('1.0', tk.END)\n\n # Insert the new text content\n result_text.insert(tk.END, result)\n\n # Disable editing of the text widget\n result_text.configure(state=tk.DISABLED)\n\n def create_tab(self, notebook, tab_name, folder_path):\n self.notebook = notebook\n return super().create_tab(tab_name, folder_path)\n\ndef main():\n root = tk.Tk()\n root.title(\"Pipeline\")\n interface = BWA(root)\n interface.create_tab(\"fastQC\", \"/home/hhbach/gui/data\")\n interface.create_tab(\"BWA-MEM\", \"/path/to/folder2\")\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()","repo_name":"h2bach/thesis_Mutect2_Interface","sub_path":"src/tab_interfaces/bwa.py","file_name":"bwa.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
+{"seq_id":"13651762942","text":"'''\nProblem Description\n\nGiven an sorted array A of size N. Find number of elements which are less than or equal to B.\n\nNOTE: Expected Time Complexity O(log N)\n\n\n\nProblem Constraints\n1 <= N <= 106\n\n1 <= A[i], B <= 109\n\n\n\nInput Format\nFirst agument is an integer array A of size N.\n\nSecond argument is an integer B.\n\n\n\nOutput Format\nReturn an integer denoting the number of elements which are less than or equal to B.\n\n\n\nExample Input\nInput 1:\n\n A = [1, 3, 4, 4, 6]\n B = 4\nInput 2:\n\n A = [1, 2, 5, 5]\n B = 3\n\n\nExample Output\nOutput 1:\n\n 4\nOutput 2:\n\n 2\n\n\nExample Explanation\nExplanation 1:\n\n Elements (1, 3, 4, 4) are less than or equal to 4.\nExplanation 2:\n\n Elements (1, 2) are less than or equal to 3.\n'''\nclass Solution:\n # @param A : list of integers\n # @param B : integer\n # @return an integer\n def solve(self, A, B):\n n = len(A)\n left = 0\n right = n - 1\n \n count = 0\n \n while (left <= right): \n mid = int((right + left) / 2) \n \n # Check if middle element is less than or equal to B\n if (A[mid] <= B): \n \n # At least (mid + 1) elements are there whose values are less than or equal to key \n count = mid + 1\n left = mid + 1\n \n # If B is smaller, ignore right half \n else: \n right = mid - 1\n \n return count \n","repo_name":"97cool-vikas/Interviewbit","sub_path":"Python3/Binary Search/Simple Binary Search/Smaller or equal elements.py","file_name":"Smaller or equal elements.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39471886645","text":"#!/usr/bin/env python\n'''Utilities required for interfacing with the pw-DFT code Quantum Espresso.\n'''\nfrom __future__ import print_function, absolute_import\n\nimport re\nimport numpy as np\n\nimport sys\n\nfrom dislopy.atomic import crystal as cry\nfrom dislopy.utilities import atomistic_utils as util\nfrom dislopy.atomic import transmutation as mutate\n\nnamelists= ['&control','&system','&electrons','&ions','&cell']\ncards = ['CELL_PARAMETERS','ATOMIC_SPECIES','ATOMIC_POSITIONS','CONSTRAINTS',\n 'OCCUPATIONS','FORCES','K_POINTS']\n\ndef parse_qe(filename, qe_struc, path='./'):\n '''Parses qe file and extracts structure to\n .\n '''\n \n qe_lines = util.read_file(filename, path)\n # extract cards and namelists\n # begin by extracting indices of card and namelist block entries\n name_i = [i for i, line in enumerate(qe_lines) if any([name in\n line for name in namelists])]\n card_i = [i for i, line in enumerate(qe_lines) if any([card in\n line for card in cards])]\n \n # extract namelist blocks\n name_dict = dict()\n for i in range(len(name_i)):\n # extract the namelist key\n for key in namelists:\n if key in qe_lines[name_i[i]]:\n use_key = key\n break\n # extract elements of namelist to dictionary\n try:\n name_dict[use_key] = qe_lines[name_i[i]:name_i[i+1]]\n except IndexError:\n name_dict[use_key] = qe_lines[name_i[i]:card_i[0]]\n \n # extract card blocks\n card_dict = dict()\n for i in range(len(card_i)):\n # extract the card key\n for key in cards:\n if key in qe_lines[card_i[i]]:\n use_key = key\n break\n # extract elements of card block to the card dictionary\n try:\n card_dict[use_key] = qe_lines[card_i[i]:card_i[i+1]]\n except IndexError:\n card_dict[use_key] = qe_lines[card_i[i]:]\n \n # lattice parameters and atomic coordinates\n for i, cell_param in enumerate(card_dict['CELL_PARAMETERS'][1:]):\n cell_param = cell_param.split()\n new_vec 
= np.array([float(x) for x in cell_param])\n qe_struc.setVector(new_vec, i)\n \n # extract atoms\n for atom_line in card_dict['ATOMIC_POSITIONS'][1:]:\n temp = atom_line.split()\n name = temp[0]\n coords = np.array([float(x) for x in temp[1:]])\n qe_struc.addAtom(cry.Atom(name, coords))\n \n sys_info = extract_parameters(name_dict, card_dict) \n return sys_info\n\ndef extract_parameters(name_dict, card_dict):\n '''Extract the simulation parameters to sys_info.\n '''\n \n sys_info = dict()\n \n # regex to capture variable definition\n var_form = re.compile('(?P\\w[\\w\\d_\\(\\)]*(?:\\([\\w\\d_,\\s]+\\))?)\\s*=\\s*(?P[^,]+)')\n \n # extract namelist guff to \n sys_info['namelists'] = dict()\n for key in name_dict:\n sys_info['namelists'][key] = dict()\n for el in name_dict[key]:\n found = var_form.finditer(el)\n if found:\n for entry in found:\n sys_info['namelists'][key][entry.group('name')] = entry.group('value') \n \n # extract the card guff to . Because entries in the card blocks do\n # not share a common format, each possible card must be parsed separately.\n sys_info['cards'] = dict() \n for key in card_dict:\n if key == 'CELL_PARAMETERS':\n # extract cell parameter units\n units = re.search(r'{\\s*(?P(alat|bohr|angstrom))\\s*}',\n card_dict['CELL_PARAMETERS'][0])\n if units:\n sys_info['cards']['CELL_PARAMETERS'] = units.group('units')\n else:\n print('No units supplied. Assuming bohr.')\n sys_info['cards']['CELL_PARAMETERS'] = None\n if key == 'K_POINTS':\n header = re.compile('K_POINTS\\s+{\\s*(?P(automatic|gamma))\\s*}')\n preamble = header.search(card_dict['K_POINTS'][0])\n if not preamble:\n raise ValueError(\"Specified grid generation scheme not supported.\")\n else:\n if preamble.group('type') == 'gamma':\n sys_info['cards']['K_POINTS'] = None\n else:\n grid_form = re.compile('(?P(?:\\d+\\s+){3})(?P\\d+\\s+\\d+\\s+\\d+)')\n grid = re.search(grid_form, card_dict['K_POINTS'][1])\n sys_info['cards']['K_POINTS'] = dict()\n sys_info['cards']['K_POINTS']['spacing'] = np.array([float(x)\n for x in grid.group('grid').split()])\n sys_info['cards']['K_POINTS']['shift'] = grid.group('shift') \n elif key == 'ATOMIC_SPECIES':\n # handle pseudopotentials\n sys_info['cards'][key] = card_dict[key][1:]\n elif key == 'OCCUPATIONS':\n # handle occupations -> not yet implemented\n print(\"Occupations not implemented. Skipping...\")\n else:\n # information stored elsewhere\n pass\n \n return sys_info\n \ndef add_psps(sim_info, new_psps):\n '''Add additional pseudopotentials to . Useful for \n impurity calculations where impurity atoms are not present in the\n bulk material. is a list of objects of class .\n '''\n \n for psp in new_psps:\n sim_info['cards']['ATOMIC_SPECIES'].append(str(psp))\n\nclass Pseudopotential(object):\n '''Holds information for a QE pseudopotential.\n '''\n \n def __init__(self, species, atomic_weight, psp):\n self.species = species\n self.weight = atomic_weight\n self.psp = psp\n\n def __str__(self):\n return ' {} {:.4f} {}'.format(self.species, self.weight, \n self.psp)\n \ndef scale_nbands(system_nmlst, sc_dims):\n '''If the variable (number of valence bands) is specified in the \n system information for the base cell, scale by the size of the new supercell\n defined by (i.e. 
the multiples of unit cell parameters in x,y,z).\n '''\n \n if 'nbnd' in system_nmlst.keys():\n # increase number of bands to reflect new sc size\n old_nbands = int(system_nmlst['nbnd'])\n new_nbands = old_nbands*sc_dims[0]*sc_dims[1]*sc_dims[2]\n system_nmlst['nbnd'] = new_nbands\n \n return \n \ndef write_qe(outstream, qe_struc, sys_info, defected=True, to_cart=False,\n add_constraints=False, relax_type='scf', impurities=None, do_relax=None, prop=None):\n '''Writes the crystal structure contained in qe_struc to outstream. to_cart is\n a dummy variable to make the input consistent with the other writers.\n '''\n \n # if isolated/coupled defects have been supplied, add these to the structure\n if not (impurities is None):\n if mutate.is_single(impurities):\n mutate.cell_defect(qe_struc, impurities, use_displaced=True)\n elif mutate.is_coupled(impurities):\n mutate.cell_defect_cluster(qe_struc, impurities, use_displaced=True)\n else:\n raise TypeError(\"Supplied defect not of type /\")\n \n # write namelists\n for block in namelists:\n # test that the namelist is not empty\n if not (block in sys_info['namelists'].keys()):\n continue\n \n # else, write the block \n outstream.write(' {}\\n'.format(block))\n for variable in sys_info['namelists'][block]:\n if variable == 'calculation':\n if not (relax_type is None):\n outstream.write(' calculation = \\'{}\\'\\n'.format(relax_type))\n else:\n print(\"No calculation type specified; defaulting to scf\")\n outstream.write(' calculation = \\'scf\\'\\n')\n elif variable == 'nat':\n outstream.write(' nat = {}\\n'.format(len(qe_struc)))\n elif variable == 'ntyp':\n outstream.write(' ntyp = {}\\n'.format(qe_struc.number_of_elements()))\n else:\n outstream.write(' {} = {}\\n'.format(variable, \n sys_info['namelists'][block][variable]))\n outstream.write(' /\\n')\n \n # write pseudopotentials\n outstream.write(\" ATOMIC_SPECIES\\n\")\n for psp in sys_info['cards']['ATOMIC_SPECIES']:\n outstream.write(psp + '\\n')\n \n # write atomic coordinates\n outstream.write(' ATOMIC_POSITIONS { crystal }\\n')\n qe_struc.write(outstream, defected=defected, add_constraints=add_constraints)\n \n # write lattice \n outstream.write(' CELL_PARAMETERS')\n outstream.write(' {{ {} }}\\n'.format(sys_info['cards']['CELL_PARAMETERS']))\n qe_struc.writeLattice(outstream.write)\n \n # write k-point grid\n if not sys_info['cards']['K_POINTS']:\n # use the gamma point\n outstream.write(' K_POINTS { gamma }\\n')\n else:\n # use automatically generated Monkhorst-Pack grid\n outstream.write(' K_POINTS { automatic }\\n')\n grid = sys_info['cards']['K_POINTS']['spacing']\n outstream.write(' {} {} {}'.format(grid[0], grid[1], grid[2]))\n outstream.write(' {}\\n'.format(sys_info['cards']['K_POINTS']['shift']))\n \n outstream.close()\n \n # finally, remove any impurity atoms that have been appended to qe_struc\n if not (impurities is None):\n mutate.undo_defect(qe_struc, impurities)\n \n return\n","repo_name":"andreww/disloPy","sub_path":"dislopy/atomic/qe_utils.py","file_name":"qe_utils.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"14651275474","text":"from flask import Flask\r\n# Step 1: create a folder named static\r\n# 127.0.0.1:5000/test/sally.jpg\r\n\r\n\r\napp = Flask(__name__,static_folder='./static',static_url_path='/test')\r\n# folder location: the current static folder; URL prefix used to serve it\r\n\r\n\r\nif __name__ =='__main__':\r\n 
app.run(debug=True,host='0.0.0.0',port=5000)","repo_name":"owoicc129/ceb102-Flask","sub_path":"03調用static目錄裡的東西.py","file_name":"03調用static目錄裡的東西.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34082977459","text":"def sumNumbers(self, root):\n # LeetCode 129: sum every root-to-leaf number (written as a Solution method, hence self)\n self.res = 0\n\n def dfs(node, temp):\n if not node:\n return\n temp = temp*10 + node.val\n if not node.left and not node.right:\n # leaf reached: temp is a complete root-to-leaf number\n self.res += temp\n \n dfs(node.left, temp)\n dfs(node.right, temp)\n \n dfs(root, 0)\n return self.res","repo_name":"zjsdcae/LeetCode","sub_path":"Tree/129.py","file_name":"129.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26164725568","text":"import os\nimport logging\nimport logging.config\nimport sys\nimport yaml\n\n\"\"\"\nThis function sets up the root logging configuration:\n* If the LOG_CFG variable is set, use it as the logging configuration path\n* Since we are using a yaml configuration, the yaml module is needed to load the configuration file\n* Set the root logger configuration using `logging.config.dictConfig()`\n* Any exception results in setting up the root logger with the default configuration. \n\"\"\"\n\n\n# Function to configure root logger\ndef setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):\n \"\"\"\n | Logging Setup\n |\n :param default_path: Logging configuration path\n :param default_level: Default logging level\n :param env_key: Logging config path set in environment variable\n \"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n\n if os.path.exists(path):\n with open(path, 'rt') as f:\n try:\n config = yaml.safe_load(f.read())\n logging.config.dictConfig(config)\n except Exception as e:\n print('Error in Logging Configuration. Using default configs. ', e)\n logging.basicConfig(level=default_level, stream=sys.stdout)\n else:\n logging.basicConfig(level=default_level, stream=sys.stdout)\n print('Failed to load configuration file. 
Using default configs')","repo_name":"howesa/corati_home","sub_path":"models/touchscreen-typing/src/utilities/logging_config_manager.py","file_name":"logging_config_manager.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"74456817223","text":"from sys import stdin as st\n\nN = int(st.readline())\n\nresult = 0\ni = 0\n\nwhile N:\n if N % -2:\n N = (N//-2) + 1\n result += 10**i\n else:\n N = N//-2\n \n i+= 1\n \nprint(result)","repo_name":"DGKwak/Baekjoon","sub_path":"2089.py","file_name":"2089.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34303231967","text":"def calAns(N):\n def find_odd(n):\n flag = 0\n temp = n\n while(temp != 0):\n r = temp % 10\n if r % 2 != 0:\n flag = 1\n break\n temp = int(temp / 10)\n return flag\n\n def calc_press(value, n):\n co = 0\n while True:\n co += 1\n n = n + value\n if find_odd(n) is 0:\n return co\n OPTION_1 = +1\n OPTION_2 = -1\n\n if find_odd(N) is 0:\n ans = f\"Case #{i+1}: 0\"\n\n else:\n OPTION_PLUS = calc_press(OPTION_1, N)\n OPTION_MINUS = calc_press(OPTION_2, N)\n ans = f\"Case #{i+1}: {min(OPTION_PLUS, OPTION_MINUS)}\"\n\n return ans\n \nif __name__ == \"__main__\":\n T = int(input()) \n for i in range(T):\n N = int(input())\n print(calAns(N))\n''' N = str(n)\n flag = 0\n for n in N:\n if int(n) % 2 != 0:\n flag = 1 \n break\n return flag''' ","repo_name":"RogueBugger/workspace","sub_path":"google competiton/phase A/supervin_calculator1.py","file_name":"supervin_calculator1.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3165900754","text":"from morse_code import dictionary\n\n\ndef str_to_morse(user_string):\n new_string = \"\"\n for position in range(len(user_string)):\n if user_string[position] in dictionary:\n new_string += dictionary[user_string[position]] + \" \"\n else:\n new_string += user_string[position] + \" \"\n return new_string\n\n\none_more = True\nwhile one_more:\n # Get user input\n user_input = input(\"Write you message: \").lower()\n # Convert user input to string and print\n print(str_to_morse(user_input))\n # Try again loop\n wrong_input = True\n while wrong_input:\n user_want_more = input(\"Do you want more? 
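The 2089.py record above prints N in base -2 by nudging the quotient whenever Python's floored division leaves a negative remainder (the `N = (N//-2) + 1` branch). The same idea written with divmod, as a standalone sketch (the function name is mine, not the record's):

def to_negabinary(n: int) -> str:
    if n == 0:
        return '0'
    digits = []
    while n:
        n, r = divmod(n, -2)
        if r < 0:          # Python floors toward -inf, so r can be -1 here
            r += 2         # fold the remainder back into {0, 1} ...
            n += 1         # ... and compensate in the quotient, as 2089.py does
        digits.append(str(r))
    return ''.join(reversed(digits))

assert to_negabinary(2) == '110'       # 4 - 2 + 0
assert to_negabinary(-13) == '110111'  # -32 + 16 + 4 - 2 + 1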
Y or N?\").lower()\n if user_want_more == \"y\":\n one_more = True\n wrong_input = False\n elif user_want_more == \"n\":\n one_more = False\n wrong_input = False\n else:\n wrong_input = True\n","repo_name":"Mati55e/string-to-morse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34019837603","text":"import numpy as np\nimport os\nfrom skimage import io, transform, filters\nfrom numba import jit\n\n\n@jit(nopython=True)\ndef get_background(im_arr, t):\n h, w = im_arr.shape[:2]\n background_mask = np.zeros(im_arr.shape, np.int8)\n for i in range(0, h, 8):\n for j in range(0, w, 8):\n block = im_arr[i:i+8, j:j+8]\n if (np.std(block) < t) & (np.mean(block) < 200) & (np.mean(block) > 2):\n background_mask[i:i+8, j:j+8] = 1\n return background_mask\n\n\n@jit(nopython=True)\ndef get_background_colour(background_image):\n background_colour = np.zeros((background_image.shape[0]//50, background_image.shape[1]//50), np.float32)\n h, w = background_image.shape\n for i in range(0, h//50):\n for j in range(0, w//50):\n curr_slice = background_image[i*50:(i+1)*50, j*50:(j+1)*50]\n nan_sum = np.sum(np.isnan(curr_slice))\n if nan_sum < 50 * 50:\n background_colour[i, j] = np.nanmean(curr_slice)\n return background_colour\n\n\ndef correct(im_arr, t):\n # get mask for missing areas, we will later colour them grey\n grad1, grad2 = np.gradient(im_arr)\n missing = grad1 + grad2 + im_arr == 0\n background_mask = get_background(im_arr, t).astype(bool)\n background_image = np.where(background_mask, im_arr, np.nan)\n background_colour = get_background_colour(background_image)\n background_colour = np.repeat(np.repeat(background_colour, 50, 0), 50, 1)\n background_colour = filters.gaussian(background_colour, sigma=50, preserve_range=True)\n # rescale so [background_colour_full, 255] maps onto [0, 255]\n im_arr_out = 255 * (im_arr - background_colour) / (255 - background_colour)\n im_arr_out = np.maximum(0, im_arr_out)\n im_arr_out[missing] = 125\n return im_arr_out\n\n\ndef process_one_image(source_folder, f):\n im = io.imread(os.path.join(source_folder, f))\n im = np.mean(im, 2)\n im = transform.downscale_local_mean(correct(im, 5), (4, 4)) / 255 - 0.5\n return im.astype(np.float32), f\n\n","repo_name":"btrotta/kaggle-clouds","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"8881459594","text":"\n\n\nword = \"Batman\"\n\n# Allowed errors the user can make\nallowed_errors = 3\n\n# letters the user has gussed\nguessed_letters = []\n\n# indication flag\ndone = False\n\nwhile not done:\n for letter in word:\n\n if letter.lower() in guessed_letters:\n print(letter, end=\" \")\n else:\n print(\"_\", end=\" \")\n print(\"\")\n\n guess = input(f\"health remaning{allowed_errors}, Nest Guess is:\")\n guessed_letters.append(guess.lower())\n if guess.lower() not in word.lower():\n allowed_errors -=1\n if allowed_errors == 0:\n break\n\n done = True\n for letter in word:\n if letter.lower() not in guessed_letters:\n done = False\nif done:\n print(f\"health remaning{allowed_errors}, You won, the word was {word}:\")\nelse:\n print(f\"health remaning{allowed_errors}, Game Over, the word was 
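str_to_morse above walks the string and falls back to the raw character when it is not in the table; the same behaviour fits in a single dict.get expression. A sketch, assuming the `dictionary` mapping imported from morse_code as in the record:

from morse_code import dictionary

def str_to_morse(text: str) -> str:
    # unknown characters pass through unchanged, exactly like the loop version
    return ''.join(dictionary.get(ch, ch) + ' ' for ch in text.lower())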
{word}:\")\n","repo_name":"rajputaman3141/Python-Games","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6933146311","text":"import multiprocessing\nimport subprocess\nimport signal\nimport sqlite3\nimport io\nimport os\nimport cherrypy\n\nfrom PIL import Image\nfrom tempfile import mktemp\n\nfrom . settings import image_dir\nfrom . common import is_image\n\nTHUMB_SIZE = (150, 150)\n\ndef img_thumb(src_file):\n\timg = Image.open(src_file)\n\timg.thumbnail(THUMB_SIZE)\n\tif img.mode != 'RGB':\n\t\timg = img.convert('RGB')\n\tstream = io.BytesIO()\n\timg.save(stream, format='JPEG')\n\treturn stream.getvalue()\n\ndef video_thumb(src_file):\n\toutfile = mktemp(suffix='.jpg')\n\tcall_args = ['ffmpeg', '-hide_banner', '-loglevel', 'warning', '-y', '-i', src_file, '-ss', '00:00:00.000', '-vframes', '1', outfile]\n\tsubprocess.call(call_args)\n\twith open(outfile, 'rb') as f:\n\t\tthumb = img_thumb(outfile)\n\tos.unlink(outfile)\n\treturn thumb\n\ndef thumb_worker(queue, db_filename, workernum=None):\n\tsignal.signal(signal.SIGINT, signal.SIG_IGN)\n\twhile True:\n\t\tqdata = queue.get()\n\t\tif len(qdata) != 2:\n\t\t\tbreak\n\t\tmd5, src_file = qdata\n\n\t\ttry:\n\t\t\tif is_image(src_file):\n\t\t\t\tthumb = img_thumb(src_file)\n\t\t\telse:\n\t\t\t\tthumb = video_thumb(src_file)\n\t\t\twith sqlite3.connect(db_filename) as conn:\n\t\t\t\tconn.execute('PRAGMA synchronous = OFF')\n\t\t\t\tconn.execute('INSERT OR IGNORE INTO thumbnails(md5, imgdata) VALUES (?,?)', (md5, thumb))\n\t\texcept Exception as e:\n\t\t\tcherrypy.log(\"Exception on file %s : %s\" % (src_file, e), context='THUMB')\n\t\t\tcontinue\n\t\t\nclass ThumbNailer:\n\tdef __init__(self, db_filename):\n\t\tself.num_procs = multiprocessing.cpu_count() * 2\n\t\tself.queue = multiprocessing.Queue(self.num_procs * 10)\n\t\tcherrypy.log(\"Loading cache\", context='THUMB')\n\t\twith cherrypy.tools.db.thumb.get() as conn, conn:\n\t\t\tcur = conn.execute('SELECT md5 FROM thumbnails')\n\t\t\tself.thumbcache = set(map(lambda x: x[0], cur))\n\t\t\n\t\tfor i in range(self.num_procs):\n\t\t\tp = multiprocessing.Process(target=thumb_worker, args=(self.queue, db_filename, i))\n\t\t\tp.daemon = True\n\t\t\tp.start()\n\t\t\n\tdef create(self, md5, source_file):\n\t\tif md5 in self.thumbcache:\n\t\t\treturn\n\t\tself.queue.put((md5, os.path.join(image_dir, source_file)))\n\n\tdef get(self, md5):\n\t\twith cherrypy.tools.db.thumb.get() as conn, conn:\n\t\t\tcur = conn.execute('SELECT imgdata FROM thumbnails WHERE md5 = ?', (md5,))\n\t\t\timg_data = cur.fetchone()\n\t\tif img_data:\n\t\t\tself.thumbcache.add(md5)\n\t\treturn img_data\n\n\tdef stop(self):\n\t\tfor i in range(self.num_procs):\n\t\t\tself.queue.put((None,))\n","repo_name":"loadletter/grabber-viewer","sub_path":"localbooru/tools/tumbler.py","file_name":"tumbler.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"8571267455","text":"import os\nimport pandas as pd\nimport numpy as np\nimport my_functions\nimport pingouin as pg\nimport time\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Show or don't show\n# behav_results = input(\"show behavioral results (y/n)?\") or 'n'\nbehav_results = 'y'\n\nif behav_results == 'y':\n show_behav = 1\nelif behav_results == 'n':\n show_behav = 0\nelse:\n print('enter y or n please')\n\n\n# Set 
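ThumbNailer.stop above shuts its worker pool down by queueing 1-tuples, which fail the `len(qdata) != 2` check in thumb_worker and break the loop. That sentinel ("poison pill") pattern in isolation, independent of the tumbler module:

import multiprocessing

def worker(queue):
    while True:
        job = queue.get()
        if len(job) != 2:      # poison pill: anything that is not a (md5, path) job
            break
        md5, path = job
        print('processing', md5, path)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    q.put(('abc123', '/tmp/a.jpg'))
    q.put((None,))             # sentinel: worker exits its loop
    p.join()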
pandas options\npd.set_option('display.max_columns', 242)\n# disable chained assignment warning\npd.options.mode.chained_assignment = None\ndata_ext = '.csv'\n\n# Directories to data\neye_data_dir = '/Users/joecool890/Dropbox/UC-Davis/projects/tnt_incid-mem/raw-data/eye_tracking/'\nbehav_data_dir = '/Users/joecool890/Dropbox/UC-Davis/projects/tnt_incid-mem/raw-data/eye_tracking/behav_data/'\n\n# load behavioral data for analysis\nbehav_data = my_functions.load_filepath(behav_data_dir)\nbehav_raw_df = pd.concat(map(lambda x: pd.read_csv(x), behav_data))\nbehav_raw_df.set_index('par_ID', inplace=True)\ntotal_par = len(behav_data)\n\nprint('\\n# of participants: ', total_par, '\\n')\n\n# filters for behavioral data\naccuracy_filter = behav_raw_df['accuracy'] == 1\ncorr_behav_data = behav_raw_df[accuracy_filter]\n\n# Grouping conditions\nraw_sem_cond = behav_raw_df.groupby(['par_ID', 'condition'])\ncorr_sem_cond = corr_behav_data.groupby(['par_ID', 'condition'])\n\n# dropped trials\ndropped_trials = 30 - corr_sem_cond['accuracy'].count().unstack()\ndropped_trials['total'] = dropped_trials.sum(axis=1) / 90 * 100\n\n# accuracy and RT\naccuracy_df = raw_sem_cond['accuracy'].mean() * 100\nRT_df = corr_sem_cond['RT'].mean()\n\n# ANOVA for RT and acc (behavioral)\nRT_df_anova = RT_df.reset_index()\nANOVA_RT = pg.rm_anova(data=RT_df_anova, dv='RT', within='condition', subject='par_ID').round(4)\n\npairwise_results_RT = pg.pairwise_ttests(data=RT_df_anova, dv='RT', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n\naccuracy_df_anova = accuracy_df.reset_index()\nANOVA_ACC = pg.rm_anova(data=accuracy_df_anova, dv='accuracy', within='condition', subject='par_ID').round(4)\n\npairwise_results_ACC = pg.pairwise_ttests(data=accuracy_df_anova, dv='accuracy', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n\n# Print data and results\nif show_behav == 1:\n print('# of dropped trials \\n', dropped_trials, '\\n')\n print('RT based on semantic conditions \\n', RT_df.unstack().mean(), '\\n')\n print('ANOVA for RT \\n', ANOVA_RT, '\\n\\n')\n print(\"Pairwise testings for RT \\n\", pairwise_results_RT, '\\n')\n print('Accuracy based on semantic conditions \\n', round(accuracy_df.unstack(), 2), '\\n')\n print(\"ANOVA for Acc \\n\", ANOVA_ACC, '\\n')\n print(\"Pairwise testings for Acc \\n\", pairwise_results_ACC, '\\n')\n print('Done with Behavioral data, moving on to eye data \\n\\n\\n')\n\n# load eye data\ndata_files = my_functions.load_filepath(eye_data_dir)\neye_raw_df = pd.concat(map(lambda x: pd.read_csv(x), data_files))\n\n# Rename columns\neye_raw_df.rename(columns={'RECORDING_SESSION_LABEL': 'par_ID'}, inplace=True)\neye_raw_df.set_index('par_ID', inplace=True)\neye_raw_df.rename(columns={'trial_num': 'trialNo'}, inplace=True)\n# remove practice trials from trial count\neye_raw_df['TRIAL_INDEX'] = eye_raw_df['TRIAL_INDEX'] - 15\n# Drop useless columns\neye_raw_df.drop(columns=my_functions.eye_drop_list, inplace=True)\n\n# Get correct trial number, use to remove inaccurate trials from eye track, skip for now\naccurate_trial_numbers = behav_raw_df[accuracy_filter][['trialNo', 'RT']]\n\n# filter out practice trials\ntrial_filter = eye_raw_df['condition'] != 'practice'\neye_df = eye_raw_df[trial_filter]\n\n# filter out incorrect trials from df\neye_df = eye_df.reset_index()\naccurate_eye_df = pd.DataFrame([])\nx = time.time()\nfor row in accurate_trial_numbers.itertuples():\n par_id = row[0]\n trial_index = row[1]\n par_trial = eye_df[((eye_df['par_ID'] == par_id) & 
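The pg.rm_anova / pg.pairwise_ttests calls above need long-format data with one row per subject-condition cell, which is why each groupby mean is reset_index()-ed first. A self-contained example of the same calls on synthetic data:

import numpy as np
import pandas as pd
import pingouin as pg

rng = np.random.default_rng(0)
fake = pd.DataFrame({
    'par_ID': np.repeat(np.arange(10), 3),                       # 10 subjects
    'condition': np.tile(['neutral', 'taxonomic', 'thematic'], 10),
    'RT': rng.normal(1000, 100, 30),
})
print(pg.rm_anova(data=fake, dv='RT', within='condition', subject='par_ID'))
print(pg.pairwise_ttests(data=fake, dv='RT', within='condition',
                         subject='par_ID', padjust='bonf'))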
(eye_df['TRIAL_INDEX'] == trial_index))]\n accurate_eye_df = pd.concat([accurate_eye_df, par_trial])\ntime_elapsed = time.time() - x\nprint(f'\\nDone! Only accurate trials filtered in : {(round(time_elapsed, 5))} seconds')\n\n# 1. Calculate total fixation duration - Joy (done)\n# last fixation duration - Joy (done)\n# whether they looked at the distractors and how long they looked at it (done)\n# how many times they looked at an object (2 different looks, 0 looks) (done)\n# Accurate as of 2022-03-29\n# useful to know what difference it makes if it's 1 look or 2, but\n# compare nearest and target just report the visual angle\n\n# Change type for numeric columns\naccurate_eye_df['CURRENT_FIX_INTEREST_AREA_DWELL_TIME'] = accurate_eye_df['CURRENT_FIX_INTEREST_AREA_DWELL_TIME'].replace(['.'], 0)\naccurate_eye_df['CURRENT_FIX_INTEREST_AREA_DWELL_TIME'] = pd.to_numeric(accurate_eye_df['CURRENT_FIX_INTEREST_AREA_DWELL_TIME'])\n\n# set counter for total fixation count\naccurate_eye_df['total_fixation_count'] = 1\n# Calculate total accurate trials\ntotal_trials = accurate_eye_df.drop_duplicates(['par_ID', 'TRIAL_INDEX'])\ntotal_trial_count = total_trials.groupby(['par_ID'])['TRIAL_INDEX'].count().reset_index(name='total_acc_trials')\n\n# Fixation on object (filter for separate interest areas)\ntarget_fix_filter = accurate_eye_df['CURRENT_FIX_INTEREST_AREA_LABEL'] == 'target_obj'\npair_fix_filter = accurate_eye_df['CURRENT_FIX_INTEREST_AREA_LABEL'] == 'pair_obj'\nneutral_fix_filter = (accurate_eye_df['CURRENT_FIX_INTEREST_AREA_LABEL'] == 'neutral1_obj') | (accurate_eye_df['CURRENT_FIX_INTEREST_AREA_LABEL'] == 'neutral2_obj')\n\nfirst_fix_filter = accurate_eye_df['CURRENT_FIX_INDEX'] == 2 # 1 is fixation\n\n# apply filter and grab appropriate trials\ntarget_fix_trials_raw = accurate_eye_df[target_fix_filter]\npair_fix_trials_raw = accurate_eye_df[pair_fix_filter]\nneutral_fix_trials_raw = accurate_eye_df[neutral_fix_filter]\n\n# Get target fixation trials\ntarget_fix_trials = target_fix_trials_raw[['par_ID', 'TRIAL_INDEX', 'condition', 'CURRENT_FIX_INTEREST_AREA_DWELL_TIME', 'total_fixation_count']]\ntarget_fix_trials.rename(columns={'CURRENT_FIX_INTEREST_AREA_DWELL_TIME': 'target_fix_dur'}, inplace=True)\n\n# Figure out fixation count on target (Replace with dedicated column)\ntarget_fixation_count = target_fix_trials.groupby(['par_ID','TRIAL_INDEX'])['total_fixation_count'].count().reset_index(name='total_fixation_count')\ntarget_fix_trials = target_fix_trials.drop_duplicates()\ntarget_fix_trials = target_fix_trials.drop(columns=['total_fixation_count'])\ntarget_fix_trials = pd.merge(target_fix_trials, target_fixation_count, how='inner', on=['par_ID', 'TRIAL_INDEX'])\ntarget_fix_trials.set_index('par_ID', inplace=True)\n\ntarget_fix_RT = target_fix_trials.groupby(['par_ID', 'condition'])['target_fix_dur'].mean().unstack()\ntarget_fix_count = target_fix_trials.groupby(['par_ID', 'condition'])['total_fixation_count'].mean().unstack()\n\n# Get semantic pair fixation trials\npair_fix_trials = pair_fix_trials_raw[['par_ID', 'TRIAL_INDEX', 'condition', 'CURRENT_FIX_INTEREST_AREA_DWELL_TIME', 'total_fixation_count']]\npair_fix_trials.rename(columns={'CURRENT_FIX_INTEREST_AREA_DWELL_TIME': 'pair_fix_dur'}, inplace=True)\n\n# Figure out fixation count on semantic pair\npair_fixation_count = pair_fix_trials.groupby(['par_ID','TRIAL_INDEX'])['total_fixation_count'].count().reset_index(name='total_fixation_count')\npair_fix_trials = pair_fix_trials.drop_duplicates()\npair_fix_trials = 
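Growing accurate_eye_df with pd.concat inside that row-by-row loop is quadratic in the number of accurate trials; the same filter can be expressed as one inner merge. A sketch, assuming accurate_trial_numbers is indexed by par_ID with a trialNo column as built above:

# one row per correct (participant, trial); par_ID comes back from the index
keys = (accurate_trial_numbers.reset_index()[['par_ID', 'trialNo']]
        .rename(columns={'trialNo': 'TRIAL_INDEX'})
        .drop_duplicates())
accurate_eye_df = eye_df.merge(keys, on=['par_ID', 'TRIAL_INDEX'], how='inner')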
pair_fix_trials.drop(columns=['total_fixation_count'])\npair_fix_trials = pd.merge(pair_fix_trials, pair_fixation_count, how='inner', on=['par_ID', 'TRIAL_INDEX'])\npair_fix_trials.set_index('par_ID', inplace=True)\n\n# Get meaningful data\npair_fix_trials_RT = pair_fix_trials.groupby(['par_ID', 'condition'])['pair_fix_dur'].mean().unstack()\npair_fix_count = pair_fix_trials.groupby(['par_ID', 'condition'])['total_fixation_count'].mean().unstack()\n\n# Get Neutral Pair dwell time\nneutral_fix_trials = neutral_fix_trials_raw[['par_ID', 'TRIAL_INDEX', 'condition', 'CURRENT_FIX_INTEREST_AREA_DWELL_TIME', 'total_fixation_count']]\nneutral_fix_trials.rename(columns={'CURRENT_FIX_INTEREST_AREA_DWELL_TIME': 'neu_fix_dur'}, inplace=True)\n\n# Get neutral distractor fixation trials\nneutral_fixation_count = neutral_fix_trials.groupby(['par_ID','TRIAL_INDEX'])['total_fixation_count'].count().reset_index(name='total_fixation_count')\nneutral_fix_trials = neutral_fix_trials.drop_duplicates()\nneutral_fix_trials = neutral_fix_trials.drop(columns=['total_fixation_count'])\nneutral_fix_trials = pd.merge(neutral_fix_trials, neutral_fixation_count, how='inner', on=['par_ID', 'TRIAL_INDEX'])\nneutral_fix_trials.set_index('par_ID', inplace=True)\n\nneutral_fix_trials_RT = neutral_fix_trials.groupby(['par_ID', 'condition'])['neu_fix_dur'].mean().unstack()\nneutral_fix_count = neutral_fix_trials.groupby(['par_ID', 'condition'])['total_fixation_count'].mean().unstack()\n\n# Combine all three conditions\nall_RT = pd.concat([target_fix_RT, pair_fix_trials_RT, neutral_fix_trials_RT], axis=1)\nall_RT.columns = ['target_neu_RT', 'target_tax_RT', 'target_thm_RT', 'pair_neu_RT', 'pair_tax_RT', 'pair_thm_RT',\n 'neu-pair_neu_RT', 'neu-pair_tax_RT', 'neu-pair_thm_RT']\nall_RT.to_clipboard()\n\n# ANOVA for RT and acc\ntarget_dwell_anova = target_fix_trials.groupby(['par_ID', 'condition'])['target_fix_dur'].mean().reset_index()\ntarget_dwell_ANOVA_RT = pg.rm_anova(data=target_dwell_anova, dv='target_fix_dur', within='condition', subject='par_ID').round(2)\n\ntarget_pairwise_results_RT = pg.pairwise_ttests(data=target_dwell_anova, dv='target_fix_dur', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n\npair_dwell_anova = pair_fix_trials.groupby(['par_ID', 'condition'])['pair_fix_dur'].mean().reset_index()\npair_dwell_ANOVA_RT = pg.rm_anova(data=pair_dwell_anova, dv='pair_fix_dur', within='condition', subject='par_ID').round(2)\n\npair_pairwise_results_RT = pg.pairwise_ttests(data=pair_dwell_anova, dv='pair_fix_dur', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n\nneutral_dwell_anova = neutral_fix_trials.groupby(['par_ID', 'condition'])['neu_fix_dur'].mean().reset_index()\nneutral_dwell_ANOVA_RT = pg.rm_anova(data=neutral_dwell_anova, dv='neu_fix_dur', within='condition', subject='par_ID').round(2)\n\n# ANOVA for fixation count\ntarget_fix_count_anova = target_fix_trials.groupby(['par_ID', 'condition'])['total_fixation_count'].mean().reset_index()\ntarget_fix_count_ANOVA = pg.rm_anova(data=target_fix_count_anova, dv='total_fixation_count', within='condition', subject='par_ID').round(2)\n\npair_fix_count_anova = pair_fix_trials.groupby(['par_ID', 'condition'])['total_fixation_count'].mean().reset_index()\npair_fix_count_ANOVA = pg.rm_anova(data=pair_fix_count_anova, dv='total_fixation_count', within='condition', subject='par_ID').round(2)\n\nneutral_fix_count_anova = neutral_fix_trials.groupby(['par_ID', 
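The count / drop_duplicates / merge round-trip repeated for each interest area above can be collapsed with groupby(...).transform, which broadcasts the per-trial count back onto every row. A small sketch with made-up data:

import pandas as pd

fix = pd.DataFrame({'par_ID': [1, 1, 1, 2],
                    'TRIAL_INDEX': [7, 7, 8, 7],
                    'dwell': [120, 80, 200, 150]})
# one value per row: how many fixations share this (participant, trial) pair
fix['total_fixation_count'] = (fix.groupby(['par_ID', 'TRIAL_INDEX'])['dwell']
                               .transform('size'))
print(fix.drop_duplicates(['par_ID', 'TRIAL_INDEX']))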
'condition'])['total_fixation_count'].mean().reset_index()\nneutral_fix_count_ANOVA = pg.rm_anova(data=neutral_fix_count_anova, dv='total_fixation_count', within='condition', subject='par_ID').round(2)\nneutral_fix_pairwise_results = pg.pairwise_ttests(data=neutral_fix_count_anova, dv='total_fixation_count', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n# Print data and results\nprint('Dwell time on targets \\n', target_fix_RT.mean(), '\\n')\nprint('ANOVA for target dwell time \\n', target_dwell_ANOVA_RT, '\\n\\n')\nprint(\"Pairwise testings for RT \\n\", target_pairwise_results_RT, '\\n')\n\nprint('Dwell time on semantic pairs \\n', pair_fix_trials_RT.mean(), '\\n')\nprint('ANOVA for semantic pair dwell time \\n', pair_dwell_ANOVA_RT, '\\n\\n')\nprint(\"Pairwise testings for RT \\n\", pair_pairwise_results_RT, '\\n')\n\nprint('Dwell time on neutral distractor \\n', neutral_fix_trials_RT.mean(), '\\n')\nprint('ANOVA for neutral distractor dwell time \\n', neutral_dwell_ANOVA_RT, '\\n\\n')\n\n\n# Print data and results\nprint('ANOVA for target fixation count \\n', target_fix_count_ANOVA, '\\n\\n')\nprint('ANOVA for semantic pair fixation count \\n', pair_fix_count_ANOVA, '\\n\\n')\nprint('ANOVA for neutral distractor fixation count \\n', neutral_fix_count_ANOVA, '\\n\\n')\n\n\n# Calculate\n# Final\n# Fixation\n# Duration\n\ntarget_final_fix_trials = target_fix_trials_raw[['par_ID', 'TRIAL_INDEX', 'condition', 'CURRENT_FIX_INDEX', 'CURRENT_FIX_DURATION', 'TRIAL_FIXATION_TOTAL']]\ntarget_final_fix_trials = target_final_fix_trials.drop_duplicates(['par_ID', 'TRIAL_INDEX'], keep='last')\ntarget_final_fix_trials = target_final_fix_trials[(target_final_fix_trials['CURRENT_FIX_INDEX'] - target_final_fix_trials['TRIAL_FIXATION_TOTAL'] == 0) | (target_final_fix_trials['CURRENT_FIX_INDEX'] - target_final_fix_trials['TRIAL_FIXATION_TOTAL'] == -1)]\n# target_final_fix_trials = target_final_fix_trials[(target_final_fix_trials['CURRENT_FIX_INDEX'] - target_final_fix_trials['TRIAL_FIXATION_TOTAL'] == 0)]\ntarget_final_fix_trials.set_index('par_ID', inplace=True)\n\ntarget_final_fix_RT = target_final_fix_trials.groupby(['par_ID', 'condition'])['CURRENT_FIX_DURATION'].mean().unstack()\n\ntarget_final_fix_anova = target_final_fix_trials.groupby(['par_ID', 'condition'])['CURRENT_FIX_DURATION'].mean().reset_index()\n\ntarget_final_fix_ANOVA_RT = pg.rm_anova(data=target_final_fix_anova, dv='CURRENT_FIX_DURATION', within='condition', subject='par_ID').round(2)\ntarget_final_fix_pairwise_results_RT = pg.pairwise_ttests(data=target_final_fix_anova, dv='CURRENT_FIX_DURATION', within='condition',\n subject='par_ID', marginal=True, padjust='bonf')\n\n# target_final_fix_RT.to_clipboard()\n\nprint('Final Fixation duration (ms) \\n', target_final_fix_RT.mean(), '\\n')\nprint('ANOVA for Final Fixation duration \\n', target_final_fix_ANOVA_RT, '\\n\\n')\nprint(\"Pairwise testings for RT \\n\", target_final_fix_pairwise_results_RT, '\\n')\n\nprint(f'\\nShall we draw graphs now?')\n\ntarget_final_fix_ANOVA_RT.to_clipboard()\n# Figure setup\ncolors = [\"#FF221A\", \"#6A9551\", \"#D2AC3A\"]\nconditions_x = ['Neutral', 'Taxonomic', 'Thematic']\nconditions = 3\nsns.set_palette(sns.color_palette(colors))\nsns.set_context('talk')\nsns.set_style('white')\nfig_0, axes_0 = plt.subplots(figsize=(12, 6), nrows=1, ncols=2)\n\n# Graph Parameters\nerrbar_color = 'black'\nerrbar_line_width = 2\nerrbar_capsize = 5\nerrbar_capthick = 2\nfont_color = 'black'\ntrans_param = False\nprops = {'connectionstyle': 'bar', 
'arrowstyle': '-', 'shrinkA': 20, 'shrinkB': 20, 'linewidth': 2,\n \"color\": font_color}\nprops2 = {'connectionstyle': 'bar', 'arrowstyle': '-', 'shrinkA': 25, 'shrinkB': 25, 'linewidth': 2,\n \"color\": font_color}\nprops3 = {'connectionstyle': 'bar', 'arrowstyle': '-', 'shrinkA': 40, 'shrinkB': 40, 'linewidth': 2,\n \"color\": font_color}\n# figure 1a\n# Figure 1a, data\nf_RT_means = RT_df.unstack().mean()\nf_sem_RT_means = RT_df.unstack().sem()\n\n# Draw graph and error bar\naxes_0[0].bar(np.arange(conditions), f_RT_means, color=colors, edgecolor='black', linewidth=2)\naxes_0[0].errorbar(np.arange(conditions), f_RT_means, yerr=f_sem_RT_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_0[0].set_title('RT for Visual Search', size=20, color=font_color)\n\n# x axis stuff\naxes_0[0].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_0, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n# y-axis stuff\naxes_0[0].set_ylabel('RT (ms)', color=font_color)\n\n# Draw significance bars\naxes_0[0].text(0.5, 1380, '*', size=20, color=font_color)\naxes_0[0].annotate('', xy=(0, 1250), xytext=(1, 1250), arrowprops=props2)\naxes_0[0].text(1.5, 1380, '*', size=20, color=font_color)\naxes_0[0].annotate('', xy=(1, 1200), xytext=(2, 1200), arrowprops=props2)\n\naxes_0[0].set_ylim(0, 1500)\n\n# Figure 1b\n# Data\nf_ACC_means = accuracy_df.unstack().mean()\nf_sem_ACC_means = accuracy_df.unstack().sem()\n\n# Draw graph and error bar\naxes_0[1].bar(np.arange(conditions), f_ACC_means, color=colors, edgecolor='black', linewidth=2)\naxes_0[1].errorbar(np.arange(conditions), f_ACC_means, yerr=f_sem_ACC_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_0[1].set_title('Accuracy for Visual Search', size=20, color=font_color)\n\n# x axis stuff\naxes_0[1].set_xlabel('Semantic Conditions', color=font_color)\n\n# y-axis stuff\naxes_0[1].set_ylabel('Accuracy (%)', color=font_color)\naxes_0[1].set_ylim(0, 110)\n\n# Draw significance bars\naxes_0[1].annotate('', xy=(0, 85), xytext=(1, 85), arrowprops=props)\naxes_0[1].text(1.25, 100, '*', size=20, color=font_color)\naxes_0[1].annotate('', xy=(.5, 85), xytext=(2, 85), arrowprops=props3)\nsns.despine()\nplt.tight_layout(h_pad=2.0)\nplt.savefig('f_RT-ACC.png', transparent=trans_param)\nplt.show()\n\n\n# Figure 2a setup\nfig_1, axes_1 = plt.subplots(figsize=(14, 6), nrows=1, ncols=3)\n\n# data\ntarget_RT_means = target_fix_RT.mean()\ntarget_sem_RT_means = target_fix_RT.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_1[0].bar(np.arange(conditions), target_RT_means, color=colors, edgecolor='black', linewidth=2)\naxes_1[0].errorbar(np.arange(conditions), target_RT_means, yerr=target_sem_RT_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_1[0].set_title('Target Fix. 
Time', size=20, color=font_color)\n\n# x axis stuff\n# axes_1[0].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_1, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n# y-axis stuff\naxes_1[0].set_ylabel('Fixation Duration (ms)', color=font_color)\n\n# Draw significance bars\naxes_1[0].text(0.75, 670, '*', size=20, color=font_color)\naxes_1[0].annotate('', xy=(0, 590), xytext=(1.5, 590), arrowprops=props2)\naxes_1[0].annotate('', xy=(1, 580), xytext=(2, 580), arrowprops=props)\n\n# Figure 2b, data\npair_RT_means = pair_fix_trials_RT.mean()\npair_sem_RT_means = pair_fix_trials_RT.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_1[1].bar(np.arange(conditions), pair_RT_means, color=colors, edgecolor='black', linewidth=2)\naxes_1[1].errorbar(np.arange(conditions), pair_RT_means, yerr=pair_sem_RT_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_1[1].set_title('Sem Pair Fix. Time', size=20, color=font_color)\n\n# x axis stuff\n# axes_1[1].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_1, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n# Draw significance bars\naxes_1[1].text(0.5, 350, '*', size=20, color=font_color)\naxes_1[1].annotate('', xy=(0, 250), xytext=(.95, 250), arrowprops=props)\naxes_1[1].text(1.5, 350, '*', size=20, color=font_color)\naxes_1[1].annotate('', xy=(1.05, 250), xytext=(2, 250), arrowprops=props)\n\n# Figure 2c, data\nneutral_RT_means = neutral_fix_trials_RT.mean()\nneutral_sem_RT_means = neutral_fix_trials_RT.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_1[2].bar(np.arange(conditions), neutral_RT_means, color=colors, edgecolor='black', linewidth=2)\naxes_1[2].errorbar(np.arange(conditions), neutral_RT_means, yerr=neutral_sem_RT_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_1[2].set_title('Neutral Distractor Fix. Time', size=20, color=font_color)\n\n# x axis stuff\n# axes_1[2].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_1, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n\n# Add # of participants to graph\naxes_1[2].text(2, -100, 'n = ' + str(total_par), color=font_color)\n\n# limit y axis\naxes_1[0].set_ylim(0, 700)\naxes_1[1].set_ylim(0, 700)\naxes_1[2].set_ylim(0, 700)\n\n# Finalize and print\nsns.despine()\nplt.tight_layout(h_pad=2.0)\nplt.savefig('f_fix_RT.png', transparent=trans_param)\nplt.show()\n\n# Figure 3a setup\nfig_2, axes_2 = plt.subplots(figsize=(14, 6), nrows=1, ncols=3)\n\n# data\ntarget_fix_means = target_fix_count.mean()\ntarget_sem_fix_means = target_fix_count.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_2[0].bar(np.arange(conditions), target_fix_means, color=colors, edgecolor='black', linewidth=2)\naxes_2[0].errorbar(np.arange(conditions), target_fix_means, yerr=target_sem_fix_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_2[0].set_title('Target Fix. 
Count', size=20, color=font_color)\n\n# x axis stuff\nplt.setp(axes_2, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n# y-axis stuff\naxes_2[0].set_ylabel('Fixation Count', color=font_color)\n\n# Figure 2b, data\npair_fix_means = pair_fix_count.mean()\npair_sem_fix_means = pair_fix_count.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_2[1].bar(np.arange(conditions), pair_fix_means, color=colors, edgecolor='black', linewidth=2)\naxes_2[1].errorbar(np.arange(conditions), pair_fix_means, yerr=pair_sem_fix_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_2[1].set_title('Sem Pair Fix. Count', size=20, color=font_color)\n\n# x axis stuff\n# axes_1[1].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_2, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n\n# Figure 2c, data\nneutral_fix_means = neutral_fix_count.mean()\nneutral_sem_fix_means = neutral_fix_count.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_2[2].bar(np.arange(conditions), neutral_fix_means, color=colors, edgecolor='black', linewidth=2)\naxes_2[2].errorbar(np.arange(conditions), neutral_fix_means, yerr=neutral_sem_fix_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_2[2].set_title('Neu. Distractor Fix. Count', size=20, color=font_color)\n\n# x axis stuff\n# axes_1[2].set_xlabel('Semantic Conditions', color=font_color)\nplt.setp(axes_2, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n\n# Add # of participants to graph\naxes_2[2].text(2, -.3, 'n = ' + str(total_par), color=font_color)\n\n# limit y axis\naxes_2[0].set_ylim(0, 2)\naxes_2[1].set_ylim(0, 2)\naxes_2[2].set_ylim(0, 2)\n\n# Finalize and print\nsns.despine()\nplt.tight_layout(h_pad=2.0)\nplt.savefig('f_fix_count.png', transparent=trans_param)\nplt.show()\n\n# Figure 4 setup\nfig_3, axes_3 = plt.subplots(figsize=(6, 6), nrows=1, ncols=1)\n\n# data\ntarget_final_fix_means = target_final_fix_RT.mean()\ntarget_sem_final_fix_means = target_final_fix_RT.sem() # need to fix to within participants)\n\n# Draw graph and error bar\naxes_3.bar(np.arange(conditions), target_final_fix_means, color=colors, edgecolor='black', linewidth=2)\naxes_3.errorbar(np.arange(conditions), target_final_fix_means, yerr=target_sem_final_fix_means, fmt=' ', ecolor=errbar_color,\n elinewidth=errbar_line_width, capsize=errbar_capsize, capthick=errbar_capthick)\n\n# title stuff\naxes_3.set_title('Final Fixation Duration', size=20, color=font_color)\n\n# x axis stuff\nplt.setp(axes_3, xticks=[i for i in range(conditions)], xticklabels=conditions_x)\n\n# y-axis stuff\naxes_3.set_ylabel('Fixation Duration (ms)', color=font_color)\n\n# Add # of participants to graph\naxes_3.text(2, -100, 'n = ' + str(total_par), color=font_color)\n\n# limit y axis\naxes_3.set_ylim(0, 650)\n\n# Finalize and print\nsns.despine()\nplt.tight_layout(h_pad=2.0)\nplt.savefig('f_final_fix_dur.png', transparent=trans_param)\nplt.show()\n\n## NOTES from Malcolm 2009 and 2010\n\n# Proportion of Trials in Which Target ROI Was Fixated First\n# Search initiation time = time from appearance of the search scene until the first saccade away from the initial fixation point (the initial saccade latency) and measures the time needed to begin search.\n# Scanning time = time from the end of first saccade to first fixation on the 
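Several .sem() calls above carry a "need to fix to within participants" note: for a repeated-measures design, plain between-subject SEMs overstate the error bars. A Cousineau (2005) sketch that centers each participant's condition means on the grand mean before taking SEMs; the input is assumed to be a participants x conditions frame such as target_fix_RT:

import pandas as pd

def within_subject_sem(wide: pd.DataFrame) -> pd.Series:
    # wide: one row per participant, one column per condition
    grand_mean = wide.to_numpy().mean()
    # remove each participant's overall level, keep only condition differences
    normed = wide.sub(wide.mean(axis=1), axis=0) + grand_mean
    # (Morey, 2008 additionally scales by sqrt(J / (J - 1)) for J conditions)
    return normed.sem()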
target object and represents the actual search process.\n# Verification time = participant’s gaze duration on the target object, reflecting the time needed to decide that the fixated object is actually the target.\n# Total trial duration = the RT measure reported in most previous visual search studies, is equal to the sum of these three epochs (Figure 1).","repo_name":"josephnah/tnt_incid-mem","sub_path":"analysis_tnt_incid_mem_eyetrack.py","file_name":"analysis_tnt_incid_mem_eyetrack.py","file_ext":"py","file_size_in_byte":24632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19186725861","text":"class Solution:\n def minMoves(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n min_num = min(nums)\n\n moves = 0\n for number in nums:\n moves += (number - min_num)\n \n return moves\ndef main():\n sol = Solution()\n print(sol.minMoves([1, 2, 3]))\n\nif __name__ == '__main__':\n main()\n","repo_name":"pololee/oj-leetcode","sub_path":"problems/p453/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8077047375","text":"import scrapy\n\n\nclass Spider(scrapy.Spider):\n name = \"pyimagesearch\"\n # 'https://learnopencv.com/object-tracking-using-opencv-cpp-python/',\n # start_urls = ['https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/']\n start_urls = ['https://www.pyimagesearch.com/topics/']\n\n def parse(self, response):\n TOPIC_SELECTOR = '.topic'\n for topic in response.css(TOPIC_SELECTOR):\n TOPIC_REF_SELECTOR = '.topic__title a ::attr(href)'\n for topic_ref in topic.css(TOPIC_REF_SELECTOR).extract():\n yield scrapy.Request(\n response.urljoin(topic_ref),\n callback=self.parse_topic\n )\n\n def parse_topic(self, response):\n ARTICLE_SELECTOR = '.post-summary'\n for article in response.css(ARTICLE_SELECTOR):\n ARTICLE_REF_SELECTOR = '.post-summary a ::attr(href)'\n for article_ref in article.css(ARTICLE_REF_SELECTOR).extract():\n yield scrapy.Request(\n response.urljoin(article_ref),\n callback=self.parse_article\n )\n\n NEXT_PAGE_SELECTOR = '.pagination-next a ::attr(href)'\n next_page = response.css(NEXT_PAGE_SELECTOR).extract_first()\n if next_page:\n yield scrapy.Request(\n response.urljoin(next_page),\n callback=self.parse_topic\n )\n\n def parse_article(self, response):\n TITLE_SELECTOR = 'title ::text'\n title = response.css(TITLE_SELECTOR)\n CONTENT_SELECTOR = '.entry-content'\n content = response.css(CONTENT_SELECTOR)\n if content:\n yield {\n 'link': response.url,\n 'name': title.extract_first(),\n 'article': content.extract_first(),\n }\n","repo_name":"Goader/search_engine","sub_path":"scripts/scrapers/pyimageseach.py","file_name":"pyimageseach.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13340955047","text":"from typing import List\n\n\nclass Solution:\n\n def findBorder(self, height: List[int], start_index: int):\n height_len = len(height)\n if start_index >= height_len:\n return None\n for i in range(start_index, height_len - 1):\n if height[i] > height[i + 1]:\n return i\n if height[height_len - 1] >= height[height_len - 2]:\n return height_len - 1\n\n def findRightBorder(self, height: List[int], start_index: int, left_border_val: int):\n if start_index >= len(height):\n return None\n\n if height[start_index] >= left_border_val:\n return 
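The minMoves loop in the record above is a closed form in disguise: incrementing n - 1 elements by one is equivalent to decrementing a single element by one, so every value just has to come down to the minimum.

# equivalent closed form for problem 453
def min_moves(nums):
    return sum(nums) - len(nums) * min(nums)

assert min_moves([1, 2, 3]) == 3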
start_index\n\n max_index = start_index - 1\n max_height = height[max_index]\n height_len = len(height)\n\n for i in range(start_index, height_len):\n if height[i] > max_height:\n max_height = height[i]\n max_index = i\n if max_height >= left_border_val:\n return max_index\n if i < height_len:\n return max_index\n else:\n return None\n\n def calSectionWater(self, height: List[int], start: int, stop: int):\n shortter = min(height[start], height[stop])\n sum = 0\n for i in range(start + 1, stop):\n if shortter > height[i]:\n sum += shortter - height[i]\n return sum\n\n def trap(self, height: List[int]) -> int:\n sum = 0\n index = 0\n while index < len(height):\n left = self.findBorder(height, index)\n right = self.findRightBorder(height, left + 2, height[left])\n if not right:\n break\n sum += self.calSectionWater(height, left, right)\n index = right\n return sum\n\ns = Solution()\nprint(s.trap(\n# [4,2,3]\n# [0,1,0,2,1,0,1,3,2,1,2,1]\n [4,7,7,1,0]\n))\n","repo_name":"furutuki/LeetCodeSolution","sub_path":"0042. Trapping Rain Water/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16099827029","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n# Función para incorporar dentro de las figuras,\n# los valores al final de los barplot\n# Parametro obligatorio la figura\n# Parametro opcional :H V en funcion de diagrama de barra horizontal o vertical\n# Parametro opcional: la separación\n\ndef show_values_on_bars(axs, h_v=\"v\", space=0.4):\n def _show_on_single_plot(ax):\n if h_v == \"v\":\n for p in ax.patches:\n _x = p.get_x() + p.get_width() / 2\n _y = p.get_y() + p.get_height()\n value = float(p.get_height())\n ax.text(_x, _y, value, ha=\"center\")\n elif h_v == \"h\":\n for p in ax.patches:\n _x = p.get_x() + p.get_width() + float(space)\n _y = p.get_y() + p.get_height()\n value = float(p.get_width())\n ax.text(_x, _y, value, ha=\"left\")\n\n if isinstance(axs, np.ndarray):\n for idx, ax in np.ndenumerate(axs):\n _show_on_single_plot(ax)\n else:\n _show_on_single_plot(axs)\n\n\n# Portada con heatmap del streamlit\ndef home(df):\n st.title(\"El potencial económico del teletrabajo\")\n st.write(\"Visión general del teletrabajo.\")\n st.image(\"images/teletrabajo.jpeg\")\n\n with st.beta_expander(\"Idea General\"):\n st.write('El teletrabajo antes del covid se situaba en una media europea de casi el 15% en europa, '\n 'con una clara ventaja en los paises nordicos,con una media del 30%, destacando Paises Bajos con el '\n '45% .')\n st.write('En el caso de España la media precovid se situaba en el 7%, no obstante según el paper del Banco de '\n 'España: '\n 'Artículos analiticos 2/2020 El Teletrabajo en España, '\n 'tenemos un potencial para teletrabajar del 30% del mercado laboral y un 50% en profesiones '\n 'intelectuales')\n st.write('Con un potencial tan alto y una población tan poco adaptada al teletrabajo, además de las ventajas '\n 'como la conciliación etc... 
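The border-scanning trap solution above is fragile: findBorder can return None (which then hits `left + 2`), and findRightBorder's fallback returns from inside the first loop iteration. For comparison, the standard two-pointer formulation of Trapping Rain Water (a well-known alternative, not the author's approach):

def trap(height):
    left, right = 0, len(height) - 1
    left_max = right_max = water = 0
    while left < right:
        # the smaller side bounds the water level at that pointer
        if height[left] < height[right]:
            left_max = max(left_max, height[left])
            water += left_max - height[left]
            left += 1
        else:
            right_max = max(right_max, height[right])
            water += right_max - height[right]
            right -= 1
    return water

assert trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
assert trap([4, 7, 7, 1, 0]) == 0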
'\n 'el planteamiento de este EDA es el siguiente:')\n st.write('Verificar el potencial económico y la posiblidad de elegir tu lugar de residencia en base a '\n 'preferencias personales y no laborales, '\n 'lo que nos va a llevar a analizar los siguientes puntos')\n st.write('-Análisis genérico de la situación del mercado laboral en función de la tasa de paro y los afiliados '\n 'a la seguridad social')\n st.write('-Rendimiento del salario en las diferences provincias, pudiendo valorar el cambio de poder '\n 'adquisitivo según en que provincia estemos ')\n st.write('-Contrastar los precios del mercado inmobiliario para poder valorar donde residir')\n\n st.image(\"images/teletrabajo-españa.jpeg\")\n\n# heatmap inicial para hablar de algunas correlaciones\n plt.figure(figsize=(10, 10))\n sns.heatmap(df.corr(), vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(220, 20, as_cmap=True),\n square=True, linewidths=.5, annot=True)\n plt.savefig('images/heat.jpeg', dpi=400)\n st.image('images/heat.jpeg')\n\n with st.beta_expander(\"Vistazo genérico a los datos\"):\n st.write(df.head())\n\n with st.beta_expander('Conclusiones iniciales:'):\n st.write('-Correlaciones casi perfectas entre m2 escriturado y precio m2, la razón por la que no es perfecta '\n 'como veremos a continuación, es la fuente de los datos')\n st.write('-Correlación directa alta entre salario, precio de compra y coste de vida')\n st.write('Una posible falsa correlación entre tasa de paro y los precios en 2016')\n st.write('Sobre todo ello hablaremos en las siguientes pestañas.')\n\n# Pagina donde analizar el paro en relación a la seguridad socia\ndef pulso(df, alquiler, comunidades):\n\n # -Devuelve tasaparo,\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincias. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. 
Por defecto el propio paro\n def paro(df, start=None, stop=None, orden='Tasa Paro '):\n fysize = 10\n if start != None or stop != None:\n fysize = 5\n fig = plt.figure(figsize=(10, fysize), dpi=100)\n ax = fig.gca()\n tasaparo = sns.barplot(x='Tasa Paro ',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df[:].sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Provincia\")\n ax.set_xlabel(\"Tasa de paro\")\n ax.set_title(\"Tasa de paro por provincias\")\n show_values_on_bars(tasaparo, \"h\", 0.3)\n return tasaparo\n\n st.subheader('Situación mercado laboral')\n st.write('Demos un vistazo a la tasa de paro por provincias:')\n\n # Llamamos a la función, pero dado el funcionamiento de streamlit con seaborn, sería ams viable tener la imagen\n # generada a parte y cargarla.\n paro(df)\n plt.tight_layout()\n plt.savefig('images/tasaparo.jpeg', dpi=500)\n st.image('images/tasaparo.jpeg')\n\n with st.beta_expander('Pulse si quiere mas parámetros'):\n checkmas = st.checkbox('Ver provincias con mas paro')\n checkmenos = st.checkbox('Ver provincias con menos paro')\n if checkmas:\n paro(df, 0, 20)\n plt.tight_layout()\n plt.savefig('images/tasaparomas.jpeg', dpi=500)\n st.image('images/tasaparomas.jpeg')\n if checkmenos:\n paro(df, 20)\n plt.tight_layout()\n plt.savefig('images/tasaparomenos.jpeg', dpi=500)\n st.image('images/tasaparomenos.jpeg')\n\n\n # Devuelve afiliados a la seguridad social\n def afiliados_SS(df, start=None, stop=None, orden='Afiliados SS'):\n fysize = 6\n if start != None or stop != None:\n fysize = 3\n fig = plt.figure(figsize=(14, fysize), dpi=100)\n ax = fig.gca()\n afiliados = sns.barplot(x='Afiliados SS',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df[:].sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"comunidades\")\n ax.set_xlabel(\"Afiliados\")\n ax.set_title(\"Afiliados SS por comunidad\")\n show_values_on_bars(afiliados, \"h\", 0.3)\n return afiliados\n\n st.write('Veamos los afiliados a la seguridad social por comunidades')\n afiliados_SS(comunidades)\n plt.tight_layout()\n plt.savefig('images/afiliados.jpeg', dpi=500)\n st.image('images/afiliados.jpeg')\n\n with st.beta_expander('Conclusiones iniciales Tasa paro y afiliación'):\n st.write('Si observamos por número de afiliados:')\n st.write('Destacan 3 comunidades, Cataluña, Madrid y Andalucia. No obstante si contrastamos con la tasa de '\n 'paro media, en Andalucía la media entre provincias es de mas del 20%, mas de 5 puntos por encima '\n 'que las otras dos comunidades lo que podría significar un menor dinamismo empresarial, '\n 'y un mercadolaboral con menos oportunidades')\n st.write('Si miramos solamente a Tasa de Paro, podemos observar que en las provincias con menor paro, '\n 'tampoco existe un gran número de afiliados a la ss, lo que podria ser un sintoma de estancamiento o '\n 'poca flexibilidad para absorver a mas trabajadores')\n st.write('En conclusión aunque estamos cruzando unos datos algo genéricos, y requeriria un mayor análisis, '\n 'en este contexto del EDA, no era el punto mas importante. Pudiendo concluir que para la elección de '\n 'donde trabajar en base a estos indicadores necesitaríamos buscar un equilibrio entre un volumen de '\n 'afiliados alto, y una tasa de paro por debajo de la media española. 
15,5%')\n\n# Pagina donde analizar el salario en relación al coste de vida\ndef rendimiento(df,comunidades):\n # -Devuelve compsalario, comparativa del salario medio por comunidad\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincia. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. Por defecto el salario medio\n\n\n def compararsalario(df, start=None, stop=None, orden='Salario Medio'):\n fysize = 6\n if start != None or stop != None:\n fysize = 3\n fig = plt.figure(figsize=(14, fysize), dpi=100)\n ax = fig.gca()\n compsalario = sns.barplot(x='Salario Medio',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Comunidad\")\n ax.set_xlabel(\"Salario medio en € y bruto\")\n ax.set_title(\"Salario medio por comunidad\")\n show_values_on_bars(compsalario, \"h\", 0.3)\n return compsalario\n\n\n # -Devuelve compcoste, comparativa de los coste de vida\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincia. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. Por defecto el coste de vida\n\n def costevida(df, start=None, stop=None, orden='Coste de vida'):\n fysize = 6\n if start != None or stop != None:\n fysize = 3\n fig = plt.figure(figsize=(12, fysize), dpi=100)\n ax = fig.gca()\n compcoste = sns.barplot(x='Coste de vida',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Comunidad\")\n ax.set_xlabel(\"Coste de vida\")\n ax.set_title(\"Coste medio comunidad\")\n show_values_on_bars(compcoste, \"h\", 0.3)\n return compcoste\n\n\n # -Devuelve poderad , equiparar salarios en base a la media española.\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincia. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. 
Por defecto el salario medio\n\n comunidades['poder adquisitivo'] = (((100 - comunidades['Coste de vida']) / 100) + 1) * comunidades['Salario Medio']\n comunidades['poder adquisitivo'] = comunidades['poder adquisitivo'].round()\n\n def poder_adquisitivo(df, start=None, stop=None, orden='poder adquisitivo'):\n fysize = 6\n if start != None or stop != None:\n fysize = 3\n fig = plt.figure(figsize=(14, fysize), dpi=100)\n ax = fig.gca()\n poderad = sns.barplot(x='poder adquisitivo',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Comunidad\")\n ax.set_xlabel(\"Poder adquisisitivo\")\n ax.set_title(\"Poder adquisisitivo en referencia a Coste Vida España\")\n show_values_on_bars(poderad, \"h\", 0.3)\n return poderad\n\n\n st.subheader('Rendimiento salario como poder adquisistivo según comunidad')\n st.write('El objetivo es analizar el salario en función del coste de vida')\n checksalario = st.checkbox('Ver salario por comunidades')\n checkscoste = st.checkbox('Ver coste de vida por comunidades')\n checkpoderad = st.checkbox('Ver poder adquisitivo por comunidades')\n\n if checksalario:\n compararsalario(comunidades)\n plt.tight_layout()\n plt.savefig('images/afiliados.jpeg', dpi=500)\n st.image('images/afiliados.jpeg')\n if checkscoste:\n costevida(comunidades)\n plt.tight_layout()\n plt.savefig('images/coste.jpeg', dpi=500)\n st.image('images/coste.jpeg')\n if checkpoderad:\n poder_adquisitivo(comunidades)\n plt.tight_layout()\n plt.savefig('images/poder.jpeg', dpi=500)\n st.image('images/poder.jpeg')\n\n with st.beta_expander('Pulse para calcular rendimiento en base a otra comunidad'):\n def trabajas_vives(df, provincia_trabajas, provincia_vives):\n mask1 = df['provincias'] == provincia_trabajas\n mask2 = df['provincias'] == provincia_vives\n modificador = ((df[mask1]['Coste de vida'].iloc[0] - df[mask2]['Coste de vida'].iloc[0]) / 100) + 1\n poder_adquisitivo = df[mask1]['S Medio'].iloc[0] * modificador\n return poder_adquisitivo\n\n provincias = list(df['provincias'].unique())\n\n menutv = st.selectbox(\"Selecciona la provincia para la que trabajas:\", provincias)\n menuresi = st.selectbox(\"Selecciona la provincia en al que resides o quieres residir:\", provincias)\n st.write('El salario medio en: ',menutv , 'de', df['S Medio'][df['provincias'] == menutv])\n st.write('Equivale a un poder adquisitivo de :',trabajas_vives(df, menutv, menuresi), 'en ', menuresi)\n\n with st.beta_expander('Conclusiones:'):\n st.write('Aunque no se ve en las gráficas mostradas, si se consulta los datos usados del INE para calcular el '\n 'salario vemos que la diferencia de la mediana de los salarios por comunidad es muy parecida '\n 'hablamos de un abanico de menos de 50€ brutos.')\n st.write('No obstante el salario medio si que tiene grandes diferencias, esto implica que los mínimos y '\n 'máximos son bastante diferentes por comundiad.')\n st.write('Si añadimos el coste de vida, obtenido a partir de las variables de renta familiar disponible per '\n 'cápita y del coste de la vivienda. 
Estas dos variables muestran una buena capacidad predictiva para '\n 'las paridades del poder adquisitivo (PPA) subnacionales estimadas por el Bureau of Economic '\n 'Analysis de los Estados Unidos, obtendremos:')\n st.write('Si recordamos el heatmap de correlación, coste de vida y precio de vivienda tiene una correlación '\n 'directa, se confirma al conocer como se calcula el coste de vida')\n st.write('Y si analizamos los salarios en base al coste de vida, vemos que el poder adquisitivo cambia '\n 'radicalmente para algunas comunidades, comunidades como Madrid, pasan a ser la penúltima en poder '\n 'adquisitivo en base a media del coste de vida en España')\n st.write('Para calcular en base del poder adquisitivo, hemos calculado un modificador en base al coste de '\n 'vida en España y multiplicandolo por el salario medio.')\n\n# Pagina donde ver alquiler m2, importe medio vivienda, tamaño medio vivienda, m2 compra\ndef vivir(df, alquiler):\n\n st.subheader('Comprativa de precios de alquiler, compra y tamaño medio por comunidades')\n st.write('Objetivo es tener una visión general entre las diferencias territoriales en el entorno inmobiliario')\n reversed_alquiler = alquiler.iloc[::-1]\n provincias = list(df['provincias'].unique())\n\n # Visualización histórico m2 alquiler provincia\n def histalquiler(reversed_alquiler, provincia):\n trace1 = go.Scatter(\n x=reversed_alquiler['Meses'],\n y=reversed_alquiler[provincia],\n name='Salario Medio unidad 10',\n mode='lines',\n marker=dict(color='rgba(16, 112, 2, 0.8)'),\n text=reversed_alquiler['Madrid'])\n data = [trace1, ]\n\n layout = dict(title='Datos grupales provincias',\n xaxis=dict(title='Provincias', ticklen=5)\n )\n fig = go.Figure(data=data, layout=layout)\n st.plotly_chart(fig)\n\n\n # -Devuelve compalquiler, comparativa en base al precio del alquiler por m2 por provincia.\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincia. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. 
Por defecto Precio m2 Alquiler\n\n def compararalquilerm2(df, start=None, stop=None, orden='Precio m2 Alquiler'):\n fysize = 12\n if start != None or stop != None:\n fysize = 5\n fig = plt.figure(figsize=(14, fysize), dpi=100)\n ax = fig.gca()\n compalquiler = sns.barplot(x='Precio m2 Alquiler',\n y='provincias',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Provincia\")\n ax.set_xlabel(\"Precio m2 alquiler\")\n ax.set_title(\"Precio del metro cuadrado de alquiler por provincias\")\n show_values_on_bars(compalquiler, \"h\", 0.3)\n return compalquiler\n\n # Visualizaciñon conjunta de m2 compra\n def compararcompram2(df, start=None, stop=None, orden='Precio compra m2'):\n fysize = 15\n if start != None or stop != None:\n fysize = 7\n fig = plt.figure(figsize=(fysize, 4), dpi=100)\n ax = fig.gca()\n compm2v = sns.barplot(x='provincias',\n y='Precio compra m2',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Precio compra m2\")\n ax.set_xlabel(\"Provincia\")\n ax.set_title(\"Precio medio compra m2\")\n plt.xticks(rotation=80)\n return compm2v\n\n\n # -Devuelve compventas, comparativa del precio medio de compra escriturado\n # -argumento obligatorio dataframe con los datos generados en exploracion de datos\n # -argumentos opcional, start y stop, para seleccionar un conjunto concreto de provincia. Por defecto todo el conjunto.\n # -argumento opcional, orden, para organizar la salida en funcion de otra columna. Por defecto el VM\n\n def compararcompras(df, start=None, stop=None, orden='VM'):\n fysize = 15\n if start != None or stop != None:\n fysize = 7\n fig = plt.figure(figsize=(fysize, 4), dpi=100)\n ax = fig.gca()\n compventas = sns.barplot(x='provincias',\n y='VM',\n ax=ax,\n palette='cool',\n data=df.sort_values(by=orden, ascending=False)[start:stop],\n ci=None)\n ax.set_ylabel(\"Precio medio compra\")\n ax.set_xlabel(\"Provincia\")\n ax.set_title(\"Precio medio de compra escriturada\")\n plt.xticks(rotation=80)\n return compventas\n\n # Tamaño medio vivienda escriturada\n df['Tamaño medio construido'] = (df['VM'] / df['Precio compra m2']).astype(int)\n\n def comparartam(df):\n trace2 = go.Scatter(\n x=df['provincias'],\n y=df['Tamaño medio construido'],\n name='Tamaño medio construido',\n mode='lines',\n marker=dict(color='rgba(16, 112, 2, 0.8)'),\n text=df['Tamaño medio construido'])\n data = [trace2]\n\n layout = dict(title='Tamaño medio construido por provincia',\n xaxis=dict(title='Provincias', ticklen=5)\n )\n fig = go.Figure(data=data, layout=layout)\n st.plotly_chart(fig)\n\n checkalquilerm2 = st.sidebar.checkbox('Comparar precio m2 de alquiler vivienda')\n checkcompram2 = st.sidebar.checkbox('Comparar precio m2 de compra vivienda')\n checkcompras = st.sidebar.checkbox('Comparar precio medio escriturado compra vivienda')\n checktam = st.sidebar.checkbox('Comparar tamaño medio vivienda escriturada')\n\n if checkalquilerm2:\n compararalquilerm2(df)\n plt.tight_layout()\n plt.savefig('images/alquilerm2.jpeg', dpi=500)\n st.image('images/alquilerm2.jpeg')\n if checkcompram2:\n compararcompram2(df)\n plt.tight_layout()\n plt.savefig('images/compram2.jpeg', dpi=500)\n st.image('images/compram2.jpeg')\n if checkcompras:\n compararcompras(df)\n plt.tight_layout()\n plt.savefig('images/compra.jpeg', dpi=500)\n st.image('images/compra.jpeg')\n if checktam:\n comparartam(df)\n\n with st.beta_expander(\"Selecciona la provincia que deseas ver:\"):\n 
menuprov = st.selectbox('Provincias :', provincias)\n histalquiler(reversed_alquiler, menuprov)\n\n with st.beta_expander('Conclusiones:'):\n st.write('Una vez hemos visto como cambia drásticamente el poder adquisitivo según provincia de trabajo y '\n 'provincia de residencia se hace interesante para poder tomar una decisión valorar el mercado '\n 'inmobiliario para el alquiler y/o al compra:')\n st.write('Podemos observar una correlación directa entre el precio/m2 de alquiler y el de precio/m2 compra. '\n 'Por lo que de este punto no tendremos una decisión clara de si alquilar o comprar, y será una '\n 'opción ams personal.')\n st.write('Hemos obtenido el tamaño medio escriturado al calcular el precio medio escriturado y precio m2.'\n 'Tenemos que tener en cuenta 2 puntos, primero las fuentes:')\n st.write('Mientras que el precio de compra escriturado es fuente oficial INE, el precio del m2, la fuente es '\n 'idealista. Idealista calcula sus precios en base al anuncio, no al precio final, de ahi que la '\n 'correlación siendo directa no sea perfecta')\n st.write('La segunda que podemos observar, que el tamaño medio a nivel nacional se mueve entre 75 y 105 m2 '\n 'construidos, con un posible outlier en Cáceres que merece una posble revision en el futuro.')\n st.write('Los datos usados son: precio m2 con última actualización en JUNIO, y datos del INE con misma fecha')\n\ndef cuotafija(cap, i, t):\n cuota = cap * (((1 + i) ** t) * i) / (((1 + i) ** t) - 1)\n return cuota\n\n# Cuadro de amortización\ndef cuadro_amortizacion(cap, t, i, cuota):\n cap_pen = cap\n mes = 1\n cuad_amort = pd.DataFrame(columns= ['Mes', 'Cuota', 'Amortización', 'Pendiente'])\n\n while mes <= t:\n intereses = (cap_pen * i)\n amortizacion = cuota - intereses\n cap_pen = cap_pen - amortizacion\n cuad_amort = cuad_amort.append({'Mes': mes,\n 'Cuota': int(cuota),\n 'Amortización':int(amortizacion),\n 'Pendiente':int(cap_pen) }, ignore_index=True)\n mes += 1\n return cuad_amort\n\n\n# Calculadora hipotecaria\ndef calculadora_hipoteca():\n # Constantes\n AJD = 0\n IVA = 0.1\n IGC = 0.065\n\n # Variables de Control\n impuesto = 0\n entrada = 0\n interes = 0\n importe_solicitado = 0\n correcto = False\n\n # Lista de comunidades para menu y selección del key del diccionario\n comunidades = [\"Andalucia\", \"Aragón\", \"Canarias\", \"Cantabria\", \"Castilla y León\",\n \"Castilla-La Mancha\", \"Cataluña\", \"Ceuta\", \"Melilla\", \"Comunidad de Madrid\",\n \"Navarra\", \"Comunidad Valenciana\", \"Extremadura\", \"Galicia\",\n \"Islas Baleares\", \"La Rioja\", \"País Vasco\", \"Asturias\", \"Murcia\"]\n\n # Diccionario key = comunidad, primer valor impuesto aplicable en la fórmula,\n # segundo valor interes general comunidad\n # tercer valor lista con el valor impositivo especial\n comunidadesdic = {\"Andalucia\": [0.08, \"del 8 al 10%\", [\"7 % para vivienda habitual de no más de 130.000 €.\",\n \"3,5% para vivienda habitual de no más de 130.000 € destinada a un menor de 35 años\",\n \"o de no más de 180.000 € destinada a una persona con discapacidad superior al 33 % o\",\n \"miembro de una familia numerosa.\\n\"]],\n \"Aragón\": [0.08, \"del 8 al 10%\", [\"Bonificación del 12,5 % por vivienda habitual de no \",\n \"más de 100.000 € para menores de 35 años\\n\",\n \"con discapacidad superior al 65% o mujeres víctimas de violencia de género.\\n\",\n \"Bonificación del 50 % por compra de vivienda habitual para familias numerosas.\\n\"]],\n \"Canarias\": [0.065, \"del 6.5%\", [\"5 % para la compra de vivienda habitual.\",\n \"1 % 
para la compra de vivienda habitual de: familias numerosas o monoparentales\",\n \"y personas con discapacidad física.\\n20 % de bonificación para menores de 35 años \",\n \"y mujeres víctimas de violencia de género.\\n\"]],\n \"Cantabria\": [0.08, \"del 8 al 10%\", [\"5,5 % para vivienda protegida\\n\",\n \"5 % para la compra de vivienda habitual de familias numerosas, menores de 30 años,\",\n \"personas con minusvalía \\n superior al 33 % o viviendas que se vayan a rehabilitar.\\n\",\n \"4 % para la compra de vivienda habitual de personas con minusvalía superior al 65 %.\"]],\n \"Castilla y León\": [0.08, \"del 8%\",\n [\"4 % para la compra de vivienda habitual destinada a familia numerosa,\",\n \"adquiriente o familiar\\n\",\n \"con discapacidad superior al 65%, primera vivienda de menores de 36 años\",\n \"o vivienda protegida\\n\",\n \"si es la primera vivienda de los adquirientes.\\n\"]],\n \"Castilla-La Mancha\": [0.08, \"del 8\",\n [\"6 % para la compra de la primera vivienda habitual del\",\n \"contribuyente, siempre que \\n\",\n \"no supere los 180.000 € y financie al menos el 50%.\\n\"]],\n \"Cataluña\": [0.1, \"del 10 al 11%\", [\"7 % para viviendas de protección oficial.\\n\",\n \"5 % para familia numerosas, menores de 32 años\",\n \"o personas con minusvalía superior al 65 %.\\n\"]],\n \"Ceuta\": [0.06, \"del 6%\", [\"50 % de bonificación si el inmueble está situado en Ceuta\\n\"]],\n \"Melilla\": [0.06, \"del 6%\",\n [\"50 % de bonificación si el inmueble está situado en Melilla\\n\"]],\n \"Comunidad de Madrid\": [0.06, \"del 6%\", [\n \"4 % para la compra de una vivienda habitual destinada a familia numerosa.\\n\",\n \"10 % de bonificación si se trata de la compra de una vivienda habitual.\\n\"]],\n \"Navarra\": [0.06, \"del 6%\", [\"5 % para los primeros 180.303,63 € de una vivienda\",\n \"habitual destinada para familias de dos o más hijos.\"]],\n \"Comunidad Valenciana\": [0.1, \"del 10%\", [\n \"8 % para viviendas de protección pública de régimen general o\\n primera vivienda\",\n \"habitual de menores de 35 años.\\n\",\n \"4% para viviendas habituales de protección oficial de régimen\\n especial, \",\n \"familias numerosas o personas con un grado de \\nminusvalía física superior al\",\n \"65 % o psíquico superior al 33 %.\\n\"]],\n \"Extremadura\": [0.08, \"del 8 al 11%\",\n [\"7 % para viviendas cuyo valor sea inferior a los 122.000 € y la\\n\",\n \"suma de las bases imponibles general y del ahorro del\\n contribuyente sea inferior\",\n \"a 19.000 € en declaración individual o 24.000 € en declaración conjunta.\",\n \"4 % par viviendas de protección oficial con precio máximo legal\\n\"]],\n \"Galicia\": [0.1, \"del 10%\", [\n \"7 % para la compra de la vivienda habitual de familias con\\n patrimonio inferior a los 200.000 €.\\n\",\n \"3 % para la compra de la vivienda habitual de personas con\\n minusvalía reconocida\",\n \"superior al 65 %, familia numerosa con\\n patrimonio inferior a los 400.000 € o menores \",\n \"de 36 años con\\n patrimonio inferior a los 200.000 €.\\n\"]],\n \"Islas Baleares\": [0.08, \"del 8 al 11%\",\n [\"5 % para la compra de la primera vivienda habitual, siempre que\\n\",\n \"no supere los 200.000 €.\\n\"]],\n \"La Rioja\": [0.07, \"del 7%\", [\"Tiene tipo reducido del 5 % o el 3% para casos especiales\"]],\n \"País Vasco\": [0.04, \"del 4%\",\n [\"2,50 % para familias numerosas, viviendas de no más de 120 m2\\n\",\n \"(o 300m2 de parcela en unifamiliares) y compra de vivienda habitual\\n\"]],\n \"Asturias\": [0.08, \"del 8 al 10%\", [\"3 % 
para viviendas de protección oficial.\\n\"]],\n \"Murcia\": [0.08, \"del 8%\", [\"4 % para viviendas protegidas de régimen especial.\\n\",\n \"3 % para vivienda habitual de familias numerosas o menores de\\n35 años,\",\n \"siempre que su valor no supere los 300.000 € en el \\nprimer caso y 150.000 € en el segundo.\\n\"]]}\n\n st.title('Calculadora hipotecaria')\n st.subheader('Rellene el formulario para obtener los siguientes datos:')\n st.write('Información sobre el ahorro necesario para la compra de vivienda, y la diversificación de dicho ahorro entre impuestos, entrada, notaría y gestoría...')\n st.write('Información sobre cuota de la hipoteca, en base a importe solicitado, plazo en años y % de interés fijo')\n st.write('Tabla de amortización')\n\n # Formulario que recoge los datos necesarios para calcular cuota hipoteca, tabla de amortización\n # Recoge: val_viv , será el importe de la vivienda sin impuestos\n # menunuevo, si la vivienda es nueva o de segunda mano, importa para aplicar ITP o IVA\n # menucomunidad, almacena comunidad para saber que ITP concreto aplicar\n # menuviv, si la vivienda es priemra o segunda, así calcular si la entrada es del 20% o 30%\n # intereses , tipo de interes fijo aplicable\n # tiempo, plazo en años para luego calcular número de cuotas.\n with st.form(key=\"my_form\"):\n val_viv = st.number_input('Introduzca el precio de compra sin impuestos')\n\n menunuevo = st.selectbox(\"La vivienda es nueva o de segunda mano\", ('', 'Nueva', 'Segunda mano'))\n if menunuevo == 'Nueva':\n impuesto = IVA\n AJD = 0.015\n\n else:\n menucomunidad = st.selectbox(\"Seleccione la comunidad de compra:\", comunidades)\n impuesto = comunidadesdic[menucomunidad][0]\n\n menuviv = st.selectbox(\"Seleccione si es primera vivienda o segunda:\",\n ('', 'Primera vivienda', 'Segunda vivienda'))\n if menuviv == 'Primera vivienda':\n entrada = 0.2\n elif menuviv == 'Segunda vivienda':\n entrada = 0.3\n\n importe_solicitado = st.number_input('Que importe desea solicitar?')\n\n interes = st.number_input(' A que % de intereses quiere el cálculo')\n interes = (interes / 100) / 12\n\n tiempo = st.slider('A cuantos años quiere la hipoteca?', min_value=1, max_value=30)\n tiempo = tiempo * 12\n\n submitted = st.form_submit_button(\"Ver informe\")\n\n # Controla que no se genere el informe sino has introducido los parámetros principales\n if (val_viv != 0) and (impuesto != 0) and (importe_solicitado != 0) and (interes != 0) and (entrada != 0):\n correcto = True\n\n # Crea tablas de informe e impresiones por apntalla de datos\n if submitted and correcto:\n ahorro_nec = ((val_viv * entrada) + (val_viv * impuesto) + 2000 + (AJD * val_viv))\n ahorro_necesario = [int(ahorro_nec)]\n entradaviv = [int(val_viv * entrada)]\n impuestoviv = [int(val_viv * impuesto)]\n notaria_gestoria = [2000]\n ajdviv = [int(AJD * val_viv)]\n calculadora = pd.DataFrame({'AHORRO NECESARIO': ahorro_necesario,\n 'ENTRADA': entradaviv,\n 'IMPUESTOS': impuestoviv,\n 'NOTARIA Y GESTORIA': notaria_gestoria})\n if AJD != 0:\n calculadora['AJD'] = ajdviv\n cuota = cuotafija(importe_solicitado, interes, tiempo)\n porcentajesol = (importe_solicitado * 100) / val_viv\n resumenhip = pd.DataFrame(columns=['Precio Compra', 'Importe Hip.', 'Cuota Hip.', 'Nº Cuotas', 'Porcentaje Solicitado %'])\n resumenhip = resumenhip.append({'Precio Compra': val_viv,\n 'Importe Hip.': importe_solicitado,\n 'Cuota Hip.': int(cuota),\n 'Nº Cuotas': tiempo,\n 'Porcentaje Solicitado %': porcentajesol}, ignore_index=True)\n st.table(resumenhip)\n if 
menunuevo == 'Nueva':\n impuesto = IVA\n AJD = 0.015\n st.write(\n \"\\n El impuesto a pagar es del 10% salvo en canarias 6,5% sobre el valor de compra de la casa\")\n st.write(\n \"\\n El impuesto del AJD va del 0,5% al 1,5% que es la cuantía mas común, usaremos la comun por simplificar\")\n else:\n if menucomunidad != '':\n st.write(\"El impuesto es el ITP Impuesto de Transmision Patrimonial.\")\n st.write(\"En su comunidad:\", menucomunidad, \"el tipo general es\",\n comunidadesdic[menucomunidad][1])\n st.write(\"Con tipos especiales de:\")\n tipo_especial = \"\"\n for i in comunidadesdic[menucomunidad][2]:\n tipo_especial += i\n st.write(tipo_especial)\n if (importe_solicitado > (val_viv * (1 - entrada))) and (\n importe_solicitado <= ((val_viv * ((1 - entrada) + 0.1)))):\n st.write(\"CUIDADO: VA A NECESITAR MAS TASACIÓN QUE VALOR DE COMPRA PARA TENER ALGUNA POSIBILIDAD\")\n elif importe_solicitado > (((val_viv * (1 - entrada) + 0.1))) and importe_solicitado <= val_viv:\n st.write(\"Salvo funcionario o doble garantía hipotecaria es muy dificil que le den su hipoteca\\n\",\n \"Una doble garantia hipotecaria es un garantia limitada, que se aplica sobre un segundo bien inmueble\\n\",\n \"que absorve el exceso de financiación requerida\")\n elif importe_solicitado == (val_viv * (1 - entrada)):\n st.write('Perfecto, el importe encaja con los topes generales que tienen los bancos')\n elif importe_solicitado < (val_viv * (1 - entrada)):\n st.write(\"Genial es muy favorable pedir menos que los topes bancarios estandar\")\n else:\n st.write(\"Seguiré con los calculos, pero mas del 100% no dan los bancos\")\n st.write('La cuota de su hipoteca es de:', int(cuota), '€/mes aprox')\n\n st.table(calculadora)\n st.table(cuadro_amortizacion(importe_solicitado, tiempo, interes, cuota))\n else:\n st.warning('Debe completar cada casilla del formulario para poder hacer los cálculos necesarios')\n\ndef conclusiones():\n st.title('Conclusiones Finales')\n st.write('Hemos podido valorar, que efectivamente, el teletrabajo puede suponer un potencial ecónomico en las '\n 'economías personales pudiendo generar un diferencial de poder económico en base al coste de vida medible.'\n 'Lo que conlleva que si la decisión fuese puramente económica nuestro valor de referencia seria salario-'\n 'coste de vida.')\n st.write('Si la decisión es por lugar de residencia deseado, por ejemplo vivir en al costa, valoraríamos poder '\n 'adquisitivo de esas zonas, contrastando con costes de vivienda')\n st.write('No obstante tener en cuenta que por simplificación el ambito usado ha sido comunidad y provincia cuando'\n 'si se quiere tomar una decisión más fundamentada, habría que mirar al ambito municipios, dado que en un '\n 'ámbito tan grande no deja de ser generico y orientativo, dado que dentro de comundiades existen grandes '\n 'diferencias entre los diferentes municipios.')\n","repo_name":"Jcornejo85/EDA_El_Potencial_Economico_Teletrabajo","sub_path":"src/EDA_Teletrabajo_functions.py","file_name":"EDA_Teletrabajo_functions.py","file_ext":"py","file_size_in_byte":38892,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16252401057","text":"'''\nCreated on Jun 1, 2010\n\n@author: jnaous\n'''\nfrom django.db import models\nfrom expedient.common.permissions.models import \\\n ExpedientPermission, PermissionInfo, ObjectPermission, PermissionUser\nfrom django.contrib.contenttypes.models import ContentType\nfrom expedient.common.permissions.exceptions import 
PermissionCannotBeDelegated,\\\n PermissionRegistrationConflict, PermissionDoesNotExist\nfrom django.http import Http404\nfrom expedient.common.permissions.middleware import PermissionMiddleware\n\ndef _stringify_func(f):\n if callable(f):\n return \"%s.%s\" % (f.__module__, f.__name__)\n else:\n return f\n\ndef register_permission_for_obj_or_class(obj_or_class, permission):\n \"\"\"\n Add L{ObjectPermission}s for a model.\n \n @param obj_or_class: the object instance or class which we wish to add \n the permission for.\n @param permission: the permission's name or the L{ExpedientPermission} instance\n \"\"\"\n \n if not isinstance(obj_or_class, models.Model):\n # assume it's a model class, so get the contenttype for it.\n obj_or_class = ContentType.objects.get_for_model(obj_or_class)\n \n if not isinstance(permission, ExpedientPermission):\n try:\n permission = ExpedientPermission.objects.get(name=permission)\n except ExpedientPermission.DoesNotExist:\n raise PermissionDoesNotExist(permission)\n\n return ObjectPermission.objects.get_or_create_from_instance(\n obj_or_class,\n permission=permission,\n )\n\ndef create_permission(name, view=None):\n \"\"\"\n Create a new L{ExpedientPermission}.\n \n @param name: The name of the permission. Must be globally unique.\n @type name: L{str}\n @keyword view: View to redirect to if a permission is missing. Default None.\n The view function should have the signature::\n \n view(request, permission, user, target_obj_or_class, redirect_to=None)\n \n where C{permission} is an L{ExpedientPermission} instance, C{user} is\n the user object (not necessarily a C{django.contrib.auth.models.User}\n instance), and C{target_obj_or_class} is the object instance or class\n that the user does not have the permission C{permission} for.\n C{redirect_to} is a field used to indicate the original URL that caused\n the L{PermissionDenied} exception. The view should redirect there\n when done.\n \n @type view: Full import path of the view as L{str} or the view function\n object itself. Note that the view must be importable by its a path\n (i.e. 
cannot use nested functions).\n \n @return: the new L{ExpedientPermission}.\n \"\"\"\n view = _stringify_func(view)\n # check if the permission is registered with a different view somewhere else\n perm, created = ExpedientPermission.objects.get_or_create(\n name=name, defaults=dict(view=view))\n if not created and perm.view != view:\n raise PermissionRegistrationConflict(name, view, perm.view)\n \n return perm\n\ndef give_permission_to(receiver, permission, obj_or_class,\n giver=None, delegatable=False):\n \"\"\"\n Gives permission over object or class to a permission user instance.\n \n @param receiver: The permission user receiving the permission.\n @type receiver: object registered as permission user or L{PermissionUser}\n @param permission: The permission's name or the permission object\n @type permission: L{str} or L{ExpedientPermission} instance\n @param obj_or_class: The object or the class to give permission to.\n @type obj_or_class: model instance or class\n @keyword giver: The permission user giving the permission.\n @type giver: object registered as permission user or L{PermissionUser}\n @keyword delegatable: Can the receiver in turn give the permission out?\n Default is False.\n @type delegatable: L{bool}\n \"\"\"\n \n if not isinstance(obj_or_class, models.Model):\n # assume it's a model class, so get the contenttype for it.\n obj_or_class = ContentType.objects.get_for_model(obj_or_class)\n \n if not isinstance(receiver, PermissionUser):\n receiver, created = PermissionUser.objects.get_or_create_from_instance(\n receiver,\n )\n \n if not isinstance(permission, ExpedientPermission):\n try:\n permission = ExpedientPermission.objects.get(name=permission)\n except ExpedientPermission.DoesNotExist:\n raise PermissionDoesNotExist(permission)\n\n # Is someone delegating the permission?\n if giver:\n # Is the giver a PermissionUser already?\n if not isinstance(giver, PermissionUser):\n giver, created = PermissionUser.objects.get_or_create_from_instance(\n giver,\n )\n # Just created the PermissionUser, so giver cannot have the\n # permission to delegate\n if created:\n raise PermissionCannotBeDelegated(giver, permission.name)\n \n # Check the giver's permissions\n try:\n perm_info = giver.permissioninfo_set.all().get(\n obj_permission__permission=permission,\n obj_permission__object_type=ContentType.objects.get_for_model(\n obj_or_class),\n obj_permission__object_id=obj_or_class.id,\n can_delegate=True,\n )\n obj_perm = perm_info.obj_permission\n except PermissionInfo.DoesNotExist:\n raise PermissionCannotBeDelegated(giver, permission.name)\n else:\n obj_perm, creatd = ObjectPermission.objects.get_or_create_from_instance(\n obj_or_class, permission=permission)\n \n pi, created = PermissionInfo.objects.get_or_create(\n obj_permission=obj_perm,\n user=receiver,\n defaults=dict(can_delegate=delegatable),\n )\n if not created and pi.can_delegate != delegatable:\n pi.can_delegate = delegatable\n pi.save()\n\ndef get_user_from_req(request, *args, **kwargs):\n '''\n Get the user profile from the request. 
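In give_permission_to above, the closing get_or_create-then-save dance (note also the unused, apparently mistyped creatd variable a few lines earlier) can be collapsed with Django's update_or_create, available since Django 1.7. A sketch under the record's own model names:

pi, created = PermissionInfo.objects.update_or_create(
    obj_permission=obj_perm,
    user=receiver,
    defaults={"can_delegate": delegatable},
)
# update_or_create fetches or creates the row and, when it already
# exists, applies `defaults` and saves in one call.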
This function is helpful when\n using the require_*_permission_for_view decorators.\n\n For example::\n \n @require_objs_permissions_for_view(\n [\"can_view_obj_detail\"],\n get_user_from_req,\n get_objects_from_filter_func(Obj, 1),\n [\"GET\"],\n )\n def view_obj_detail(request, obj_id):\n ...\n '''\n return request.user\n\ndef get_queryset(klass, index, filter=\"pk\"):\n \"\"\"\n Returns a function that can be used for the require_*_permission_for_view\n decorators to get a queryset from some argument.\n \n The returned function has a signature (*args, **kwargs) and mainly does\n the following::\n \n klass.objects.filter(**{filter: arg})\n \n where C{arg} is obtained from the arguments. If C{index} is an\n C{int}, C{arg} is assumed to be positional. Otherwise, it is assumed to be\n a keyword.\n \n For example::\n \n @require_obj_permission_for_view(\n [\"can_view_obj_detail\"],\n get_user_from_req,\n get_object_from_filter_func(Obj, 1),\n [\"GET\"],\n )\n def view_obj_detail(request, obj_id):\n ...\n \n @param klass: The class of the object to be returned.\n @type klass: class\n @param index: location of the id in the arguments when the arguments are\n given as (*args, **kwargs).\n @type index: C{int} for positional, hashable for keyword.\n @keyword filter: a filter to be used for obtaining the object.\n @type filter: C{str}\n \n @return: A callable that returns an object from (*args, **kwargs)\n \"\"\"\n \n def wrapper(*args, **kwargs):\n if type(index) == int:\n arg = args[index]\n else:\n arg = kwargs[index]\n return klass.objects.filter(**{filter: arg})\n \n return wrapper\n\ndef get_queryset_from_class(klass):\n \"\"\"\n Returns a function usable as the C{target_func} of the\n L{require_objs_permissions_for_view} decorator. The returned function\n returns the C{ContentType} queryset for a class. This can be used to\n enforce class level permissions on views.\n \n @param klass: the model class for which we want the queryset. \n \"\"\"\n def target_func(*args, **kwargs):\n ct = ContentType.objects.get_for_model(klass)\n return ContentType.objects.filter(pk=ct.pk)\n return target_func\n\ndef get_queryset_from_id(klass, id):\n \"\"\"\n Returns a function usable as a C{target_func} parameter. 
The returned\n function returns a C{QuerySet} containing one object with the given C{id}.\n \n @param klass: the class of the queryset's model.\n @param id: the object's id.\n \"\"\"\n def target_func(*args, **kwargs):\n return klass.objects.filter(id=id)\n return target_func\n\ndef get_object_from_ids(ct_id, id):\n \"\"\"\n Get an object from the ContentType id and from the object's id.\n \n @param ct_id: ContentType's id for the object class.\n @param id: object's id.\n \"\"\"\n try:\n ct = ContentType.objects.get_for_id(ct_id)\n except ContentType.DoesNotExist:\n raise Http404()\n try:\n return ct.get_object_for_this_type(pk=id)\n except ct.model_class().DoesNotExist:\n raise Http404()\n\ndef require_objs_permissions_for_url(url, perm_names, user_func,\n target_func, methods=[\"GET\", \"POST\"]):\n \"\"\"\n Convenience wrapper around L{PermissionMiddleware}.\n \"\"\"\n PermissionMiddleware.add_required_url_permissions(\n url, perm_names, user_func, target_func, methods)\n","repo_name":"fp7-ofelia/ocf","sub_path":"optin_manager/src/python/openflow/common/permissions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9683,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"21990991506","text":"from typing import List\nfrom os import path, walk, listdir\n\nfrom stink.helpers import MemoryStorage\n\n\nclass Grabber:\n \"\"\"\n Collects the specified files from the specified paths.\n \"\"\"\n def __init__(self, paths: List[str], file_types: List[str], check_sub_folders: bool = False):\n\n self.__paths = paths\n self.__file_types = file_types\n self.__check_sub_folders = check_sub_folders\n\n self.__storage = MemoryStorage()\n self.__folder = \"Grabber\"\n\n def __grab_files(self) -> None:\n \"\"\"\n Collects the specified files from the specified paths.\n\n Parameters:\n - None.\n\n Returns:\n - None.\n \"\"\"\n for item in self.__paths:\n\n if path.isfile(item):\n\n if not any(item.endswith(file_type) for file_type in self.__file_types):\n continue\n\n self.__storage.add_from_disk(item, path.join(self.__folder, item))\n\n elif path.isdir(item):\n\n if self.__check_sub_folders:\n for folder_name, _, filenames in walk(item):\n for filename in filenames:\n\n if not any(filename.endswith(file_type) for file_type in self.__file_types):\n continue\n\n self.__storage.add_from_disk(path.join(folder_name, filename), path.join(self.__folder, filename))\n else:\n for filename in listdir(item):\n\n if not any(filename.endswith(file_type) for file_type in self.__file_types):\n continue\n\n self.__storage.add_from_disk(path.join(item, filename), path.join(self.__folder, filename))\n\n def run(self) -> List:\n \"\"\"\n Launches the grabber module.\n\n Parameters:\n - None.\n\n Returns:\n - None.\n \"\"\"\n try:\n\n self.__grab_files()\n\n return self.__storage.get_data()\n\n except Exception as e:\n print(f\"[Grabber]: {repr(e)}\")\n","repo_name":"user-sspmynxdvb/stink","sub_path":"stink/utils/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"24393304077","text":"money = ['Q', 'D', 'N', 'P']\r\nmoney_cnt = [0, 0, 0, 0]\r\nunit = [25, 10, 5, 1]\r\n\r\nT = int(input())\r\nfor t in range(1, T+1):\r\n amount = int(input())\r\n for i in range(4):\r\n money_cnt[i], amount = divmod(amount, unit[i])\r\n print(*money_cnt)","repo_name":"andtomorrow/algorithm","sub_path":"백준/Bronze/2720. 
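The Grabber.__grab_files walk in the record above repeats the same suffix test in three branches (file, recursive dir, flat dir); pathlib can express all three in one pass. A hedged alternative sketch, not the module's actual API: iter_matching is a hypothetical helper, and it assumes the file_types list holds dotted suffixes such as '.txt':

from pathlib import Path
from typing import Iterator, List

def iter_matching(paths: List[str], suffixes: List[str],
                  recursive: bool = False) -> Iterator[Path]:
    # Yield every file whose suffix matches, whether the input path is a
    # file or a directory; '**/*' makes glob() descend into sub-folders.
    for item in map(Path, paths):
        if item.is_file() and item.suffix in suffixes:
            yield item
        elif item.is_dir():
            for f in item.glob('**/*' if recursive else '*'):
                if f.is_file() and f.suffix in suffixes:
                    yield f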
세탁소 사장 동혁/세탁소 사장 동혁.py","file_name":"세탁소 사장 동혁.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41202710749","text":"#!/usr/bin/env python\nimport math \nimport numpy as np\nimport rospy\nimport matplotlib.pyplot as plt\nfrom std_msgs.msg import String\nfrom time import sleep\nfrom scipy import signal\ndata = np.genfromtxt(fname='/home/zgl/Desktop/xr1.txt');\ndata = data[1500:1600]\nWn = 4/214\nb, a = signal.butter(2, Wn, 'lowpass') #配置滤波器 8 表示滤波器的阶数\nr1 = np.array([1,2])\nr2 = np.array([3,4])\n\nr3 = np.concatenate((r1,r2),axis = None)\n\n\nprint(r3)\n\nfiltedData = signal.filtfilt(b, a, data) #data为要过滤的信号\nplt.plot(filtedData)\nplt.plot(data)\nplt.show()\nrospy.init_node('flow_visualization_listener', anonymous=True)\nrot = []\n\ndef GetRotation(data):\n global rot\n l = data.data\n li = list(l.split(\",\"))\n rot = [float(li[0]),float(li[1])]\n print(rot)\n \ndef listener():\n rospy.Subscriber(\"rotation\", String, GetRotation, queue_size=10)\n \n rospy.spin()\n \nif __name__ == '__main__':\n listener()\n\n","repo_name":"Guanlan-gkd/finger_vision_master","sub_path":"scripts/mpu_tes.py","file_name":"mpu_tes.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11482022393","text":"import components\nimport constants\nimport heapq\nfrom collections import defaultdict\nfrom operator import itemgetter\n\n#building a trainset from the dataset\ntrainset = components.load_model(constants.TRAINSET)\n\n\n#loading the trained model (matrix)\nmodel_filename = constants.KNNBASICMODEL\nsimilarity_matrix = components.load_model(model_filename)\n\nuser_id = int(input(\"enter the user id \\n\"))\n\n#calculating by using 20 nearest neighbors\nk = 20\n\n#finding the top 20 rated movies by user\ntest_subject_IID = trainset.to_inner_uid(user_id)\ntest_subject_ratings = trainset.ur[test_subject_IID]\nk_neighbours = heapq.nlargest(k, test_subject_ratings, key= lambda x: x[1])\n\n#will thrwo keyerror if we use a normal dictionary since we cannot search with a non-existent key in a normal dict\n#finding similarities of each element in k_neighbours and storing them by assigning each of them a score\n#to improvde the accuracy of the score modifying the default score as score*(rating/5.0) \ncandidates = defaultdict(float)\n\nfor itemID , rating in k_neighbours:\n similarities = similarity_matrix[itemID]\n for innerID, score in enumerate(similarities):\n candidates[innerID] += score*(rating / 5.0)\n\nwatched = []\nfor itemID, rating in trainset.ur[test_subject_IID]:\n watched.append(itemID)\n\nrecommendation = []\n\nposition = 0\n\n\n#candidates have a structure of innerid : score hence we need to sort candidates descending order of score\nfor itemID,_ in sorted(candidates.items(), key=itemgetter(1), reverse=True):\n if not itemID in watched:\n recommendation.append(components.movieid_to_name(trainset.to_raw_iid(itemID)))\n position+=1\n if ( position > 10) : break\n\nfor rec in recommendation:\n print(\"Movie :\", rec)","repo_name":"AjiLeight/movielens-recommendation-tries","sub_path":"FIndSimilaritiesKNNBasic.py","file_name":"FIndSimilaritiesKNNBasic.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12299941015","text":"from django.urls import path, include\nfrom . 
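The recommender record above already uses heapq.nlargest to pick the k rated neighbours, but then fully sorts every candidate only to stop after the top results (and its position > 10 check actually lets an eleventh title through). nlargest works for that final step too and avoids the full O(n log n) sort. A sketch using the record's own candidates/watched names:

import heapq

watched_set = set(watched)                     # O(1) membership tests
unseen = ((iid, score) for iid, score in candidates.items()
          if iid not in watched_set)
for item_id, score in heapq.nlargest(10, unseen, key=lambda kv: kv[1]):
    print("Movie :", components.movieid_to_name(trainset.to_raw_iid(item_id)))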
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\napp_name = 'HOD'\nurlpatterns = [\n path('/', include([\n path('addFaculty', views.addFaculty.as_view(), name=\"addFaculty\"),\n path('HODHome', views.HODHome, name=\"HODHome\"),\n path('addCourseExitSurvey', views.addCourseES.as_view(), name=\"addCourseES\"),\n path('CourseExitSurveys', views.courseES.as_view(), name=\"courseES\"),\n path('addDepartmentFeedbackSurvey', views.addDeptFS.as_view(), name=\"addDepartmentFeedbackSurvey\"),\n path('DepartmentFeedbackSurveys', views.deptFS.as_view(), name=\"DepartmentFeedbackSurveys\"),\n path('addGraduationExitSurvey', views.addGradES.as_view(), name=\"addGraduationExitSurvey\"),\n path('GraduationExitSurveys', views.gradES.as_view(), name=\"GraduationExitSurveys\"),\n path('delete_items//', views.delete_items, name=\"delete_items\"),\n path('queries/', include([\n path('FDPqueries', views.FDPqueries.as_view(), name=\"FDPqueries\"),\n path('FDPqueryResult///', views.FDPqueryResult.as_view(), name=\"FDPqueryResult\"),\n path('Workshopqueries', views.Workqueries.as_view(), name=\"Workshopqueries\"),\n path('WorkshopqueryResult///', views.WorkshopqueryResult.as_view(), name=\"WorkshopqueryResult\"),\n path('Paperqueries', views.Paperqueries.as_view(), name=\"Paperqueries\"),\n path('PaperqueryResult//', views.PaperqueryResult.as_view(), name=\"PaperqueryResult\"),\n path('GuestLecturequeries', views.GLqueries.as_view(), name=\"GuestLecturequeries\"),\n path('GuestLecturequeryResult//', views.GuestLecturequeryResult.as_view(), name=\"GuestLecturequeryResult\"),\n path('OnlineCoursequeries', views.Coursequeries.as_view(), name=\"OnlineCoursequeries\"),\n path('OnlineCoursequeryResult//', views.OnlineCoursequeryResult.as_view(), name=\"OnlineCoursequeryResult\"),\n path('Webinarqueries', views.Webinarqueries.as_view(), name=\"Webinarqueries\"),\n path('WebinarqueryResult///', views.WebinarqueryResult.as_view(), name=\"WebinarqueryResult\"),\n path('Sttpqueries', views.Sttpqueries.as_view(), name=\"Sttpqueries\"),\n path('SttpqueryResult///', views.SttpqueryResult.as_view(), name=\"SttpqueryResult\"),\n ])),\n path('departmentQueries/', include([\n path('Paperqueries', views.PaperDeptQueries.as_view(), name=\"PaperDeptQueries\"),\n path('Paperqueriesresult/', views.PaperDeptQueriesResult.as_view(), name=\"PaperDeptQueriesResult\"),\n path('Fdpqueries', views.FdpDeptQueries.as_view(), name=\"FdpDeptQueries\"),\n path('Fdpqueriesresult/', views.FdpDeptQueriesResult.as_view(), name=\"FdpDeptQueriesResult\"),\n path('Workshopqueries', views.WorkDeptQueries.as_view(), name=\"WorkDeptQueries\"),\n path('Workshopqueriesresult/', views.WorkDeptQueriesResult.as_view(), name=\"WorkDeptQueriesResult\"),\n path('GuestLecturequeries', views.GlDeptQueries.as_view(), name=\"GlDeptQueries\"),\n path('GuestLecturequeriesresult/', views.GlDeptQueriesResult.as_view(), name=\"GlDeptQueriesResult\"),\n path('Webinarqueries', views.WebiDeptQueries.as_view(), name=\"WebiDeptQueries\"),\n path('Webinarqueriesresult/', views.WebiDeptQueriesResult.as_view(), name=\"WebiDeptQueriesResult\"),\n path('Coursequeries', views.CourseDeptQueries.as_view(), name=\"CourseDeptQueries\"),\n path('Coursequeriesresult/', views.CourseDeptQueriesResult.as_view(), name=\"CourseDeptQueriesResult\"),\n path('Sttpqueries', views.SttpDeptQueries.as_view(), name=\"SttpDeptQueries\"),\n path('Sttpqueriesresult/', views.SttpDeptQueriesResult.as_view(), name=\"SttpDeptQueriesResult\"),\n 
path('PEqueries', views.PEQueries.as_view(), name=\"PEQueries\"),\n path('PEQueryResult///', views.PEQueryResult.as_view(), name=\"PEQueryResult\"),\n ])),\n path('studentQueries/', include([\n path('completeSTUQuery',views.completeSTUQuery.as_view(),name='completeSTUQuery'),\n path('completeSTUResult/',views.completeSTUResult.as_view(),name='completeSTUResult'),\n path('Paperqueries', views.stuPaperQueries.as_view(), name=\"StudentPaperQueries\"),\n path('PaperqueryResult/', views.stuPaperQueryResult.as_view(), name=\"StudentPaperqueryResult\"),\n path('Coursequeries', views.stuOnlineCourseQueries.as_view(), name=\"StudentOnlineQueries\"),\n path('OnlineCoursequeryResult/', views.stuOnlineCourseQueryResult.as_view(),\n name=\"StudentCoursequeryResult\"),\n path('Competitionqueries', views.stuCompQueries.as_view(), name=\"StudentCompetitionQueries\"),\n path('CompetitionqueryResult/', views.stuCompQueryResult.as_view(),\n name=\"StudentCompetitionqueryResult\"),\n\n path('GREqueries', views.GREQueries.as_view(), name=\"GREQueries\"),\n path('GREqueryResult/', views.GREQueryResult.as_view(), name=\"GREQueryResult\"),\n path('studentsUpdated', views.studentsUpdated.as_view(), name=\"studentsUpdated\"),\n path('studentsUpdatedResult/', views.studentsUpdatedResult.as_view(), name=\"studentsUpdatedResult\"),\n path('studentsNotUpdated', views.studentsNotUpdated.as_view(), name=\"studentsNotUpdated\"),\n path('studentsNotUpdatedResult/', views.studentsNotUpdatedResult.as_view(), name=\"studentsNotUpdatedResult\"),\n path('TOEFLqueries', views.TOEFLQueries.as_view(), name=\"TOEFLQueries\"),\n path('TOEFLqueryResult/', views.TOEFLQueryResult.as_view(), name=\"TOEFLQueryResult\"),\n path('GATEqueries', views.GATEQueries.as_view(), name=\"GATEQueries\"),\n path('GATEqueryResult/', views.GATEQueryResult.as_view(), name=\"GATEQueryResult\"),\n path('Startupqueries', views.StartupQueries.as_view(), name=\"StartupQueries\"),\n path('StartupqueryResult/', views.StartupQueryResult.as_view(), name=\"StartupQueryResult\"),\n path('Internshipqueries', views.InternshipQueries.as_view(), name=\"InternshipQueries\"),\n path('InternshipqueryResult//', views.InternshipQueryResult.as_view(),\n name=\"InternshipQueryResult\"),\n path('Placementqueries', views.PlacementQueries.as_view(), name=\"PlacementQueries\"),\n path('PlacementqueryResult/', views.PlacementQueryResult.as_view(), name=\"PlacementQueryResult\"),\n path('Projectqueries', views.ProjectQueries.as_view(), name=\"ProjectQueries\"),\n path('SocialProjectqueryResult/', views.ProjectQueryResult.as_view(), name=\"SocialProjectQueryResult\"),\n path('Graduationqueries', views.GraduationQueries.as_view(), name=\"GraduationQueries\"),\n path('GraduationQueryResult/-', views.GraduationQueryResult.as_view(), name=\"GraduationQueryResult\"),\n path('PEStuqueries', views.PEStuQueries.as_view(), name=\"PEStuQueries\"),\n path('PEStuQueryResult////', views.PEStuQueryResult.as_view(), name=\"PEStuQueryResult\"),\n ])),\n ])),\n]\n\n","repo_name":"veerjakadam2/CoeusFinalDraft","sub_path":"HOD/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38825312313","text":"\"\"\" Test for the detector1 pipeline using NIRISS image mode, starting with\n an uncal file. 
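A formatting note on the HOD/urls.py record above: the empty-looking segments such as path('delete_items//', ...) and the bare path('/', include([...])) suggest the '<int:...>'/'<str:...>' converter tokens were stripped as markup when the file was scraped, so the routes are not valid exactly as shown. Since the record sets app_name = 'HOD', every route name resolves under that namespace; a minimal usage sketch (any converter arguments the real patterns take would be passed as kwargs, but those names are lost from the record):

from django.urls import reverse

# 'HOD' is the namespace declared via app_name in the record.
url = reverse("HOD:HODHome")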
The charge_migration and ramp fitting output\n products are saved for comparisons for those two steps.\n\"\"\"\n\nimport pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.stpipe import Step\n\n\n@pytest.fixture(scope=\"module\")\ndef run_detector1(rtdata_module):\n \"\"\"Run calwebb_detector1 pipeline on NIRISS imaging data.\"\"\"\n rtdata = rtdata_module\n\n rtdata.get_data(\"niriss/jw01094001002_02107_00001_nis_uncal.fits\")\n\n # Run detector1 pipeline on an _uncal files\n args = [\"calwebb_detector1\", rtdata.input,\n \"--steps.charge_migration.skip=False\",\n \"--steps.charge_migration.save_results=True\",\n \"--steps.ramp_fit.save_results=True\",\n \"--steps.persistence.save_trapsfilled=False\",\n ]\n\n Step.from_cmdline(args)\n\n\n@pytest.mark.bigdata\n@pytest.mark.parametrize(\"suffix\", [\"charge_migration\", \"rate\", \"rateints\"])\ndef test_niriss_image_detector1(run_detector1, rtdata_module, fitsdiff_default_kwargs, suffix):\n \"\"\"Regression test of detector1 pipeline performed on NIRISS imaging data.\n \"\"\"\n _assert_is_same(rtdata_module, fitsdiff_default_kwargs, suffix)\n\n\ndef _assert_is_same(rtdata_module, fitsdiff_default_kwargs, suffix):\n \"\"\"Assertion helper for the above tests\"\"\"\n rtdata = rtdata_module\n rtdata.input = \"jw01094001002_02107_00001_nis_uncal.fits\"\n output = f\"jw01094001002_02107_00001_nis_{suffix}.fits\"\n rtdata.output = output\n\n rtdata.get_truth(f\"truth/test_niriss_image/{output}\")\n\n # Set tolerances so the crf, rscd and rateints file comparisons work across\n # architectures\n fitsdiff_default_kwargs[\"rtol\"] = 1e-4\n fitsdiff_default_kwargs[\"atol\"] = 1e-4\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n","repo_name":"spacetelescope/jwst","sub_path":"jwst/regtest/test_niriss_image.py","file_name":"test_niriss_image.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"62"} +{"seq_id":"19171741826","text":"# Kullanıcıdan 5 sayı iste, bunların ortalamasını hesapla ve ortalamaya en yakın sayıyı(5 sayı içinden) ekrana yazdır\n# ( Daha düzgün çözüm versiyonu )\n\nsayilar = []\ntoplam = 0\n\n# Kullanıcıdan 5 adet sayı girişi al ve bunları 'sayilar' listesine ekle.\nfor _ in range(5):\n sayi = int(input(\"Sayı gir: \"))\n sayilar.append(sayi)\n toplam += sayi\n\n# 5 sayının ortalamasını hesapla.\nortalama = toplam / 5\n\n# Sayıları küçükten büyüğe sırala.\nsayilar.sort()\n\n# Ortalamaya en yakın sayıyı bul.\nen_yakin_sayi = sayilar[0]\nfor sayi in sayilar:\n if abs(ortalama - sayi) < abs(ortalama - en_yakin_sayi):\n en_yakin_sayi = sayi\n\n# Sonuçları ekrana yazdır.\nprint(\"Sayılar:\", sayilar)\nprint(\"Ortalama:\", ortalama)\nprint(\"Ortalama en yakın sayı:\", en_yakin_sayi)\n","repo_name":"ced3j/simple-python","sub_path":"çıkmış sorular/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40700793771","text":"#\n# @lc app=leetcode id=53 lang=python3\n#\n# [53] Maximum Subarray\n#\n\n# @lc code=start\nclass Solution:\n def maxSubArray(self, nums):\n # 扫描法:\n # 从列表第一个元素开始遍历\n # 分别保存至此最大值(maxval)和以该元素为终点的列表的值(res)\n # 如果res<0, 那么该部分的值对于后边列表的总和增大没有帮助, 重置res\n # maxval = max(maxval, res)\n if len(nums) == 1:\n return nums[0]\n else:\n i = 1\n curr = nums[0]\n maxval = nums[0]\n while i < len(nums):\n if curr < 0:\n curr = nums[i]\n else:\n curr += 
nums[i]\n maxval = max(curr, maxval)\n i+= 1\n return maxval\n \n# @lc code=end\ndef maxSubArray(nums):\n # 动态规划:\n # 定义sum[i]为: 以i为结点的最长字符串的和\n # 以nums[i]为结尾的最大子序列和只能是以下两种情况之一:\n # 1. 如果以nums[i-1]为结尾的最大和子序列>0: sum[i] = sum[i - 1] + nums[i]\n # 2. 如果以nums[i-1]为结尾的最大和子序列<0: sum[i] = nums[i]\n # -> 状态转移方程为:sum[i] = max(sum[i - 1] + nums[i], nums[i])\n maxval = nums[0]\n sums = nums[0]\n for i in range(len(nums)):\n if sums <= 0:\n sums = nums[i]\n else: \n sums = sums + nums[i]\n maxval = max(sums, maxval)\n return maxval\n\n\n\ns = Solution()\ns.maxSubArray([-2,-1,3,9,-9, 15,-3,20])\n\nmaxSubArray([-2,-1,3,9,-9, 15,-3,20])","repo_name":"RyanYin04/LeetCode","sub_path":"53.maximum-subarray.py","file_name":"53.maximum-subarray.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8457646677","text":"from __future__ import absolute_import, unicode_literals\nimport logging\nimport sys\n\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\n\ndef show_help():\n logging.info(\"Check_shape.py -option [-option] \")\n logging.info(\" -geometry Geometry file \")\n logging.info(\" -vis Enable visualization \")\n logging.info(\" -batch Batch execution \")\n\n\ndef run():\n cnt = 0\n geo = None\n vis = False\n batch = False\n for i in sys.argv:\n cnt = cnt + 1\n c = i.upper()\n if c.find('BATCH') < 2 and c.find('BATCH') >= 0:\n batch = True\n elif c[:4] == '-GEO':\n geo = sys.argv[cnt]\n elif c[:4] == '-VIS':\n vis = True\n\n if not geo:\n show_help()\n sys.exit(1)\n\n import DDG4\n kernel = DDG4.Kernel()\n # Configure UI\n geant4 = DDG4.Geant4(kernel, tracker='Geant4TrackerCombineAction')\n if batch:\n ui = geant4.setupCshUI(typ=None, ui=None, vis=None)\n kernel.UI = 'UI'\n else:\n ui = geant4.setupCshUI(vis=vis)\n kernel.loadGeometry(geo)\n # Configure field\n geant4.setupTrackingField(prt=True)\n # Now build the physics list:\n geant4.setupPhysics('')\n kernel.physicsList().enableUI()\n DDG4.setPrintLevel(DDG4.OutputLevel.DEBUG)\n #\n ui.Commands = [\n '/ddg4/ConstructGeometry/printVolume /world_volume_1',\n '/ddg4/ConstructGeometry/printMaterial Air',\n '/ddg4/ConstructGeometry/printMaterial Vacuum',\n '/ddg4/UI/exit'\n ]\n kernel.NumEvents = 0\n kernel.configure()\n kernel.initialize()\n kernel.run()\n kernel.terminate()\n\n\n# Main entry point:\nif __name__ == \"__main__\":\n run()\n","repo_name":"AIDASoft/DD4hep","sub_path":"examples/ClientTests/scripts/Check_Air.py","file_name":"Check_Air.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"62"} +{"seq_id":"35035683266","text":"def dec_to_base(num,base): #Maximum base - 36\n base_num = \"\"\n while num>0:\n dig = int(num%base)\n if dig<10:\n base_num += str(dig)\n else:\n base_num += chr(ord('A')+dig-10) #Using uppercase letters\n num //= base\n\n base_num = base_num[::-1] #To reverse the string\n return base_num\n\ndef binary_to_octal(binary):\n binary = \"\".join(binary)\n zeros = len(binary) - len(binary.lstrip('0'))\n x = dec_to_base(int(binary, 2), 4) \n x = \"\".join( [ str(int(i)*3) for i in x ] ) \n if x != \"0\" : octal = \"0\"*zeros + x #.zfill(len(binary)//3*3)\n else : octal = \"0\"*zeros \n return [ int(i) for i in octal ] \n\ndef octal_to_binary(octal):\n octal = \"\".join(octal)\n zeros = len(octal) - len(octal.lstrip('0'))\n d = [0,0,1,1,1,2,2,2,3,3,3]\n x = [ str( d[int(i)] ) for i in octal ]\n x = format(int( 
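The Chinese comments in the LeetCode 53 record above describe two equivalent approaches: a scan that restarts the running sum once it drops below zero, and the DP recurrence sum[i] = max(sum[i-1] + nums[i], nums[i]), i.e. Kadane's algorithm. A compact sketch with the record's own test input:

def max_sub_array(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)    # extend the current run or restart at x
        best = max(best, cur)
    return best

assert max_sub_array([-2, -1, 3, 9, -9, 15, -3, 20]) == 35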
\"\".join(x), 4), 'b')\n if x != \"0\" : binary = \"0\"*zeros + x #.zfill(len(octal)*3)\n else : binary = \"0\"*zeros\n return binary\n\n#binary_to_octal(\"0000101\")\n#octal_to_binary(x)\n","repo_name":"Venkatesh2304/asjkdh","sub_path":"PHY/oct_conv.py","file_name":"oct_conv.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7497596600","text":"import sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QCoreApplication\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom commons.Constant import const\nfrom commons.GetCOVIDData import getCountryData\nfrom draw.DistrictChart import countryDataByRegion, drawChartByRegion\nfrom draw.DramMapByRegion import dramMapByRegion\nfrom draw.DrawRegionalComparisonHistogram import drawChartColumn\nfrom draw.StatisticalHighFrequencyWord import dramMapHotWords\n\n\ndef runCountryDataByRegion():\n countryDataByRegion(countryData, region)\n pass\n\n\ndef runDrawChartByRegion():\n drawChartByRegion(countryData, region)\n pass\n\n\ndef runDrawChartColumn():\n drawChartColumn(countryData, region)\n pass\n\n\ndef getApp():\n return QApplication(sys.argv)\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.setFixedSize(682, 340)\n MainWindow.setWindowIcon(QIcon(const.WINDOW_ICO_PATH))\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(9, 20, 664, 27))\n font = QtGui.QFont()\n font.setPointSize(20)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)\n self.plainTextEdit.setGeometry(QtCore.QRect(10, 70, 661, 51))\n font = QtGui.QFont()\n font.setPointSize(20)\n self.plainTextEdit.setFont(font)\n self.plainTextEdit.setObjectName(\"plainTextEdit\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(10, 210, 321, 41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton.setFont(font)\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(350, 150, 321, 41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton_2.setFont(font)\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(10, 150, 321, 41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton_3.setFont(font)\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_4.setGeometry(QtCore.QRect(350, 210, 321, 41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton_4.setFont(font)\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_5.setGeometry(QtCore.QRect(10, 270, 321, 41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton_5.setFont(font)\n self.pushButton_5.setObjectName(\"pushButton_5\")\n self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_6.setGeometry(QtCore.QRect(350, 270, 321, 
41))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.pushButton_6.setFont(font)\n self.pushButton_6.setObjectName(\"pushButton_6\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"疫情爬虫及可视化项目\"))\n self.label.setText(_translate(\"MainWindow\", \"Python 地区(四川)疫情爬虫与新闻高频词云可视化项目\"))\n self.plainTextEdit.setPlainText(_translate(\"MainWindow\", \"项目提供以下功能,点击功能按钮进行运行\"))\n self.pushButton.setText(_translate(\"MainWindow\", \" 绘制地区各城市分类柱状图\"))\n self.pushButton.setStyleSheet('''\n color:white;\n\t background-color: black;\n\t text-align: center;\n\t border-radius:5px\n ''')\n self.pushButton.clicked.connect(runCountryDataByRegion)\n self.pushButton_2.setText(_translate(\"MainWindow\", \"绘制地区各城市疫情对比柱状图\"))\n self.pushButton_2.clicked.connect(runDrawChartColumn)\n self.pushButton_3.setText(_translate(\"MainWindow\", \"绘制地区各城市感染总数柱状图\"))\n self.pushButton_3.clicked.connect(runDrawChartByRegion)\n self.pushButton_4.setText(_translate(\"MainWindow\", \"绘制地区疫情统计地图\"))\n self.pushButton_4.clicked.connect(self.runDramMapByRegion)\n self.pushButton_5.setText(_translate(\"MainWindow\", \"绘制疫情热门词汇展示\"))\n self.pushButton_5.clicked.connect(self.runDramMapHotWords)\n self.pushButton_6.setText(_translate(\"MainWindow\", \"退出项目\"))\n self.pushButton_6.clicked.connect(QCoreApplication.instance().quit)\n pass\n\n def runDramMapByRegion(self):\n dramMapByRegion(countryData, region)\n self.plainTextEdit.setPlainText(\"生成成功,前往 {}文件夹下查看网页地图\".format(const.SAVE_MAP_PATH))\n pass\n\n def runDramMapHotWords(self):\n dramMapHotWords(const.SAVE_TXT_PATH)\n self.plainTextEdit.setPlainText(\"生成成功,前往 {}文件夹下查看词云地图\".format(const.SAVE_MAP_PATH))\n pass\n\n\nif __name__ == \"__main__\":\n countryData = getCountryData(const.REQUEST_URL)\n region = '四川'\n app = getApp()\n MainWindow = QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"xiaoandx/reptile","sub_path":"src/file/ui/WindowControl v1.0.py","file_name":"WindowControl v1.0.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"62"} +{"seq_id":"70113238278","text":"#!/usr/bin/env python\n\"\"\"active failover base testsuite\"\"\"\nimport pprint\nimport time\nfrom selenium_ui_test.test_suites.base_selenium_test_suite import BaseSeleniumTestSuite\nfrom selenium_ui_test.pages.navbar import NavigationBarPage\n\nfrom selenium_ui_test.pages.replication_page import ReplicationPage\n\n\nclass ActiveFailoverBaseTestSuite(BaseSeleniumTestSuite):\n \"\"\"testsuite to be run to check the follower count\"\"\"\n\n def check_follower_count(self, expect_follower_count=2, retry_count=10):\n \"\"\"check the integrity of the old system after the install\"\"\"\n while retry_count > 0:\n NavigationBarPage(self.webdriver, self.cfg).navbar_goto(\"replication\")\n replication_page = ReplicationPage(self.webdriver, self.cfg)\n replication_table = replication_page.get_replication_screen(True)\n print(replication_table)\n if len(replication_table[\"follower_table\"]) != expect_follower_count + 1:\n time.sleep(5)\n retry_count -= 1\n else:\n retry_count = 0 # its there!\n # 
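setupUi in the PyQt5 record above repeats the same five lines (create a button, set its geometry, build a QFont, apply it) for each of its six buttons; a small factory keeps that in one place. A refactor sketch, with make_button as a hypothetical helper name rather than anything in the record:

from PyQt5 import QtCore, QtGui, QtWidgets

def make_button(parent, label, rect, point_size=15):
    # rect is (x, y, w, h); the caller still connects the clicked signal.
    btn = QtWidgets.QPushButton(label, parent)
    btn.setGeometry(QtCore.QRect(*rect))
    font = QtGui.QFont()
    font.setPointSize(point_size)
    btn.setFont(font)
    return btn

# e.g. self.pushButton = make_button(self.centralwidget, "", (10, 210, 321, 41))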
head and two followers should be there:\n self.progress(\n \" expecting %d followers, have %d followers\"\n % (expect_follower_count, len(replication_table[\"follower_table\"]) - 1)\n )\n self.ui_assert(\n len(replication_table[\"follower_table\"]) == expect_follower_count + 1,\n \"UI-Test:\\nexpect 1 follower in:\\n%s\" % pprint.pformat(replication_table),\n )\n \n def check_replication_tab(self):\n \"\"\"checking replication tab information\"\"\"\n replication_page = ReplicationPage(self.webdriver, self.cfg)\n replication_page.get_replication_information()\n","repo_name":"arangodb/release-test-automation","sub_path":"release_tester/selenium_ui_test/test_suites/activefailover/active_failover_base_suite.py","file_name":"active_failover_base_suite.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"62"} +{"seq_id":"73774971078","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\n\nTHEME_COLOR = \"#375362\"\n\n\n\nclass QuizInterface:\n\n\n def __init__(self,quiz_brain: QuizBrain ): # Makes to so it knows what quiz_brain is from and only takes those inputs from there\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"Quizzler\")\n self.window.config(padx=20,pady=20,bg=THEME_COLOR)\n\n self.text = Label(text=\"Score: 0\",background=THEME_COLOR,fg='white',font=('bald'))\n self.text.grid(row=0,column=1)\n\n self.canvas = Canvas(width=300,height=250)\n self.question_text = self.canvas.create_text(150,125,width=280,text=\"Some Question Text\",fill=THEME_COLOR,font=(\"Arial\",20,'italic'))\n self.canvas.grid(row=1,column=0,columnspan=2,pady=50)\n\n true_image = PhotoImage(file='images/true.png')\n self.true_button = Button(image=true_image,highlightthickness=0,command=self.false)\n self.true_button.grid(row=2,column=0)\n\n false_image = PhotoImage(file='images/false.png')\n self.false_button = Button(image=false_image,highlightthickness=0,command=self.true)\n self.false_button.grid(row=2,column=1)\n self.get_next_question()\n\n self.window.mainloop()\n\n\n def get_next_question(self):\n self.canvas.config(bg='white')\n if self.quiz.still_has_questions():\n self.text.config(text=f\"Score: {self.quiz.score}\")\n q_test = self.quiz.next_question() # LOOK ABOVE FOR COMMENT EXPLAINED TO HOW SHOW THIS\n self.canvas.itemconfig(self.question_text,text=q_test)\n else:\n self.canvas.itemconfig(self.question_text, text=\"Youve reached the end goodbye bia\")\n self.true_button.config(state='disabled') # DISABLES BUTTONS\n self.false_button.config(state='disabled')\n\n def true(self):\n self.give_feedback(self.quiz.check_answer(\"True\")) # same line\n\n def false(self):\n is_right = self.quiz.check_answer(\"False\")\n self.give_feedback(is_right) # same line\n def give_feedback(self,is_right):\n if is_right: # Ja nav true answer\n self.canvas.config(bg='green',highlightthickness=0)\n else:\n self.canvas.config(bg='red',highlightthickness=0)\n self.window.after(1000,self.get_next_question)\n\n\n\n\n\n\n\n","repo_name":"LukaTm/python-projects","sub_path":"day34-quiz/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2791358140","text":"import RPi.GPIO as GPIO\r\n\r\nbutton_pin = 7\r\n\r\ndef setup():\r\n GPIO.setwarnings(False) # Ignore warning for now\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # initialize button 
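In the Quizzler record above the button wiring appears inverted: true_button is constructed with command=self.false (which submits "False") and false_button with command=self.true, so clicking the True image grades the answer as False. If that double swap is not deliberate, the fix is one keyword each:

self.true_button = Button(image=true_image, highlightthickness=0, command=self.true)
self.false_button = Button(image=false_image, highlightthickness=0, command=self.false)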
pin\r\n\r\n\r\ndef button_callback(channel):\r\n print(\"Button was pushed!\")\r\n\r\nsetup()\r\n\r\nGPIO.add_event_detect(button_pin, GPIO.RISING, callback=button_callback) # Setup event on pin 10 rising edge\r\nmessage = input(\"Press enter to quit\\n\\n\")\r\nGPIO.cleanup() # Clean up\r\n","repo_name":"MatthewPerryman/recycle_robot","sub_path":"Server_Package/PiCode/rpiWebServer/test_code/test_button.py","file_name":"test_button.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"20315618761","text":"from io import StringIO\n\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\nfrom ..management.commands import listusedprofilefields\nfrom ..test import create_test_user\n\n\nclass ListUsedProfileFieldsTests(TestCase):\n def test_no_fields_set(self):\n \"\"\"utility has no showstoppers when no fields are set\"\"\"\n create_test_user(\"User\", \"user@example.com\")\n\n out = StringIO()\n call_command(listusedprofilefields.Command(), stdout=out)\n command_output = out.getvalue().splitlines()[0].strip()\n\n self.assertEqual(command_output, \"No profile fields are currently in use.\")\n\n def test_fields_set(self):\n \"\"\"utility lists number of users that have different fields set\"\"\"\n create_test_user(\n \"User1\",\n \"user1@example.com\",\n profile_fields={\"gender\": \"male\", \"bio\": \"Yup!\"},\n )\n create_test_user(\n \"User2\", \"user2@example.com\", profile_fields={\"gender\": \"male\"}\n )\n create_test_user(\"User3\", \"user3@example.com\", profile_fields={\"location\": \"\"})\n\n out = StringIO()\n call_command(listusedprofilefields.Command(), stdout=out)\n command_output = [l.strip() for l in out.getvalue().strip().splitlines()]\n\n self.assertEqual(command_output, [\"bio: 1\", \"gender: 2\", \"location: 1\"])\n","repo_name":"rafalp/Misago","sub_path":"misago/users/tests/test_listusedprofilefields.py","file_name":"test_listusedprofilefields.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":2396,"dataset":"github-code","pt":"62"} +{"seq_id":"42223250283","text":"import os.path, re\nimport wx\nimport drScrolledMessageDialog\nimport drShortcutsFile\n\nimport config, EpyGlob\nimport utils\n\ndef GetPopUpMenuLabels(filename, frame):\n try:\n f = file(filename, 'r')\n text = f.read()\n f.close()\n except:\n drScrolledMessageDialog.ShowMessage(frame, 'File error with: \"' + filename + '\".', \"ERROR\")\n return []\n\n rePopUpMenu = re.compile(r'^\\s*?DrFrame\\.AddPluginPopUpMenuFunction\\(.*\\)', re.MULTILINE)\n\n allPopUps = rePopUpMenu.findall(text)\n\n PopUpArray = []\n\n for s in allPopUps:\n #From the Left most '('\n start = s.find('(')\n #To the Right most ')'\n end = s.rfind(')')\n\n if (start > -1) and (end > -1):\n s = s[start+1:end]\n i = s.find(',')\n e = i + 1 + s[i+1:].find(',')\n arglabel = s[i+1:e].strip().strip('\"')\n\n PopUpArray.append(\":\"+arglabel)\n\n return PopUpArray\n\nclass drPopUpMenuDialog(wx.Dialog):\n\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, -1, (\"Customize Pop Up Menu\"), wx.DefaultPosition, (-1, -1), wx.DEFAULT_DIALOG_STYLE | wx.THICK_FRAME)\n\n wx.Yield()\n\n self.ID_PROGRAM = 1001\n self.ID_POPUP = 1002\n\n self.ID_LIST = 1300\n\n self.ID_ADD = 1003\n self.ID_REMOVE = 1004\n self.ID_UPDATE = 1005\n self.ID_SAVE = 1006\n\n self.ID_UP = 1111\n self.ID_DOWN = 2222\n\n self.parent = parent\n\n self.theSizer = wx.FlexGridSizer(0, 4, 5, 
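Mechanical push buttons bounce, so the edge detector in the RPi record above will often fire several callbacks per physical press. RPi.GPIO's add_event_detect accepts a bouncetime argument (in milliseconds) for exactly this; a drop-in replacement for the record's call:

GPIO.add_event_detect(button_pin, GPIO.RISING,
                      callback=button_callback,
                      bouncetime=200)  # ignore further edges for 200 ms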
10)\n self.menubuttonSizer = wx.BoxSizer(wx.VERTICAL)\n self.listSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.AppDataDir = parent.AppDataDir\n\n popupmenulist = []\n\n map(popupmenulist.append, parent.popupmenulist)\n\n if not popupmenulist:\n popupmenulist = [\"\", \"Undo\", \"Redo\", \"\", \"Cut\", \"Copy\", \"Paste\", \"Delete\", \"\", \"Select All\"]\n else:\n popupmenulist.insert(0, \"\")\n\n programmenulist = drShortcutsFile.GetShortcutList()\n\n programmenulist.sort()\n\n programmenulist.insert(0, \"\")\n\n self.ListArray = []\n self.ListArray.append(programmenulist)\n\n #STC\n\n stclist = []\n map(stclist.append, drShortcutsFile.GetSTCShortcutList())\n stclist.insert(0, \"\")\n self.ListArray.append(stclist)\n\n #DrScript\n\n drscriptlist = []\n map(drscriptlist.append, parent.drscriptmenu.titles)\n x = 0\n l = len(drscriptlist)\n while x < l:\n drscriptlist[x] = \":\" + drscriptlist[x]\n x = x + 1\n drscriptlist.insert(0, \"\")\n\n self.ListArray.append(drscriptlist)\n\n #Plugins\n plist = os.listdir(parent.pluginsdirectory)\n\n self.PluginList = []\n plugins = []\n for p in plist:\n i = p.find(\".py\")\n l = len(p)\n if i > -1 and (i + 3 == l):\n self.PluginList.append(\":\" + p[:i])\n plugins.append(p[:i])\n\n poplist = []\n for plugin in plugins:\n pluginfile = os.path.join(self.parent.pluginsdirectory, plugin + \".py\")\n pluginlist = GetPopUpMenuLabels(pluginfile, self)\n plist = self.parent.GetPluginLabels(pluginfile)\n for p in plist:\n if not (p in pluginlist):\n pluginlist.append(p)\n if pluginlist:\n pluginlist.insert(0, \"\")\n self.ListArray.append(pluginlist)\n else:\n poplist.append(\":\" + plugin)\n\n for popl in poplist:\n i = self.PluginList.index(popl)\n self.PluginList.pop(i)\n\n list = [\"Standard\", \"Text Control\", \"DrScript\"]\n list.extend(self.PluginList)\n\n self.cboList = wx.ComboBox(self, self.ID_LIST, \"Standard\", wx.DefaultPosition, (200, -1), list, wx.CB_DROPDOWN|wx.CB_READONLY)\n\n self.programmenu = wx.ListBox(self, self.ID_PROGRAM, wx.DefaultPosition, (250, 300), programmenulist)\n\n self.popupmenu = wx.ListBox(self, self.ID_POPUP, wx.DefaultPosition, (250, 300), popupmenulist)\n\n self.btnUp = wx.Button(self, self.ID_UP, \" Up \")\n self.btnAdd = wx.Button(self, self.ID_ADD, \" ---> \")\n self.btnRemove = wx.Button(self, self.ID_REMOVE, \" Remove \")\n self.btnDown = wx.Button(self, self.ID_DOWN, \" Down \")\n\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(self.btnAdd, 0, wx.SHAPED)\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(self.btnUp, 0, wx.SHAPED)\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(self.btnDown, 0, wx.SHAPED)\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.menubuttonSizer.Add(self.btnRemove, 0, wx.SHAPED)\n\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n\n self.listSizer.Add(wx.StaticText(self, -1, \"List: 
\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.listSizer.Add(self.cboList, 0, wx.ALIGN_CENTER | wx.SHAPED)\n\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(self.listSizer, 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \"Current List:\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \" \"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \"Pop Up Menu:\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(self.programmenu, 0, wx.SHAPED | wx.ALIGN_CENTER)\n self.theSizer.Add(self.menubuttonSizer, 0, wx.SHAPED | wx.ALIGN_CENTER)\n self.theSizer.Add(self.popupmenu, 0, wx.SHAPED | wx.ALIGN_CENTER)\n\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n\n self.btnUpdate = wx.Button(self, self.ID_UPDATE, \"&Update\")\n self.btnSave = wx.Button(self, self.ID_SAVE, \"&Save\")\n\n self.btnClose = wx.Button(self, 101, \"&Close\")\n self.theSizer.Add(wx.StaticText(self, -1, \"\"), 0, wx.ALIGN_CENTER | wx.SHAPED)\n self.theSizer.Add(self.btnClose, 0, wx.SHAPED | wx.ALIGN_CENTER)\n self.theSizer.Add(self.btnUpdate, 0, wx.SHAPED | wx.ALIGN_CENTER)\n self.theSizer.Add(self.btnSave, 0, wx.SHAPED | wx.ALIGN_CENTER)\n self.btnClose.SetDefault()\n\n self.SetAutoLayout(True)\n self.SetSizerAndFit(self.theSizer)\n\n self.Bind(wx.EVT_BUTTON, self.OnbtnUp, id=self.ID_UP)\n self.Bind(wx.EVT_BUTTON, self.OnbtnAdd, id=self.ID_ADD)\n self.Bind(wx.EVT_BUTTON, self.OnbtnRemove, id=self.ID_REMOVE)\n self.Bind(wx.EVT_BUTTON, self.OnbtnDown, id=self.ID_DOWN)\n self.Bind(wx.EVT_BUTTON, self.OnbtnUpdate, id=self.ID_UPDATE)\n self.Bind(wx.EVT_BUTTON, self.OnbtnSave, id=self.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.OnbtnClose, id=101)\n\n self.Bind(wx.EVT_COMBOBOX, self.OnList, id=self.ID_LIST)\n\n utils.LoadDialogSizeAndPosition(self, 'popupmenudialog.sizeandposition.dat')\n\n def OnCloseW(self, event):\n utils.SaveDialogSizeAndPosition(self, 'popupmenudialog.sizeandposition.dat')\n if event is not None:\n event.Skip()\n\n def OnbtnAdd(self, event):\n tselection = self.programmenu.GetStringSelection()\n tsel = self.programmenu.GetSelection()\n if tsel == -1:\n drScrolledMessageDialog.ShowMessage(self, \"Nothing Selected to Add\", \"Mistake\")\n return\n\n sel = self.popupmenu.GetSelection()\n if sel == -1:\n sel = 0\n\n separator = (tselection == \"\")\n if separator:\n tselection = \"\"\n\n self.popupmenu.InsertItems([tselection], sel+1)\n self.popupmenu.SetSelection(sel+1)\n\n def OnbtnClose(self, event):\n self.Close(1)\n\n def OnbtnDown(self, event):\n sel = self.popupmenu.GetSelection()\n if sel < self.popupmenu.GetCount()-1 and sel > 0:\n txt = self.popupmenu.GetString(sel)\n self.popupmenu.Delete(sel)\n self.popupmenu.InsertItems([txt], sel+1)\n self.popupmenu.SetSelection(sel+1)\n\n def OnbtnRemove(self, event):\n sel = self.popupmenu.GetSelection()\n if not sel:\n 
drScrolledMessageDialog.ShowMessage(self, \"You cannot remove the root item.\", \"Mistake\")\n            return\n        if sel == -1:\n            drScrolledMessageDialog.ShowMessage(self, \"Nothing Selected to Remove\", \"Mistake\")\n            return\n\n        self.popupmenu.Delete(sel)\n        self.popupmenu.SetSelection(sel-1)\n\n    def OnbtnUp(self, event):\n        sel = self.popupmenu.GetSelection()\n        if sel > 1:\n            txt = self.popupmenu.GetString(sel)\n            self.popupmenu.Delete(sel)\n            self.popupmenu.InsertItems([txt], sel-1)\n            self.popupmenu.SetSelection(sel-1)\n\n    def OnbtnUpdate(self, event):\n        y = 0\n        c = self.popupmenu.GetCount()\n\n        popupmenulist = []\n\n\n        while y < c:\n            pop = self.popupmenu.GetString(y)\n            if not pop == \"\":\n                popupmenulist.append(pop)\n            y = y + 1\n\n        self.parent.popupmenulist = popupmenulist\n\n        if config.prefs.enablefeedback:\n            drScrolledMessageDialog.ShowMessage(self, (\"Successfully updated the current instance of EasyPython.\\nClick Save to make it permanent.\"), \"Updated Pop Up Menu\")\n\n    def OnbtnSave(self, event):\n        y = 0\n        c = self.popupmenu.GetCount()\n\n        popupmenustring = \"\"\n        popupmenulist = []\n\n        while y < c:\n            pop = self.popupmenu.GetString(y)\n            if not pop == \"\":\n                popupmenustring = popupmenustring + pop + \"\\n\"\n                popupmenulist.append(pop)\n            y = y + 1\n\n        self.parent.popupmenulist = popupmenulist\n\n        popupfile = self.AppDataDir + \"/popupmenu.dat\"\n        try:\n            f = file(popupfile, 'w')\n            f.write(popupmenustring)\n            f.close()\n        except IOError:\n            drScrolledMessageDialog.ShowMessage(self, (\"There were some problems writing to:\\n\" + popupfile + \"\\nEither the file is having metaphysical issues, or you do not have permission to write.\\nFor metaphysical issues, consult the documentation.\\nFor permission issues, change the permissions on the directory to allow yourself write access.\\nEasyPython will now politely ignore your request to save.\\nTry again when you have fixed the problem.\"), \"Write Error\")\n            return\n        if config.prefs.enablefeedback:\n            drScrolledMessageDialog.ShowMessage(self, (\"Successfully wrote to:\\n\" + popupfile + \"\\nand updated the current instance of EasyPython.\"), \"Saved Pop Up Menu\")\n\n    def OnList(self, event):\n        sel = self.cboList.GetSelection()\n\n        self.programmenu.Set(self.ListArray[sel])\n\n\n","repo_name":"walker8088/easyworld","sub_path":"EasyPython/drPopUpMenuDialog.py","file_name":"drPopUpMenuDialog.py","file_ext":"py","file_size_in_byte":12267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"15901573547","text":"import cv2\nimport numpy as np\nimport datetime\nimg1 = cv2.imread('loa.jpg')\ncap = cv2.VideoCapture(1)\nwht = 320\nconfThreshold = 0.5\nnmsThreshold = 0.3\n\nflag = True\nclassesFile = 'obj.names'\nclassNames = []\nwith open(classesFile,'rt') as f:\n    classNames = f.read().rstrip('\\n').split('\\n')\n\n\nmodelConfig = 'yolov4-tiny-custom.cfg'\nmodelWeight = 'yolov4-tiny-custom.weights'\n\nnet = cv2.dnn.readNetFromDarknet(modelConfig,modelWeight)\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n\n\n\ndef findObjects(outputs,img):\n\n    hT, wT, cT = img.shape\n    bbox = []\n    classIds = []\n    confs = []\n    for output in outputs:\n        for det in output:\n            scores = det[5:]\n            classId = np.argmax(scores)\n            confidence = scores[classId]\n            if confidence > confThreshold:\n                w,h = int(det[2]*wT) , int(det[3]*hT)\n                x,y = int((det[0]*wT)-w/2) , int((det[1]*hT)-h/2)\n                bbox.append([x,y,w,h])\n                classIds.append(classId)\n                confs.append(float(confidence))\n\n    indices = 
cv2.dnn.NMSBoxes(bbox,confs,confThreshold,nms_threshold=nmsThreshold)\n print(indices)\n\n for i in indices:\n # global dem\n i = i[0]\n box= bbox[i]\n x,y,w,h = box[0],box[1],box[2],box[3]\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,255),2)\n timestr = str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)+\"_\"+str(now.date())\n print(timestr)\n cv2.imwrite('C:/Users/Administrator/Desktop/xu_li_anh/test/%r_%rh%rm%rs_%r.jpg' %(classNames[classIds[i]].upper(),now.hour,now.minute,now.second,now.date()),img)\n cv2.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',\n (x,y-10),cv2.FONT_HERSHEY_SIMPLEX,0.6,(255,0,255),2)\n\n#----------video,webcam\nwhile True:\n now = datetime.datetime.now()\n success, img = cap.read()\n img = cv2.flip(img,1)\n blob = cv2.dnn.blobFromImage(img,1/255,(wht,wht),[0,0,0],1,crop=True)\n net.setInput(blob)\n layerNames = net.getLayerNames()\n outputNames = [layerNames[i[0]-1] for i in net.getUnconnectedOutLayers()]\n outputs= net.forward(outputNames)\n findObjects(outputs,img)\n cv2.imshow('cam',img)\n\n if cv2.waitKey(1) & 0xFF == ord(' '):\n break\ncap.release()\n\n#------img\n# blob = cv2.dnn.blobFromImage(img1, 1 / 255, (wht, wht), [0, 0, 0], 1, crop=False)\n# net.setInput(blob)\n#\n# layerNames = net.getLayerNames()\n# outputNames = [layerNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n# outputs = net.forward(outputNames)\n# findObjects(outputs, img1)\n#\n# cv2.imshow('cam', img1)\n# cv2.waitKey(0)\n#----------\n\ncv2.destroyAllWindows()","repo_name":"manhdung0802/AI-ML-DL","sub_path":"yolov4-tiny-loa/yolov4.py","file_name":"yolov4.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30515057661","text":"\"\"\"mini_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from .views import RegisterAPI\nfrom django.contrib import admin\nfrom django.urls import path \nfrom .views import Destroyeapi, Retriveapi, UserDetailAPI,RegisterUserAPIView, getapi, postapi\nfrom rest_framework_simplejwt import views as jwt_views\nfrom .import views\nurlpatterns = [\n path(\"demoPage\",views.demoPage),\n path(\"demoPage2\",views.demoPage2),\n path('',views.loginUser,name=\"login\"),\n path('signup',views.signupUser,name=\"signup\"),\n path('signup_process',views.signup_process,name=\"signup_process\"),\n path('login_proces',views.login_proces,name=\"login_proces\"),\n path('home',views.home,name=\"home\"),\n path('logout',views.logoutUser,name=\"logout\"),\n path('add_tasks',views.add_tasks,name=\"add_tasks\"),\n path('delete/',views.delete_task,name=\"delete_task\"),\n path('edit_task/',views.edit_task,name=\"edit_task\"),\n path('edit_tasks_save/',views.edit_tasks_save,name=\"edit_tasks_save\"),\n \n path('api/get/',getapi.as_view(), name='token_obtain_pair'),\n path('api/post/',postapi.as_view(), name='token_obtain_pair'),\n path('api/retrive/',Retriveapi.as_view(), name='token_obtain_pair'),\n path('api/destroye/',Destroyeapi.as_view(), name='token_obtain_pair'),\n \n path('api/register/',RegisterUserAPIView.as_view()),\n # path('api/register/', RegisterAPI.as_view(), name='register'),\n path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),\n\n\n\n]\n","repo_name":"pratapku/Assignment","sub_path":"DjangoHindiApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"10773443561","text":"import pytest\n\nfrom sportorg.config import base_dir\nfrom sportorg.libs.ocad import ocad\n\n\n@pytest.fixture()\ndef classes_v8(request):\n file = base_dir('tests', 'data', 'CoursesV8.txt')\n return ocad.parse_txt_v8(file)\n\n\ndef test_v8_parse(classes_v8):\n file = base_dir('tests', 'data', 'CoursesV8.txt')\n assert classes_v8.parse(file)\n\n\ndef test_courses(classes_v8):\n assert classes_v8.courses\n\n\ndef test_groups(classes_v8):\n assert classes_v8.groups\n\n\ndef test_get_item():\n assert ocad.ClassesV8.get_courses(\n 'M16;Normal Course;0;5.700;130;S1;0.216;47;0.216;120;0.280;115;0.229;F1'\n )\n assert len(\n ocad.ClassesV8.get_courses(\n 'M16;Normal Course;0;5.700;130;S1;0.216;47;0.216;120;0.280;115;0.229;F1'\n )\n )\n course = ocad.ClassesV8.get_course(\n 'M16;Normal Course;0;5.700;130;S1;0.216;47;0.216;120;0.229;F1'.split(';')\n )\n for _, c in course.controls.items():\n assert c.code\n\n with pytest.raises(TypeError):\n ocad.ClassesV8.get_courses({})\n with pytest.raises(TypeError):\n ocad.ClassesV8.get_courses(0)\n","repo_name":"sportorg/pysport","sub_path":"tests/test_ocad.py","file_name":"test_ocad.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"62"} +{"seq_id":"74070206278","text":"import numpy as np\n\nA = np.matrix([[1, -5, 1, 0],\n [-3, 1, 0, 1]])\nb = np.matrix([[-10, -12]])\nc = [0, -6, 1, 0]\nJ_b = [2, 3]\n\ndef get_reverse_matrix(source_matrix, source_reverse_matrix, vector, i):\n l = source_reverse_matrix * vector\n if l[i] == 0:\n return None\n\n li = l.item(i)\n l[i] = -1\n l_cap = -1 / li * l\n e = np.identity(len(source_matrix))\n e[:, i] = 
np.transpose(l_cap)\n    reverse_matrix = e * source_reverse_matrix\n\n    return reverse_matrix\n\ndef dual_simplex(A, b, c, J_b):\n    n = len(c)\n    A_b = A[:, J_b]\n    B = np.linalg.inv(A_b)\n    c_b = [c[i] for i in J_b]\n    print(\"\\nBasis vector c: \", c_b)\n    dual_optimal_plan = (c_b * B).transpose()\n    print(\"\\nInitial optimal plan: \")\n    print(dual_optimal_plan)\n    iteration = 1\n    while True:\n        print(\"------- Iteration # {} -------\".format(iteration))\n        print(\"\\nBasis matrix A: \\n\", A_b)\n        print(\"\\nInverse basis matrix B: \\n\", B)\n\n        kappa_b = B * b\n        print('Kappa:\\n {}\\n'.format(kappa_b.ravel().tolist()))\n\n        kappa_plan = [0 if index not in J_b else kappa_b[J_b.index(index)].item(0) for index in range(n)]\n\n        print('Kappa plan: {}'.format(kappa_plan))\n\n        negative_elements_indexes = get_negative_elements_indexes(kappa_plan)\n\n        if len(negative_elements_indexes) == 0:\n            break\n\n        print('Indexes of negative variables: {}'.format([i + 1 for i in negative_elements_indexes]))\n\n        j_s = negative_elements_indexes[0]\n\n        delta_y = B[J_b.index(j_s)]\n        print(\"Dual plan y': {}\".format(delta_y))\n\n        mu = get_negative_mus(J_b, n, delta_y, A)\n        print('Mu: {}'.format(mu))\n\n        j0, sigma0 = get_min_sigma_with_index(c, A, mu, dual_optimal_plan)\n        print('Minimum sigma: {}'.format(sigma0))\n\n        print('j_0: {}'.format(j0))\n\n        dual_optimal_plan = dual_optimal_plan + sigma0.item(0) * delta_y.transpose()\n        print('y: {}'.format(dual_optimal_plan.ravel().tolist()))\n\n        J_b[J_b.index(j_s)] = j0\n        print('\\nBasis indexes: {}'.format(J_b))\n\n        index = J_b.index(j0)\n        B = get_reverse_matrix(A_b, B, A[:, j0], index)\n        A_b[:, index] = A[:, j0]\n        iteration = iteration + 1\n\n    print(\"\\n\\nResult of the dual simplex method:\")\n    print(\"Optimal plan: {}\".format(kappa_plan))\n    print(\"Basis indexes: {}\".format(J_b))\n\ndef get_negative_elements_indexes(elements_list):\n    indexes = [index for index in range(len(elements_list))\n               if elements_list[index] < 0]\n    return indexes\n\ndef get_negative_mus(J_b, n, delta_y, A):\n    J_n = [index for index in range(n) if index not in J_b]\n    mu = [delta_y * A[:, index] for index in J_n]\n    negative_indexes = [J_n[index] for index in range(len(mu)) if mu[index] < 0]\n    if len(negative_indexes) == 0:\n        raise Exception\n\n    mu_with_indexes = {negative_indexes[index]: float(mu[index]) for index in range(len(negative_indexes))}\n\n    return mu_with_indexes\n\n\ndef get_min_sigma_with_index(c, A, mu, dual_optimal_plan):\n    sigmas = {index: (c[index] - A[:, index].transpose() * dual_optimal_plan)/ mu_value for index, mu_value in\n              mu.items()}\n    print('\\nSigma vector: {}'.format(sigmas))\n\n    min_sigma_index = min(sigmas, key=sigmas.get)\n    return min_sigma_index, sigmas.get(min_sigma_index)\n\ndual_simplex(A, b.transpose(), c, J_b)\n\n","repo_name":"Geniny/MOIY","sub_path":"[MOIY]Lab4.py","file_name":"[MOIY]Lab4.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"}
+{"seq_id":"8983003772","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*- \nimport os\n\ntry:\n    path = 'F:\\\\python\\\\chonghcong\\\\lagou\\\\Lagou\\\\lagou\\\\user.json'\n    file = open(path, 'rb')\n    info = eval(file.read())\nexcept Exception as e:\n    print(os.getcwd(),'\\\\user.json load failed')\n    raise \n\nlagou_user = info['lagou_user']\nlagou_psw = info['lagou_psw']\nemail_user = info['email_user']\nemail_auth_psw = info['email_auth_psw']\nemail_receiver = 
info['email_receiver']","repo_name":"zhouanqi/Python-practice-questions","sub_path":"lagou/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26839675757","text":"import re\nfrom PySide2.QtCore import Qt, Slot\nfrom PySide2.QtGui import QFont, QFontMetrics\nfrom PySide2.QtWidgets import (\n QGridLayout,\n QHBoxLayout,\n QLabel,\n)\n\nfrom .. import calculation as calc\nfrom .. import readapi as read_data\nfrom ..base import Widget\nfrom ..module_info import minfo\n\nWIDGET_NAME = \"sectors\"\nMAGIC_NUM = 99999 # magic number for default variable not updated by rF2\n\n\nclass Draw(Widget):\n \"\"\"Draw widget\"\"\"\n\n def __init__(self, config):\n # Assign base setting\n Widget.__init__(self, config, WIDGET_NAME)\n\n # Config font\n self.font = QFont()\n self.font.setFamily(self.wcfg['font_name'])\n self.font.setPixelSize(self.wcfg['font_size'])\n font_w = QFontMetrics(self.font).averageCharWidth()\n\n # Config variable\n bar_padx = round(self.wcfg[\"font_size\"] * self.wcfg[\"bar_padding\"])\n bar_gap = self.wcfg[\"bar_gap\"]\n\n # Base style\n self.setStyleSheet(\n f\"font-family: {self.wcfg['font_name']};\"\n f\"font-size: {self.wcfg['font_size']}px;\"\n f\"font-weight: {self.wcfg['font_weight']};\"\n f\"padding: 0 {bar_padx}px;\"\n )\n\n # Create layout\n layout = QGridLayout()\n layout.setContentsMargins(0,0,0,0) # remove border\n layout_laptime = QHBoxLayout()\n layout_sector = QHBoxLayout()\n layout_laptime.setSpacing(bar_gap)\n layout_sector.setSpacing(bar_gap)\n layout.setSpacing(bar_gap)\n layout.setAlignment(Qt.AlignLeft | Qt.AlignTop)\n\n # Speed\n if self.wcfg[\"show_speed\"]:\n self.bar_width_speed = font_w * 5\n self.bar_speed_curr = QLabel(\"\")\n self.bar_speed_curr.setAlignment(Qt.AlignCenter)\n self.bar_speed_curr.setStyleSheet(\n f\"color: {self.wcfg['font_color_speed']};\"\n f\"background: {self.wcfg['bkg_color_speed']};\"\n f\"min-width: {self.bar_width_speed}px;\"\n )\n self.bar_speed_best = QLabel(\"\")\n self.bar_speed_best.setAlignment(Qt.AlignCenter)\n self.bar_speed_best.setStyleSheet(\n f\"color: {self.wcfg['font_color_speed']};\"\n f\"background: {self.wcfg['bkg_color_speed']};\"\n f\"min-width: {self.bar_width_speed}px;\"\n )\n\n # Target time\n self.bar_width_laptime = font_w * 11\n self.bar_time_target = QLabel(\" --:--.---\")\n self.bar_time_target.setAlignment(Qt.AlignCenter)\n self.bar_time_target.setStyleSheet(\n f\"color: {self.wcfg['font_color_target_time']};\"\n f\"background: {self.wcfg['bkg_color_target_time']};\"\n f\"min-width: {self.bar_width_laptime}px;\"\n )\n\n # Current time\n self.bar_time_curr = QLabel(\" --:--.---\")\n self.bar_time_curr.setAlignment(Qt.AlignCenter)\n self.bar_time_curr.setStyleSheet(\n f\"color: {self.wcfg['font_color_current_time']};\"\n f\"background: {self.wcfg['bkg_color_current_time']};\"\n f\"min-width: {self.bar_width_laptime}px;\"\n )\n\n # Gap to best lap laptime\n self.bar_width_gap = font_w * 7\n self.bar_time_gap = QLabel(\"--.---\")\n self.bar_time_gap.setAlignment(Qt.AlignCenter)\n self.bar_time_gap.setStyleSheet(\n f\"color: {self.wcfg['font_color_laptime_gap']};\"\n f\"background: {self.wcfg['bkg_color_laptime_gap']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n if not self.wcfg[\"always_show_laptime_gap\"]: # hide laptime gap\n self.bar_time_gap.hide()\n\n # Gap to best sector time\n self.bar_s1_gap = QLabel(\"S1\")\n self.bar_s1_gap.setAlignment(Qt.AlignCenter)\n 
self.bar_s1_gap.setStyleSheet(\n f\"color: {self.wcfg['font_color_sector']};\"\n f\"background: {self.wcfg['bkg_color_sector']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n\n self.bar_s2_gap = QLabel(\"S2\")\n self.bar_s2_gap.setAlignment(Qt.AlignCenter)\n self.bar_s2_gap.setStyleSheet(\n f\"color: {self.wcfg['font_color_sector']};\"\n f\"background: {self.wcfg['bkg_color_sector']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n\n self.bar_s3_gap = QLabel(\"S3\")\n self.bar_s3_gap.setAlignment(Qt.AlignCenter)\n self.bar_s3_gap.setStyleSheet(\n f\"color: {self.wcfg['font_color_sector']};\"\n f\"background: {self.wcfg['bkg_color_sector']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n\n # Set layout\n layout_laptime.addWidget(self.bar_time_target)\n layout_laptime.addWidget(self.bar_time_curr)\n layout_sector.addWidget(self.bar_s1_gap)\n layout_sector.addWidget(self.bar_s2_gap)\n layout_sector.addWidget(self.bar_s3_gap)\n\n if self.wcfg[\"show_speed\"]:\n layout.addWidget(self.bar_speed_curr, 0, 0)\n layout.addWidget(self.bar_speed_best, 1, 0)\n\n if self.wcfg[\"layout\"] == 0:\n # Default layout, sector time above delta\n layout.addWidget(self.bar_time_gap, 0, 2)\n layout.addLayout(layout_laptime, 0, 1)\n layout.addLayout(layout_sector, 1, 1)\n else:\n # Horizontal layout\n layout.addWidget(self.bar_time_gap, 1, 2)\n layout.addLayout(layout_laptime, 1, 1)\n layout.addLayout(layout_sector, 0, 1)\n self.setLayout(layout)\n\n # Last data\n self.verified = False # load & save switch\n self.set_defaults()\n\n self.last_cb_topspeed = None\n self.last_ub_topspeed = None\n self.last_plr_place = None\n self.last_plr_laps = None\n\n # Set widget state & start update\n self.set_widget_state()\n self.update_timer.start()\n\n def set_defaults(self):\n \"\"\"Initialize variables\"\"\"\n self.last_lap_stime = 0 # last lap start time\n self.last_sector_idx = -1 # previous recorded sector index value\n self.combo_name = \"unknown\" # current car & track combo\n self.session_id = None # session identity\n\n self.best_laptime = MAGIC_NUM # best laptime (seconds)\n self.delta_s = [0,0,0] # deltabest times against all time best sector\n self.delta_bestlap_s = [0,0,0] # deltabest times against best laptime sector\n self.prev_s = [MAGIC_NUM,MAGIC_NUM,MAGIC_NUM] # previous sector times\n self.best_s = [MAGIC_NUM,MAGIC_NUM,MAGIC_NUM] # best sector times\n self.bestlap_s = [MAGIC_NUM,MAGIC_NUM,MAGIC_NUM] # best lap sector times\n\n self.valid_topspeed = True\n self.cb_topspeed = 0 # current-lap best top speed\n self.sb_topspeed = 0 # session best top speed\n self.ub_topspeed = 0 # unverified session best top speed\n self.speed_timer_start = 0 # speed timer start\n self.freeze_timer_start = 0 # sector timer start\n\n self.time_target_text = \" --:--.---\" # target time text\n self.last_time_target_text = \"\" # last recorded target time text for freeze\n self.update_time_target(self.time_target_text)\n\n @Slot()\n def update_data(self):\n \"\"\"Update when vehicle on track\"\"\"\n if self.wcfg[\"enable\"] and read_data.state():\n\n laptime_curr = minfo.delta.LaptimeCurrent\n\n # Read Sector data\n (sector_idx, curr_sector1, curr_sector2, last_sector2, last_laptime, speed\n ) = read_data.sector()\n lap_stime, lap_etime = read_data.lap_timestamp()\n\n # Save switch\n if not self.verified:\n self.verified = True\n self.set_defaults() # reset data\n self.load_saved_sector_data() # load saved sector data\n self.restore_best_sector(self.best_s) # Restore best sector time\n\n # Speed update\n if 
self.wcfg[\"show_speed\"]:\n # Lap start & finish detection\n if lap_stime != self.last_lap_stime: # time stamp difference\n self.cb_topspeed = speed # reset current lap fastest speed\n self.last_lap_stime = lap_stime # reset\n self.valid_topspeed = False\n\n # Validate fastest speed\n if not self.valid_topspeed and laptime_curr > 1:\n if last_laptime > 0: # valid last laptime\n self.sb_topspeed = self.ub_topspeed\n else: # invalid last laptime\n self.ub_topspeed = self.sb_topspeed # restore session fastest speed\n if self.cb_topspeed > self.ub_topspeed:\n self.ub_topspeed = self.cb_topspeed\n # Update session top speed display\n self.update_speed_best(self.ub_topspeed, MAGIC_NUM)\n self.valid_topspeed = True\n\n # Update current top speed display\n if speed > self.cb_topspeed:\n self.cb_topspeed = speed\n self.update_speed_curr(self.cb_topspeed, self.last_cb_topspeed)\n self.last_cb_topspeed = self.cb_topspeed\n\n # Update session top speed display\n if speed > self.ub_topspeed:\n self.ub_topspeed = speed\n self.speed_timer_start = lap_etime # start timer if speed higher\n\n if self.speed_timer_start:\n speed_timer = lap_etime - self.speed_timer_start\n if speed_timer >= max(self.wcfg[\"speed_highlight_duration\"], 0):\n self.speed_timer_start = 0 # stop timer\n self.update_speed_best(self.ub_topspeed, MAGIC_NUM)\n else:\n self.update_speed_best(self.ub_topspeed, self.last_ub_topspeed, True)\n\n self.last_ub_topspeed = self.ub_topspeed\n\n # Sector update\n\n # Update previous & best sector time\n if self.last_sector_idx != sector_idx: # keep checking until conditions met\n\n # While vehicle in S1, update S3 data\n if sector_idx == 0 and last_laptime > 0 and last_sector2 > 0:\n self.last_sector_idx = sector_idx # reset & stop checking\n self.update_sector3_data(last_laptime, last_sector2)\n\n # While vehicle in S2, update S1 data\n elif sector_idx == 1 and curr_sector1 > 0:\n self.last_sector_idx = sector_idx # reset\n self.update_sector1_data(curr_sector1)\n\n # While vehicle in S3, update S2 data\n elif sector_idx == 2 and curr_sector2 > 0 and curr_sector1 > 0:\n self.last_sector_idx = sector_idx # reset\n self.update_sector2_data(curr_sector2, curr_sector1)\n\n # Triggered when sector values reset\n if self.last_sector_idx == sector_idx:\n # Store last time target text for freeze state before update\n self.last_time_target_text = self.time_target_text\n\n # Update (time target) best sector text\n self.time_target_text = self.set_target_time(\n self.best_s, self.bestlap_s, sector_idx)\n\n # Activate freeze & sector timer\n self.freeze_timer_start = lap_etime\n\n # Freeze best sector time\n self.update_time_target(self.last_time_target_text)\n\n # Freeze current sector time\n self.update_time_curr(sector_idx, laptime_curr, True)\n\n # Triggered if no valid last laptime set & 8s after cross line\n # Necessary for correctly update target time for garage-pitout & app-restart\n if last_laptime < 0 and laptime_curr > 8:\n self.last_sector_idx = sector_idx # reset\n # Update (time target) best sector text\n self.time_target_text = self.set_target_time(\n self.best_s, self.bestlap_s, sector_idx)\n self.update_time_target(self.time_target_text)\n\n # Update freeze timer\n if self.freeze_timer_start:\n freeze_timer = lap_etime - self.freeze_timer_start\n\n # Stop freeze timer after duration\n if freeze_timer >= self.freeze_duration(self.prev_s[sector_idx]):\n self.freeze_timer_start = 0 # stop timer\n # Update best sector time\n self.update_time_target(self.time_target_text)\n # Restore best 
sector time when cross finish line\n if sector_idx == 0:\n self.restore_best_sector(self.best_s)\n # Hide laptime gap\n if not self.wcfg[\"always_show_laptime_gap\"]:\n self.bar_time_gap.hide()\n else:\n # Update current sector time\n self.update_time_curr(sector_idx, laptime_curr)\n\n else:\n if self.verified:\n self.verified = False # activate verification when enter track next time\n\n if not self.wcfg[\"always_show_laptime_gap\"]:\n self.bar_time_gap.hide()\n\n # Save only valid sector data\n if self.session_id and self.valid_sector(self.bestlap_s):\n self.wcfg[\"last_sector_info\"] = (\n str(self.combo_name)\n + \"|\" + str(self.session_id[0])\n + \"|\" + str(self.session_id[1])\n + \"|\" + str(self.session_id[2])\n + \"|\" + str(self.best_laptime)\n + \"|\" + str(self.best_s[0])\n + \"|\" + str(self.best_s[1])\n + \"|\" + str(self.best_s[2])\n + \"|\" + str(self.bestlap_s[0])\n + \"|\" + str(self.bestlap_s[1])\n + \"|\" + str(self.bestlap_s[2])\n + \"|\" + str(self.sb_topspeed)\n )\n self.cfg.save()\n\n # GUI update methods\n def update_speed_curr(self, curr, last):\n \"\"\"Current lap best top speed\"\"\"\n if curr != last:\n self.bar_speed_curr.setText(\n f\"{self.speed_units(curr):.01f}\")\n\n def update_speed_best(self, curr, last, highlighted=False):\n \"\"\"Session best top speed\"\"\"\n if curr != last:\n speed_text = f\"{self.speed_units(curr):.01f}\"\n if highlighted:\n color = (f\"color: {self.wcfg['font_color_speed_highlighted']};\"\n f\"background: {self.wcfg['bkg_color_speed_highlighted']};\")\n else:\n color = (f\"color: {self.wcfg['font_color_speed']};\"\n f\"background: {self.wcfg['bkg_color_speed']};\")\n\n self.bar_speed_best.setText(speed_text)\n self.bar_speed_best.setStyleSheet(\n f\"{color}min-width: {self.bar_width_speed}px;\")\n\n def update_time_gap(self, time_diff):\n \"\"\"Gap to best lap laptime\"\"\"\n self.bar_time_gap.setText(f\"{time_diff:+.03f}\"[:7])\n self.bar_time_gap.setStyleSheet(\n f\"color: {self.color_delta(time_diff, 1)};\"\n f\"background: {self.wcfg['bkg_color_laptime_gap']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n self.bar_time_gap.show()\n\n def update_time_target(self, time_text):\n \"\"\"Target sector time text\"\"\"\n self.bar_time_target.setText(time_text)\n\n def update_time_curr(self, sector_idx, laptime_curr, freeze=False):\n \"\"\"Current sector time text\"\"\"\n sector_text = (\"S1\",\"S2\",\"S3\")[sector_idx]\n curr_sectortime = laptime_curr\n\n # Freeze current sector time\n if freeze:\n prev_sector_idx = (2,0,1)[sector_idx]\n if self.valid_sector(self.prev_s[prev_sector_idx]): # valid previous sector time\n calc_sectortime = self.calc_sector_time(self.prev_s, prev_sector_idx)\n if calc_sectortime < MAGIC_NUM: # bypass invalid value\n curr_sectortime = calc_sectortime\n sector_text = (\"S1\",\"S2\",\"S3\")[prev_sector_idx]\n\n # Update current sector time\n self.bar_time_curr.setText(\n f\"{sector_text}{calc.sec2laptime(curr_sectortime)[:8].rjust(9)}\")\n\n def update_sector_gap(self, suffix, time_delta, highlighted=False):\n \"\"\"Gap to best sector time\"\"\"\n if highlighted:\n text = f\"{time_delta:+.03f}\"[:7]\n color = (f\"color: {self.wcfg['font_color_sector_highlighted']};\"\n f\"background: {self.color_delta(time_delta, 0)};\")\n else: # show previous sector time instead\n text = f\"{time_delta:.03f}\"[:7]\n color = (f\"color: {self.wcfg['font_color_sector']};\"\n f\"background: {self.wcfg['bkg_color_sector']};\")\n getattr(self, f\"bar_{suffix}\").setText(text)\n getattr(self, 
f\"bar_{suffix}\").setStyleSheet(\n f\"{color}min-width: {self.bar_width_gap}px;\")\n\n def restore_best_sector(self, sector_time):\n \"\"\"Restore best sector time\"\"\"\n for idx in range(3):\n text_s = f\"S{idx+1}\"\n if self.valid_sector(sector_time[idx]):\n text_s = f\"{sector_time[idx]:.03f}\"[:7]\n\n getattr(self, f\"bar_s{idx+1}_gap\").setText(text_s)\n getattr(self, f\"bar_s{idx+1}_gap\").setStyleSheet(\n f\"color: {self.wcfg['font_color_sector']};\"\n f\"background: {self.wcfg['bkg_color_sector']};\"\n f\"min-width: {self.bar_width_gap}px;\"\n )\n\n # Sector data update methods\n def update_sector3_data(self, last_laptime, last_sector2):\n \"\"\"Save previous sector 3 time\"\"\"\n self.prev_s[2] = last_laptime - last_sector2\n\n # Update (time gap) deltabest bestlap sector 3 text\n if self.valid_sector(self.bestlap_s[2]):\n self.delta_bestlap_s[2] = self.prev_s[2] - self.bestlap_s[2] + self.delta_bestlap_s[1]\n self.update_time_gap(self.delta_bestlap_s[2])\n\n # Update deltabest sector 3 text\n if self.valid_sector(self.best_s[2]):\n self.delta_s[2] = self.prev_s[2] - self.best_s[2]\n self.update_sector_gap(\"s3_gap\", self.delta_s[2], True)\n elif self.valid_sector(self.prev_s[2]):\n # Show previous sector time if no best sector time set\n self.update_sector_gap(\"s3_gap\", self.prev_s[2])\n\n # Save best sector 3 time\n if self.prev_s[2] < self.best_s[2]:\n self.best_s[2] = self.prev_s[2]\n\n # Save sector time from personal best laptime\n if last_laptime < self.best_laptime and self.valid_sector(self.prev_s):\n self.best_laptime = last_laptime\n self.bestlap_s = self.prev_s.copy()\n\n def update_sector1_data(self, curr_sector1):\n \"\"\"Save previous sector 1 time\"\"\"\n self.prev_s[0] = curr_sector1\n\n # Update (time gap) deltabest bestlap sector 1 text\n if self.valid_sector(self.bestlap_s[0]):\n self.delta_bestlap_s[0] = self.prev_s[0] - self.bestlap_s[0]\n self.update_time_gap(self.delta_bestlap_s[0])\n\n # Update deltabest sector 1 text\n if self.valid_sector(self.best_s[0]):\n self.delta_s[0] = self.prev_s[0] - self.best_s[0]\n self.update_sector_gap(\"s1_gap\", self.delta_s[0], 1)\n elif self.valid_sector(self.prev_s[0]):\n # Show previous sector time if no best sector time set\n self.update_sector_gap(\"s1_gap\", self.prev_s[0])\n\n # Save best sector 1 time\n if self.prev_s[0] < self.best_s[0]:\n self.best_s[0] = self.prev_s[0]\n\n def update_sector2_data(self, curr_sector2, curr_sector1):\n \"\"\"Save previous sector 2 time\"\"\"\n self.prev_s[1] = curr_sector2 - curr_sector1\n\n # Update (time gap) deltabest bestlap sector 2 text\n if self.valid_sector(self.bestlap_s[1]):\n self.delta_bestlap_s[1] = self.prev_s[1] - self.bestlap_s[1] + self.delta_bestlap_s[0]\n self.update_time_gap(self.delta_bestlap_s[1])\n\n # Update deltabest sector 2 text\n if self.valid_sector(self.best_s[1]):\n self.delta_s[1] = self.prev_s[1] - self.best_s[1]\n self.update_sector_gap(\"s2_gap\", self.delta_s[1], 1)\n elif self.valid_sector(self.prev_s[1]):\n # Show previous sector time if no best sector time set\n self.update_sector_gap(\"s2_gap\", self.prev_s[1])\n\n # Save best sector 2 time\n if self.prev_s[1] < self.best_s[1]:\n self.best_s[1] = self.prev_s[1]\n\n def set_target_time(self, sec_tb, sec_pb, sec_index):\n \"\"\"Set target sector time text\"\"\"\n # Mode 0 - show theoretical best sector, only update if all sector time is valid\n if self.wcfg[\"target_time_mode\"] == 0:\n sector_time = self.calc_sector_time(sec_tb, sec_index)\n if sector_time < MAGIC_NUM: # bypass invalid 
value\n return f\"TB{calc.sec2laptime(sector_time)[:8].rjust(9)}\"\n # Mode 1 - show personal best lap sector\n else:\n sector_time = self.calc_sector_time(sec_pb, sec_index)\n if sector_time < MAGIC_NUM: # bypass invalid value\n return f\"PB{calc.sec2laptime(sector_time)[:8].rjust(9)}\"\n return \" --:--.---\"\n\n # Additional methods\n def speed_units(self, value):\n \"\"\"Speed units\"\"\"\n if self.cfg.units[\"speed_unit\"] == \"MPH\":\n return calc.mps2mph(value)\n if self.cfg.units[\"speed_unit\"] == \"m/s\":\n return value\n return calc.mps2kph(value)\n\n @staticmethod\n def valid_sector(sec_time):\n \"\"\"Validate sector time\"\"\"\n if isinstance(sec_time, list):\n if MAGIC_NUM not in sec_time:\n return True\n else:\n if MAGIC_NUM != sec_time:\n return True\n return False\n\n def freeze_duration(self, seconds):\n \"\"\"Set freeze duration\"\"\"\n if self.valid_sector(seconds):\n max_freeze = seconds / 2\n else:\n max_freeze = 3\n return min(max(self.wcfg[\"freeze_duration\"], 0), max_freeze)\n\n @staticmethod\n def calc_sector_time(sec_time, sec_index):\n \"\"\"Calculate accumulated sector time\"\"\"\n if sec_index == 1: # sector 2 sum\n return sec_time[0] + sec_time[1]\n if sec_index == 2: # sector 3 sum\n return sum(sec_time)\n return sec_time[0] # sector 1\n\n def color_delta(self, seconds, types):\n \"\"\"Sector delta color\"\"\"\n if types: # 1 = foreground\n if seconds < 0:\n color = self.wcfg[\"font_color_time_gain\"]\n else:\n color = self.wcfg[\"font_color_time_loss\"]\n else: # 0 = background\n if seconds < 0:\n color = self.wcfg[\"bkg_color_time_gain\"]\n else:\n color = self.wcfg[\"bkg_color_time_loss\"]\n return color\n\n def load_saved_sector_data(self):\n \"\"\"Load and verify saved sector data\"\"\"\n saved_data = self.parse_save_string(self.wcfg[\"last_sector_info\"])\n # Check if saved data is from same session, car, track combo\n self.combo_name = read_data.combo_check()\n self.session_id = read_data.session_check()\n if self.combo_name == saved_data[0]:\n # Check if saved data belongs to current session, discard if not\n if (saved_data[1] == self.session_id[0]\n and saved_data[2] <= self.session_id[1]\n and saved_data[3] <= self.session_id[2]):\n # Assign loaded data\n self.best_laptime = saved_data[4]\n self.best_s = saved_data[5]\n self.bestlap_s = saved_data[6]\n self.ub_topspeed = self.sb_topspeed = saved_data[7]\n\n def parse_save_string(self, save_data):\n \"\"\"Parse last saved sector data\"\"\"\n rex_string = re.split(r\"(\\|)\", save_data)\n data_gen = self.split_save_string(rex_string)\n data = list(data_gen)\n\n try: # fill in data\n final_list = [\n data[0], # combo name, str\n data[1], # session stamp, str\n data[2], # session elapsed time, float\n data[3], # session total laps, float\n data[4], # session PB laptime, float\n [data[5],data[6],data[7]], # session all time best sector, float\n [data[8],data[9],data[10]], # session PB laptime sector, float\n data[11] # session fastest top speed, float\n ]\n except IndexError: # reset data\n final_list = [\"None\"]\n\n return final_list\n\n @staticmethod\n def split_save_string(rex_string):\n \"\"\"Split save string\"\"\"\n for index, val in enumerate(rex_string):\n if val != \"|\":\n if index <= 2:\n yield val\n else:\n yield float(val)\n","repo_name":"s-victor/TinyPedal","sub_path":"tinypedal/widget/sectors.py","file_name":"sectors.py","file_ext":"py","file_size_in_byte":25693,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"62"} 
+{"seq_id":"24208976648","text":"from typing import Optional\n\nfrom fastapi import APIRouter, HTTPException, Depends\nfrom sqlmodel import Session\n\nfrom app.core.db import get_session\nfrom app.crud.crud_todo_item import crud_todo_item\nfrom app.crud.crud_todo_list import crud_todo_list\nfrom app.dependencies.auth import get_current_user\nfrom app.models.todo_item import (\n TodoItem,\n TodoItemCreate,\n TodoItemSortingFields,\n TodoItemUpdate,\n)\nfrom app.models.user import User\nfrom app.schamas.paginated_response import PaginatedResponse, SortingOrder\n\nrouter = APIRouter()\n\n\n@router.post(\"/{todo_list_id}/items\", response_model=TodoItem)\nasync def create_todo_item(\n todo_list_id: int,\n todo_item_in: TodoItemCreate,\n db_session: Session = Depends(get_session),\n user: User = Depends(get_current_user),\n) -> TodoItem:\n todo_list = crud_todo_list.get_for_user(\n db_session, id=todo_list_id, user_id=user.id\n )\n\n if not todo_list:\n raise HTTPException(\n status_code=404,\n detail=\"Todo list not found\",\n )\n\n return crud_todo_item.create_for_list(\n db=db_session, obj_in=todo_item_in, todo_list_id=todo_list_id\n )\n\n\n@router.get(\"/{todo_list_id}/items\", response_model=PaginatedResponse[TodoItem])\nasync def get_todo_items(\n todo_list_id: int,\n page: int = 1,\n page_size: int = 10,\n search: Optional[str] = None,\n sorting_order: SortingOrder = SortingOrder.asc,\n sorting_key: TodoItemSortingFields = \"id\",\n db_session: Session = Depends(get_session),\n user: User = Depends(get_current_user),\n) -> PaginatedResponse[TodoItem]:\n todo_list = crud_todo_list.get_for_user(\n db_session, id=todo_list_id, user_id=user.id\n )\n\n if not todo_list:\n raise HTTPException(\n status_code=404,\n detail=\"Todo list not found\",\n )\n\n return crud_todo_item.get_for_list_paginated(\n db_session,\n page=page,\n per_page=page_size,\n search=search,\n todo_list_id=todo_list_id,\n sorting_key=sorting_key,\n sorting_order=sorting_order,\n )\n\n\n@router.patch(\"/{todo_list_id}/items/{todo_item_id}\", response_model=TodoItem)\nasync def update_todo_item(\n todo_list_id: int,\n todo_item_id: int,\n todo_item_in: TodoItemUpdate,\n db_session: Session = Depends(get_session),\n user: User = Depends(get_current_user),\n) -> TodoItem:\n todo_list = crud_todo_list.get_for_user(\n db_session, id=todo_list_id, user_id=user.id\n )\n\n if not todo_list:\n raise HTTPException(\n status_code=404,\n detail=\"Todo list not found\",\n )\n\n todo_item = crud_todo_item.get(db_session, id=todo_item_id)\n\n if not todo_item:\n raise HTTPException(\n status_code=404,\n detail=\"Todo item not found\",\n )\n\n return crud_todo_item.update(db_session, db_obj=todo_item, obj_in=todo_item_in)\n\n\n@router.delete(\"/{todo_list_id}/items/{todo_item_id}\", response_model=None)\nasync def delete_todo_item(\n todo_list_id: int,\n todo_item_id: int,\n db_session: Session = Depends(get_session),\n user: User = Depends(get_current_user),\n) -> None:\n todo_list = crud_todo_list.get_for_user(\n db_session, id=todo_list_id, user_id=user.id\n )\n\n if not todo_list:\n raise HTTPException(\n status_code=404,\n detail=\"Todo list not found\",\n )\n\n todo_item = crud_todo_item.get(db_session, id=todo_item_id)\n\n if not todo_item:\n raise HTTPException(\n status_code=404,\n detail=\"Todo item not found\",\n )\n\n crud_todo_item.remove(db_session, id=todo_item_id)\n return 
None\n","repo_name":"ikrzywda/fastapi-nextjs-fullstack-template","sub_path":"backend/app/app/api/v1/endpoints/todo_lists/todo_items.py","file_name":"todo_items.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"2104493536","text":"#!/usr/bin/env python3\nimport re\nimport urllib.request\nf = urllib.request.urlopen(\"http://2018shell.picoctf.com:55790/\")\ntext = f.read().decode('utf-8')\n \nflag = re.findall(\"\\'(.*?)\\'\",text,re.DOTALL)\t\ni = len(flag)\nwhile i > 0:\n\tprint(flag[i-1],end='')\n\ti-=1\nprint()\n","repo_name":"manulqwerty/picoCTF-2018-WriteUp","sub_path":"Web Exploitation/Client Side is Still Bad/getFlag.py","file_name":"getFlag.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"12972685413","text":"import pytest\nimport pathlib\nimport random\nimport os\n\nfrom Infra.assembler_wrapper import AssemblerTestRunner, AssemblyLine, PythonAssemblerTestRunner, OPCODE_TO_NUMBER, REGISTER_TO_NUMBER\nfrom Infra import utils\n\nASSEMBLER_PATH = \"../ComputerOrganizationProcessor/build/assembler\"\n# ASSEMBLER_PATH = r\"..\\ComputerOrganizationProcessor\\VisualStudio\\Assembler\\x64\\Debug\\Assembler.exe\"\n\nTESTS_BASE_FOLDER = pathlib.Path(__file__).parent.resolve()\n\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_python_assembler():\n runner = PythonAssemblerTestRunner(ASSEMBLER_PATH)\n runner.set_input_data_from_file(f\"{TESTS_BASE_FOLDER}/resources/fib.asm\")\n runner.set_expected_output_from_file(f\"{TESTS_BASE_FOLDER}/resources/memin.txt\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_assembler_sanity_custom_expected_output(tmp_path):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(\"halt $zero, $zero, $zero, 0\")\n runner.set_expected_output(\"15000\\n\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_assembler_sanity_compare_from_python(tmp_path):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(\"add $zero, $zero, $zero, 0\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"opcode\", [op for op in OPCODE_TO_NUMBER.keys()])\ndef test_assembler_all_opcodes(tmp_path, opcode):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"{opcode} $zero, $zero, $zero, 0\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"rt\", [rt for rt in REGISTER_TO_NUMBER.keys()])\ndef test_assembler_all_registers_rt(tmp_path, rt):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"add {rt}, $zero, $zero, 0\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"rs\", [rs for rs in REGISTER_TO_NUMBER.keys()])\ndef test_assembler_all_registers_rs(tmp_path, rs):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"add $zero, {rs}, $zero, 0\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"rd\", [rd for rd in REGISTER_TO_NUMBER.keys()])\ndef test_assembler_all_registers_rd(tmp_path, rd):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"add $zero, $zero, {rd}, 0\")\n 
runner.run()\n\n@pytest.mark.stress\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"opcode\", [op for op in OPCODE_TO_NUMBER.keys()])\n@pytest.mark.parametrize(\"rt\", [rt for rt in REGISTER_TO_NUMBER.keys()])\n@pytest.mark.parametrize(\"rs\", [rs for rs in REGISTER_TO_NUMBER.keys()])\n@pytest.mark.parametrize(\"rd\", [rd for rd in REGISTER_TO_NUMBER.keys()])\ndef test_assembler_all_ops_and_regs(tmp_path, opcode, rt, rs, rd):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"{opcode} {rt}, {rs}, {rd}, 0\")\n runner.run()\n\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"imm\", [100, 0, -100, 0x100, -0x100,\n 0xfffff, 524287, -524287])\ndef test_assembler_imm_value_range(tmp_path, imm):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"add $zero, $zero, $imm, {imm}\")\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"imm\", [0, 0x1, -53, 0x1337])\ndef test_assembler_imm_in_all_regs(tmp_path, imm):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(f\"add $imm, $imm, $imm, {imm}\")\n runner.run()\n\ndef generate_random_command(newline=True, add_random_chars=True):\n opcode = random.choice([op for op in OPCODE_TO_NUMBER.keys()])\n rt = random.choice([reg for reg in REGISTER_TO_NUMBER.keys()])\n rs = random.choice([reg for reg in REGISTER_TO_NUMBER.keys()])\n rd = random.choice([reg for reg in REGISTER_TO_NUMBER.keys()])\n if add_random_chars:\n break1 = random.choice(10*[\" \"] + ['\\t', \"\\t\\t\", \" \", \" \\t \", \"\\t \\t\"])\n break2 = random.choice(10*[\" \"] + ['\\t', \"\\t\\t\", \" \", \" \\t \", \"\\t \\t\"])\n break3 = random.choice(10*[\" \"] + ['\\t', \"\\t\\t\", \" \", \" \\t \", \"\\t \\t\"])\n break4 = random.choice(10*[\" \"] + ['\\t', \"\\t\\t\", \" \", \" \\t \", \"\\t \\t\"])\n command = f\"{opcode}{break1}{rt},{break2}{rs},{break3}{rd},{break4}0\"\n else:\n command = f\"{opcode} {rt}, {rs}, {rd}, 0\"\n if newline:\n command += os.linesep\n\n return command\n\ndef generate_random_command_with_label(label, newline=True):\n opcode = random.choice([op for op in OPCODE_TO_NUMBER.keys()])\n rt = random.choice([reg for reg in REGISTER_TO_NUMBER.keys() if reg != \"$imm\"])\n rs = random.choice([reg for reg in REGISTER_TO_NUMBER.keys() if reg != \"$imm\"])\n rd = random.choice([reg for reg in REGISTER_TO_NUMBER.keys() if reg != \"$imm\"])\n command = f\"{opcode} {rt}, {rs}, {rd}, {label}\"\n\n if newline:\n command += os.linesep\n\n return command\n\ndef generate_word_command(address, value, newline=True):\n command = f\".word {address} {value}\"\n\n if newline:\n command += os.linesep\n\n return command\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_assembler_line_with_comments(tmp_path):\n command = \"\"\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n command += f\"{generate_random_command(newline=False)}#Some comment\\n\"\n command += f\"{generate_random_command(newline=False)}#######Some comment\\n\"\n command += f\"{generate_random_command(newline=False)}\\t\\t\\t #######\\n\"\n command += f\"{generate_random_command(newline=False)} # \\n\"\n command += f\"{generate_random_command(newline=False)}# # # # AA\\n\"\n runner.set_input_data_from_str(command)\n runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_assembler_long_line(tmp_path):\n runner = AssemblerTestRunner(ASSEMBLER_PATH, 
tmp_path.as_posix())\n    # Add comment followed by 'A' for entire command max length\n    command = generate_random_command(newline=False)\n    command += \"#\"\n    # Max line length is 300, but we need extra space for the \\0\n    command += (299-len(command)) * 'A'\n    runner.set_input_data_from_str(command)\n    print(command)\n    runner.run()\n\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"max_label_length\", [5, 10, 20])\n@pytest.mark.parametrize(\"extra_label_calls\", [0, 30, 50])\ndef test_assembler_many_labels(tmp_path, max_label_length, extra_label_calls):\n    # TODO: Adding a label at the start without any commands before it (address zero) causes a seg fault\n    command = \"\"\n    command += generate_random_command()\n    labels_names = [utils.get_random_string(x) for x in range(1, max_label_length)]\n    random.shuffle(labels_names)\n    for label in labels_names:\n        command += f\"{label}:\\n\"\n        command += generate_random_command_with_label(label)\n\n    # Add extra calls to labels\n    for _ in range(extra_label_calls):\n        command += generate_random_command_with_label(random.choice(labels_names))\n\n    runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n    runner.set_input_data_from_str(command)\n    print(command)\n    runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\ndef test_assembler_label_in_the_middle(tmp_path):\n    label_name = \"SomeLabel\"\n    command = \"\"\n    for _ in range(5):\n        command += generate_random_command_with_label(label_name)\n        command += generate_random_command()\n    command += generate_random_command_with_label(label_name)\n    command += generate_random_command_with_label(label_name)\n    command += f\"{label_name}:\\n\"\n    for _ in range(5):\n        command += generate_random_command_with_label(label_name)\n        command += generate_random_command()\n    command += generate_random_command()\n    command += generate_random_command_with_label(label_name)\n    command += generate_random_command_with_label(label_name)\n\n    runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n    runner.set_input_data_from_str(command)\n    print(command)\n    runner.run()\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"address,value\", [(10, 100),\n                                          (256, 0x17),\n                                          (0xaa, 11),\n                                          (0xbb, 0xcc)])\ndef test_assembler_word_command(tmp_path, address, value):\n    runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n    command = \"\"\n\n    command += generate_random_command()\n    command += generate_word_command(address, value)\n\n\n    runner.set_input_data_from_str(command)\n    print(command)\n    runner.run()\n\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"iter_number\", range(10))\ndef test_assembler_multiple_word_commands(tmp_path, iter_number):\n    runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n    command = \"\"\n\n    command += generate_random_command()\n    command += generate_random_command()\n    command += generate_random_command()\n    command += generate_word_command(0x30, 45)\n    command += generate_random_command()\n    command += generate_word_command(300, 100)\n    command += generate_word_command(302, 102)\n    command += generate_word_command(301, 101)\n    command += generate_word_command(0x20, 35)\n    for i in range(iter_number):\n        command += generate_word_command(random.randint(0, 4095), random.randint(0, 127))\n\n    runner.set_input_data_from_str(command)\n    print(command)\n    runner.run()\n\n\ndef random_command_generation(tmp_path, number_of_commands):\n    commands = []\n\n    # Taking only a single label in order to prevent 2 labels after each other\n    
labels_names = [utils.get_random_string(x) for x in range(1, 2)]\n for label_name in labels_names:\n commands.append(f\"{label_name}:\\n\")\n number_of_commands -=1\n\n for _ in range(number_of_commands//3):\n commands.append(generate_random_command_with_label(random.choice(labels_names)))\n number_of_commands -= 1\n\n for _ in range(number_of_commands):\n commands.append(generate_random_command())\n number_of_commands -= 1\n\n # TODO: Add words as well after fixing bugs over there\n\n random.shuffle(commands)\n command = \"\".join(commands)\n runner = AssemblerTestRunner(ASSEMBLER_PATH, tmp_path.as_posix())\n runner.set_input_data_from_str(command)\n print(command)\n runner.run()\n\n\n@pytest.mark.sanity\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"number_of_commands\", [5, 100, 300])\ndef test_assembler_random_command_generation(tmp_path, number_of_commands):\n random_command_generation(tmp_path, number_of_commands)\n\n@pytest.mark.stress\n@pytest.mark.assembler\n@pytest.mark.parametrize(\"number_of_commands\", [x for x in range(5, 1000)])\n@pytest.mark.parametrize(\"iter_number\", [x for x in range(5)])\ndef test_assembler_random_command_generation_stress(tmp_path, number_of_commands, iter_number):\n random_command_generation(tmp_path, number_of_commands)\n","repo_name":"IdanMeyer/ComputerOrganizationTestingInfra","sub_path":"tests/test_assembler.py","file_name":"test_assembler.py","file_ext":"py","file_size_in_byte":11250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35677240286","text":"import unittest\r\nfrom lab2_task2 import Json\r\nimport json\r\n\r\nclass MyTestCase(unittest.TestCase):\r\n def test_something(self):\r\n my_json = Json()\r\n set = [228, False, 14.82, True, {5: \"BSUIR1\", 0: 28}, [88, 55]]\r\n self.assertEqual(my_json.to_json(set), json.dumps(set))\r\n\r\n def test_1(self):\r\n my_json = Json()\r\n test = {\"vor\": 100}\r\n self.assertEqual(my_json.to_json(test), json.dumps(test))\r\n\r\n def test_2(self):\r\n my_json = Json()\r\n test = 100\r\n self.assertEqual(my_json.to_json(test), json.dumps(test))\r\n\r\n def test_3(self):\r\n my_json = Json()\r\n test = {\"Univ\": \"Bsuir\", \"vor\": [3, \"str\"]}\r\n self.assertEqual(my_json.to_json(test), json.dumps(test))\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n\r\n","repo_name":"vgpnkw/BSUIR-PYTHON-2020","sub_path":"Solutions/Task2/853503_Виталий_Гапаньков/unitest_task2.py","file_name":"unitest_task2.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"29773957274","text":"import arcade\n\nimport src.const as C\n\nfrom src.audio import Audio\nfrom src.lib import global_scale\nimport src.save_data as save_data\nfrom src.tracker import Tracker\n\nfrom src.sprite.bullet import Bullet\nfrom src.sprite.enemy import Enemy\n\n\nclass PauseMenuView(arcade.View):\n \"\"\"\n PauseMenuView View\n\n ...\n\n Methods\n -------\n on_show()\n Show the pause menu view\n on_draw()\n Draw the pause menu view\n on_mouse_press(x: float, y: float, button: int, modifiers: int)\n Listen to mouse press event\n on_key_press(key: int, modifiers: int)\n Listen to keyboard press event\n \"\"\"\n\n def __init__(self, game_view, map_view, current_level):\n # Inherit parent class\n super().__init__()\n\n self.cursor_sprite = None\n self.background = None\n self.btn_list = None\n self.highlight = False\n self.normal_scale = 0.8 * global_scale()\n self.highlight_scale 
= 1 * global_scale()\n\n self.bgm_stream = None\n self.sfx_click = None\n self.sfx_meow = None\n\n self.game_view = game_view\n self.map_view = map_view\n self.current_level = current_level\n\n self.btn_dict_ = [\n {\"img_name\": \"btn_resume.png\",\n \"name\": \"resume\",\n \"center_x\": C.SCREEN_WIDTH * .75 // 1,\n \"center_y\": C.SCREEN_HEIGHT * .65 // 1,\n },\n {\"img_name\": \"btn_back_to_map.png\",\n \"name\": \"back_to_map\",\n \"center_x\": C.SCREEN_WIDTH * .75 // 1,\n \"center_y\": C.SCREEN_HEIGHT * .50 // 1,\n },\n {\"img_name\": \"btn_quit_game.png\",\n \"name\": \"quit_game\",\n \"center_x\": C.SCREEN_WIDTH * .75 // 1,\n \"center_y\": C.SCREEN_HEIGHT * .35 // 1,\n },\n ]\n # Find & set pause menu bgm\n view = None\n for view_dict in C.VIEW_LIST:\n if view_dict[\"name\"] == \"Pause\":\n view = view_dict\n for _i, bgm in enumerate(Audio.bgm_list):\n if bgm[\"view_name\"] == view[\"name\"]:\n self.bgm = bgm[\"sound\"]\n break\n # Find & set click sfx\n for _i, sfx in enumerate(Audio.sfx_list):\n if sfx[\"file_name\"] == \"ui/\" + C.AUDIO.SOUND[\"ui_click\"][\"name\"]:\n self.sfx_click = sfx[\"sound\"]\n break\n\n # Find & set meow sfx\n for _i, sfx in enumerate(Audio.sfx_list):\n if sfx[\"file_name\"] == \"ui/\" + C.AUDIO.SOUND[\"ui_meow\"][\"name\"]:\n self.sfx_meow = sfx[\"sound\"]\n break\n\n # Start bgm\n self.bgm_stream = Audio.play_sound(self.bgm, True)\n\n def setup(self):\n \"\"\" Set up everything with the pause view \"\"\"\n\n # Create the sprites\n self.background = arcade.load_texture(\n \"src/resources/images/pause_view/screen_pause.png\")\n self.cursor_sprite = arcade.Sprite(\n \"src/resources/images/goat_cursor.png\", 1)\n\n # Create the buttons sprite list\n self.btn_list = arcade.SpriteList(is_static=True)\n for btn_dict in self.btn_dict_:\n button = arcade.Sprite(\n filename=\"src/resources/images/pause_view/\" + btn_dict[\"img_name\"],\n scale=self.normal_scale)\n button.name = btn_dict[\"name\"]\n button.center_x = btn_dict[\"center_x\"]\n button.center_y = btn_dict[\"center_y\"]\n self.btn_list.append(button)\n\n def on_show(self):\n \"\"\"Called when switching to this view.\"\"\"\n self.setup()\n\n def on_draw(self):\n \"\"\"Draw the menu\"\"\"\n self.clear()\n\n # Draw the bg image\n arcade.draw_lrwh_rectangle_textured(\n bottom_left_x=0,\n bottom_left_y=0,\n width=arcade.get_window().width,\n height=arcade.get_window().height,\n texture=self.background)\n\n # Draw buttons\n self.btn_list.draw()\n\n # Draw the cursor\n self.cursor_sprite.draw()\n\n def on_update(self, delta_time: float):\n hit_list = arcade.check_for_collision_with_list(\n self.cursor_sprite, self.btn_list)\n\n if len(hit_list):\n for i, monument in enumerate(self.btn_list):\n if i != self.btn_list.index(hit_list[0]):\n monument.scale = self.normal_scale\n else:\n monument.scale = self.highlight_scale\n self.highlight = True\n elif self.highlight:\n for monument in self.btn_list:\n monument.scale = self.normal_scale\n self.highlight = False\n\n def on_mouse_motion(self, x, y, dx, dy):\n self.cursor_sprite.center_x = x + \\\n C.MAP[\"Cursor\"][\"offset_x\"] * global_scale()\n self.cursor_sprite.center_y = y + \\\n C.MAP[\"Cursor\"][\"offset_y\"] * global_scale()\n\n # # Check if shops hit cursor (Simply because less number of checking)\n # if self.shop_sprite.collides_with_sprite(self.cursor_sprite):\n # self.shop_sprite.color = (0, 255, 0)\n # self.shop_sprite.scale = .24 * global_scale()\n # else:\n # self.shop_sprite.color = (255, 255, 255)\n # self.shop_sprite.scale = .2 * 
global_scale()\n\n def on_mouse_press(self, _x, _y, _button, _modifiers):\n \"\"\"Use a mouse press to advance to the 'game' view.\"\"\"\n\n hit_btn = arcade.check_for_collision_with_list(\n self.cursor_sprite, self.btn_list)\n if hit_btn:\n\n # Play click sfx\n Audio.play_sound(self.sfx_click)\n\n if hit_btn[0].name == \"resume\":\n self.resume()\n elif hit_btn[0].name == \"quit_game\":\n self.quit_game()\n elif hit_btn[0].name == \"back_to_map\":\n self.to_map()\n\n # If hit the cat\n _scale = global_scale()\n if 20 * _scale < _x < 120 * _scale and 270 * _scale < _y < 370 * _scale:\n Audio.play_sound(self.sfx_meow)\n Tracker.trigger_easter_egg()\n\n def on_key_press(self, symbol, _modifiers):\n \"\"\"Handle keyboard key press\"\"\"\n if symbol == arcade.key.Q:\n self.quit_game()\n elif symbol == arcade.key.M:\n self.to_map()\n elif symbol == arcade.key.SPACE:\n self.resume()\n\n def resume(self):\n Audio.stop_sound(self.bgm_stream)\n self.bgm_stream = None\n self.window.show_view(self.game_view)\n\n def quit_game(self):\n # Stop bgm\n Audio.stop_sound(self.bgm_stream)\n self.bgm_stream = None\n arcade.exit()\n\n def to_map(self):\n Audio.stop_sound(self.bgm_stream)\n self.bgm_stream = None\n self.window.show_view(self.map_view)\n self.exit_level()\n\n def exit_level(self):\n save_data.GameData.update_highscore(self.current_level)\n save_data.GameData.deposit_gold()\n Tracker.reset_trackers()\n\n # Reset bullets\n Bullet.friendly_bullet_list = arcade.SpriteList()\n Bullet.enemy_bullet_list = arcade.SpriteList()\n\n # Reset enemies\n Enemy.enemy_list = arcade.SpriteList()\n","repo_name":"MrTanoshii/PyWeek-33-Metro","sub_path":"src/view/pause_menu.py","file_name":"pause_menu.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"71099339079","text":"import os\nimport csv\nfrom tqdm import tqdm\nfrom termcolor import colored\n\n\ndef textToCSV(input_file_path, input_file_name, output_file_path, output_file_name):\n try:\n input_file = os.path.join(input_file_path, input_file_name)\n output_file = os.path.join(output_file_path, output_file_name)\n print(colored(f\"> Converting {input_file} to {output_file}\", \"green\"))\n total_lines = sum(1 for line in open(input_file))\n\n with open(input_file, 'r') as txt_file, open(output_file, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n\n for line in tqdm(txt_file, total=total_lines, desc=colored(\"Converting .txt to .csv\", \"yellow\")):\n data = line.strip().split()\n rank = data[0]\n name = ' '.join(data[1:-3])\n university = data[-3]\n roll_no = data[-2]\n section = data[-1]\n csv_writer.writerow([rank, name, university, roll_no, section])\n\n print(colored(\"> Conversion complete\", \"green\"))\n\n except FileNotFoundError as e:\n print(colored(f\"Error: {e}\", \"red\"))\n except Exception as e:\n print(colored(f\"An unexpected error occurred: {e}\", \"red\"))\n","repo_name":"prajeshElEvEn/file-tweak","sub_path":"scripts/textToCSV.py","file_name":"textToCSV.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13751803560","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 23 20:29:05 2020\n\n@author: Mami\n\"\"\"\n\ndef sortList(L, n):\n L2 = []\n counter = 0\n while (counter <= n):\n m, idx = searchMinFromList(L, n)\n L2.append(m)\n del L[idx]\n n = n-1\n counter = counter + 1\n return 
L2","repo_name":"handsome-linen/paradox","sub_path":"P/sortList.py","file_name":"sortList.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36680674917","text":"from django.db import models\nfrom django.forms import model_to_dict\nfrom Apps.suppliers.models import Supplier\n\n\nclass Category(models.Model):\n STATUS_CHOICES = ( # new\n (\"ACTIVO\", \"Activo\"),\n (\"INACTIVO\", \"Inactivo\")\n )\n\n name = models.CharField(max_length=256)\n description = models.TextField(max_length=256)\n status = models.CharField(\n choices=STATUS_CHOICES,\n max_length=100,\n verbose_name=\"Status of the category\",\n )\n\n class Meta:\n # Table's name\n db_table = \"Category\"\n verbose_name_plural = \"Categories\"\n\n def __str__(self) -> str:\n return self.name\n\nclass Warehouse(models.Model):\n STATUS_CHOICES = ( # new\n (\"ACTIVO\", \"Activo\"),\n (\"INACTIVO\", \"Inactivo\")\n )\n\n name = models.CharField(max_length=256)\n description = models.TextField(max_length=256)\n status = models.CharField(\n choices=STATUS_CHOICES,\n max_length=100,\n verbose_name=\"Status of the category\",\n )\n\n class Meta:\n db_table = \"Warehouse\"\n verbose_name_plural = \"Warehouses\"\n\n def __str__(self) -> str:\n return self.name\n\nclass RuleSupply(models.Model):\n STATUS_CHOICES = ( # new\n (\"ACTIVO\", \"Activo\"),\n (\"INACTIVO\", \"Inactivo\")\n )\n minimumAmount = models.FloatField()\n maximumQuantity = models.FloatField()\n supplier = models.ForeignKey(\n Supplier, models.DO_NOTHING, db_column='supplier', default= None, null=True, blank=True)\n status = models.CharField(\n choices=STATUS_CHOICES,\n max_length=100,\n verbose_name=\"Status of the rule supply\",\n default=\"INACTIVO\" # Puedes establecer un valor predeterminado según tus necesidades.\n )\n\n class Meta:\n db_table = \"RuleSupply\"\n verbose_name_plural = \"Warehouses\"\n\nclass Product(models.Model):\n STATUS_CHOICES = (\n (\"ACTIVO\", \"Activo\"),\n (\"INACTIVO\", \"Inactivo\")\n )\n\n name = models.CharField(max_length=256)\n description = models.TextField(max_length=256)\n status = models.CharField(\n choices=STATUS_CHOICES,\n max_length=100,\n verbose_name=\"Status of the product\",\n )\n category = models.ForeignKey(\n Category, related_name=\"category\", on_delete=models.CASCADE, db_column='category')\n warehouse = models.ForeignKey(\n Warehouse, related_name=\"warehouse\", on_delete=models.CASCADE, db_column='warehouse', default=None)\n ruleSupply = models.ForeignKey(\n RuleSupply, related_name=\"ruleSupply\", on_delete=models.CASCADE, db_column='ruleSupply', default= None, null=True, blank=True)\n price = models.FloatField(default=0)\n priceBuy = models.FloatField(default=0)\n quantity= models.FloatField(default=0)\n image = models.ImageField(upload_to='product_images/', default='default_image.jpg')\n\n\n class Meta:\n db_table = \"Product\"\n\n def __str__(self):\n return self.name\n\n def to_json(self):\n item = model_to_dict(self)\n item['id'] = self.id\n item['text'] = self.name\n item['category'] = self.category.name\n item['warehouse'] = self.warehouse.name\n item['ruleSupply'] = self.id\n item['quantity'] = 1\n item['total_product'] = 0\n return item","repo_name":"EdwuardM/pg2","sub_path":"DjangoMIS/Apps/products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37666909550","text":"\nfrom 
.talk import TinyCoinTalk\n\n\nclass TinyCoinReceive(TinyCoinTalk):\n    '''\n    {Bit,Lite,Doge,...}coin payment accepting using the TinyCoinTalk RPC client\n    that talks to the {}coind RPC server.\n\n    Based on an arbitrary identifier (transaction_id), it can \n    - create a payment address\n    - detect if the payment has been completed\n    \n    It doesn't save anything on disk by itself, leaving the database management\n    to the {}coind server.\n    '''\n\n    def _check_type(self, transaction_id):\n        if not isinstance(transaction_id, str):\n            raise ValueError( ('Address label (transaction ID) is not a '\n                'string but its type is {}').format(type(transaction_id)))\n\n    \n    def _get_address(self, transaction_id):\n        '''\n        Returns\n        -------\n        address : string\n            The {}coin address matching the label, if existing.\n            If not existing, returns an empty string.\n        '''\n        try:\n            response = self.call('getaddressesbylabel', [transaction_id])\n        except Exception as e:\n            # This is the case of no matching transaction IDs (RPC server returns code 500)\n            return ''\n        \n        if len(response) > 1:\n            raise ValueError(\n                    ('More than 1 labels matching id {}: Got {}'\n                     ).format(transaction_id, response))\n        \n        return list(response.keys())[0]\n\n\n\n    def get_payment_address(self, transaction_id):\n        '''Gives the coin address matching the given transaction_id.\n\n        Checks if there is a wallet address with the label \"transaction_id\",\n        creates one if not, and returns the payment address.\n        \n        Parameters\n        ----------\n        transaction_id : string\n            Identifier of the transaction\n\n        Returns\n        -------\n        address : string\n            The public key address for receiving the user payment.\n        '''\n        self._check_type(transaction_id)\n        \n        address = self._get_address(transaction_id)\n        if not address:\n            address = self.call('getnewaddress', [transaction_id])\n\n        return address\n\n\n    def payment_completed(self, transaction_id, coin_amount):\n        '''Checks if the transaction_id has been completed\n\n        Parameters\n        ----------\n        transaction_id : string\n            Identifier of the transaction\n        coin_amount : numerical\n            The coin amount (basic units) required for the payment\n            to be considered completed.\n\n        Returns\n        -------\n        completed : bool\n            If True, the payment has been completed. 
Otherwise, False.\n        amount : numerical\n            Returns the amount paid, in the basic units (DOGE, LTC, BTC, ...)\n        '''\n        self._check_type(transaction_id)\n        \n        address = self._get_address(transaction_id)\n        if address:\n            amount = self.call('getreceivedbyaddress', [address])\n            completed = bool(amount and amount >= coin_amount)\n        else:\n            raise ValueError('Cannot find the address matching the transaction_id')\n\n        return completed, amount\n\n\n","repo_name":"bigcoinboy/tinycoinlib","sub_path":"src/tinycoinlib/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"41341893397","text":"from proxy import fetch_proxies, fetch_proxies_one, fetch_proxies_two, fetch_proxies_three\nfrom bs4 import BeautifulSoup\nimport requests\nimport concurrent.futures\nimport pandas as pd\nimport datetime\n\nNUM_THREADS = 10\n\ncustId = '176165485'\nCategoriesWithSub = []\nCategoriesWithOutSub = []\nproducts_data = []\n\n\ndef getMoreCategory(link):\n    try:\n        responseCategory = fetch_proxies(link)\n        if not responseCategory:\n            responseCategory = fetch_proxies_one(link)\n        if not responseCategory:\n            responseCategory = fetch_proxies_two(link)\n        if not responseCategory:\n            responseCategory = fetch_proxies_three(link)\n\n        CategorySoup = BeautifulSoup(responseCategory.content, 'html.parser')\n        CategoriesWithSub.remove(link)\n        getLinkByCategory(CategorySoup)\n    except:\n        print(\"No more subcategories\")\n\ndef getTotalSold(name):\n    try:\n        response = fetch_proxies(f'https://www.mercadolibre.com.co/perfil/{name}')\n        if not response:\n            response = fetch_proxies_one(f'https://www.mercadolibre.com.co/perfil/{name}')\n        if not response:\n            response = fetch_proxies_two(f'https://www.mercadolibre.com.co/perfil/{name}')\n        if not response:\n            response = fetch_proxies_three(f'https://www.mercadolibre.com.co/perfil/{name}')\n        \n        SellerProfilesoup = BeautifulSoup(response.content, 'html.parser')\n        infoSeller = SellerProfilesoup.find('p', {'class': 'seller-info__subtitle-sales'}).text\n        products_data.append({'seller_info':infoSeller})\n\n    except:\n        print(\"Error on looking info\")\n\ndef getLinkByCategory(pageSoup:BeautifulSoup):\n\n    titleCategory = pageSoup.find('div', {'class': 'ui-search-filter-dt-title shops-custom-primary-font'})\n    title = titleCategory.get_text()\n    if title == 'Categorías':\n\n        listCategories = titleCategory.find_previous('div', {'class': 'ui-search-filter-dl shops__filter-items'})\n        categories = listCategories.find_all('li', {'class': 'ui-search-filter-container shops__container-lists'})\n        \n        if (len(categories) > 9):\n            linkMoreCategories = listCategories.find('a', {'class': 'ui-search-modal__link ui-search-modal--default ui-search-link'})['href'] \n            try:\n                res = fetch_proxies(linkMoreCategories)\n                if res == None:\n                    res = fetch_proxies_one(linkMoreCategories)\n                if res == None:\n                    res = fetch_proxies_two(linkMoreCategories)\n                if res == None:\n                    res = fetch_proxies_three(linkMoreCategories)\n            except:\n                print('Error to get Link categories')\n\n            CategoryPage = BeautifulSoup(res.content, 'html.parser')\n            linkCategories = CategoryPage.find('div', {'class': 'ui-search-search-modal-grid-columns'})\n            links = linkCategories.find_all('a', {'class': 'ui-search-search-modal-filter ui-search-link'})\n\n            for linkCategory in links:\n                try:\n\n                    res = fetch_proxies(linkCategory.get('href'))\n                    if not res:\n                        res = fetch_proxies_one(linkCategory.get('href'))\n                    if not res:\n                        res = 
fetch_proxies_two(linkCategory.get('href'))\n                    if not res:\n                        res = fetch_proxies_three(linkCategory.get('href'))\n\n\n                    page = BeautifulSoup(res.content, 'html.parser')\n                    itemsResult = int(page.find('span',{'class':'ui-search-search-result__quantity-results shops-custom-secondary-font'}).text.replace(' resultados', '').replace('.', ''))\n                    \n                    if itemsResult > 2000:\n                        CategoriesWithSub.append(linkCategory.get('href'))\n                    else:\n                        CategoriesWithOutSub.append(linkCategory.get('href'))\n\n                except:\n                    print(\"Error to search link category\")\n        else:\n            for category in categories:\n                linkCategory = category.find('a', {'class': 'ui-search-link'})['href']\n                try:\n                    res = fetch_proxies(linkCategory)\n                    if not res:\n                        res = fetch_proxies_one(linkCategory)\n                    if not res:\n                        res = fetch_proxies_two(linkCategory)\n                    if not res:\n                        res = fetch_proxies_three(linkCategory)\n\n                    page = BeautifulSoup(res.content, 'html.parser')\n                    itemsResult = int(page.find('span',{'class':'ui-search-search-result__quantity-results shops-custom-secondary-font'}).text.replace(' resultados', '').replace('.', ''))\n                    if itemsResult > 2000:\n                        CategoriesWithSub.append(linkCategory)\n                    else:\n                        CategoriesWithOutSub.append(linkCategory)\n                except Exception as e:\n                    print(\"Error to search link category 2\", e)\n\n    if title == 'Precio':\n        listCategories = titleCategory.find_previous('div', {'class': 'ui-search-filter-dl shops__filter-items'})\n        categoriesByPrice = listCategories.find_all('li', {'class': 'ui-search-filter-container shops__container-lists'})\n        \n        for category in categoriesByPrice:\n            listPrice = category.find('a', {'class': 'ui-search-link'})['href']\n            try:\n                res = fetch_proxies(listPrice)\n                if not res:\n                    res = fetch_proxies_one(listPrice)\n                if not res:\n                    res = fetch_proxies_two(listPrice)\n                if not res:\n                    res = fetch_proxies_three(listPrice)\n                \n                page = BeautifulSoup(res.content, 'html.parser')\n                itemsResult = int(page.find('span',{'class':'ui-search-search-result__quantity-results shops-custom-secondary-font'}).text.replace(' resultados', '').replace('.', ''))\n\n                if itemsResult > 2000:\n                    CategoriesWithSub.append(listPrice)\n                else:\n                    CategoriesWithOutSub.append(listPrice)\n            except:\n                print(\"Error to search link category\")\n    if len(CategoriesWithSub) > 0:\n        with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n            executor.map(getMoreCategory, CategoriesWithSub)\n\ndef get_id(url:str) -> str:\n\n    url_list = url.split('/')\n    product_id = ''\n\n    if 5 > len(url_list):\n        product_id = url_list[3]\n        product_id = product_id.split('-')\n        product_id = product_id[0] + product_id[1]\n    else:\n        product_id = url_list[5]\n        product_id = product_id.split('?')\n        product_id = product_id[0]\n    return product_id\n\ndef getInformationOlList(soup: BeautifulSoup):\n    try:\n        section = soup.find('section', {'class': 'ui-search-results ui-search-results--without-disclaimer shops__search-results'})\n        rawItemList = section.find_all('li', {'class': 'ui-search-layout__item'})\n\n        for item in rawItemList:\n            try:\n                with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n                    executor.submit(\n                        products_data.append({\n                            'titles': item.find('h2', {'class': 'ui-search-item__title'}).text,\n                            'prices': item.find('span', {'class': 'price-tag-fraction'}).text.replace(',', ''),\n                            'urls': item.find('a', {'class': 'ui-search-item__group__element'})['href'],\n                            'id_product': get_id(item.find('a', {'class': 'ui-search-item__group__element'})['href']),\n                            'main_image': item.find('img', {'class': 
'ui-search-result-image__element'})[\"data-src\"]\n })\n )\n except Exception as r:\n print(r)\n\n except Exception as e:\n print(\"Error get element Ol\", e)\n\ndef getInformation(soup: BeautifulSoup) -> None:\n try:\n rawItemList = soup.find_all('li', {'class': 'ui-search-layout__item'})\n\n for item in rawItemList:\n try:\n with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n executor.submit(\n products_data.append({\n 'titles': item.find('h2', {'class': 'ui-search-item__title'}).text,\n 'prices': item.find('span', {'class': 'price-tag-fraction'}).text.replace(',', ''),\n 'urls': item.find('a', {'class': 'ui-search-item__group__element'})['href'],\n 'id_product': get_id(item.find('a', {'class': 'ui-search-item__group__element'})['href']),\n 'main_image': item.find('img', {'class': 'ui-search-result-image__element'})[\"data-src\"]\n })\n )\n \n except Exception as r:\n print(r)\n except:\n getInformationOlList(soup)\n print(\"Error to get information\")\n\ndef pagination(nextPage, isNextPage, isFirstPage, soup): \n\n if not nextPage:\n return\n if isFirstPage == '1':\n getInformation(soup)\n\n if nextPage and isNextPage == 'Siguiente':\n try:\n responseNextPage = fetch_proxies(nextPage)\n if not responseNextPage:\n responseNextPage = fetch_proxies_one(nextPage)\n if not responseNextPage:\n responseNextPage = fetch_proxies_two(nextPage)\n if not responseNextPage:\n responseNextPage = fetch_proxies_three(nextPage)\n\n soupNextPage = BeautifulSoup(responseNextPage.content, 'html.parser')\n getInformation(soupNextPage)\n\n nextPageResult = soupNextPage.find('a', {'class': 'andes-pagination__link', 'title': 'Siguiente'})['href']\n isNextPageResult = soupNextPage.find('a', {'class': 'andes-pagination__link', 'title': 'Siguiente'})['title']\n if nextPageResult and isNextPageResult:\n pagination(nextPageResult, isNextPageResult, '0', soupNextPage)\n except:\n print(\"end code\")\n\ndef searchItems(link):\n try:\n responseCategory = fetch_proxies(link)\n if not responseCategory:\n responseCategory = fetch_proxies_one(link)\n if not responseCategory:\n responseCategory = fetch_proxies_two(link)\n if not responseCategory:\n responseCategory = fetch_proxies_three(link)\n\n CategorySoup = BeautifulSoup(responseCategory.content, 'html.parser')\n try:\n nextPage = CategorySoup.find('a', {'class': 'andes-pagination__link'})['href']\n isNextPage = CategorySoup.find('a', {'class': 'andes-pagination__link'})['title']\n isFirstPage = CategorySoup.find('span', {'class':'andes-pagination__link'}).text\n\n if nextPage and isNextPage:\n pagination(nextPage, isNextPage, isFirstPage, CategorySoup)\n except:\n getInformation(CategorySoup)\n \n except:\n print(\"Error to get items\")\n\ndef get_info(product):\n try:\n res = fetch_proxies(product['urls'])\n if not res:\n res = fetch_proxies_one(product['urls'])\n if not res:\n res = fetch_proxies_two(product['urls'])\n if not res:\n res = fetch_proxies_three(product['urls'])\n\n ItemSoup = BeautifulSoup(res.content, 'html.parser')\n try:\n countSold = ItemSoup.find('span', {'class': 'ui-pdp-subtitle'}).text\n product['sold'] = countSold[10:]\n except:\n pass\n \n try:\n descriptionContent = ItemSoup.find('p', {'class': 'ui-pdp-description__content'})\n description = descriptionContent.get_text()\n\n if len(description) >= 100:\n product['description'] = description[:99]\n else:\n product['description'] = description\n except:\n pass\n \n try:\n imagesContent = ItemSoup.find_all('div', {'class': 'ui-pdp-thumbnail__picture'})\n \n for 
item in imagesContent:\n images = item.find('img', {'class': 'ui-pdp-image'})\n desc = ''\n for num in images['alt']:\n images = item.find('img', {'class': 'ui-pdp-image'})\n desc += num\n\n first = images['data-src']\n if desc == 'Imagen 1':\n pass\n if desc == 'Imagen 2':\n product['second_image'] = first\n if desc == 'Imagen 3':\n product['third_image'] = first\n except:\n pass\n \n except Exception as e:\n pass\n\n\ndef main():\n y = datetime.datetime.now()\n print(y)\n print(\"Start scraping... please wait...\")\n try:\n response = fetch_proxies('https://listado.mercadolibre.com.co/_CustId_'+custId)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n name_seler = soup.find('h1', {'class': 'ui-search-breadcrumb__title'}).text[17::].replace(' ', '+')\n\n getLinkByCategory(soup)\n getTotalSold(name_seler)\n \n with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:\n executor.map(searchItems, CategoriesWithOutSub)\n products_data.sort(reverse=True, key=lambda x:(len(x), repr(x)))\n \n for i in range(0, len(products_data)):\n try:\n if products_data[i+1]['id_product'] == products_data[i]['id_product']:\n products_data.remove(products_data[i])\n except:\n continue\n\n if len(products_data) % 5000 == 0:\n time_queries = int(len(products_data) / 5000)\n else:\n time_queries = int((len(products_data) // 5000) + 1)\n \n for i in range(0, time_queries):\n with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:\n executor.map(get_info, products_data[0:4999])\n \n frame = pd.DataFrame(products_data)\n frame.to_excel(f'{name_seler}.xlsx', index=False)\n\n except Exception as E:\n print(E)\n\n x = datetime.datetime.now()\n print(x)\nif __name__ == '__main__':\n main()","repo_name":"Jaynornj/MercadolibreScraper-Jay-updated1.0","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":14826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17667157666","text":"from django.shortcuts import redirect\n\n# Create your views here.\nfrom django.contrib.auth.views import LoginView, LogoutView \n# from django.urls import reverse_lazy\n\n\n# Create your views here.\n\nclass UserLoginView(LoginView):\n template_name = \"login/login.html\"\n\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"/user/admin\")\n return super().dispatch(request, *args, **kwargs)\n\n\nclass UserLogout(LogoutView):\n template_name = \"login/logout.html\"\n\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n # pendiente la proxima funcion","repo_name":"Manuelzu17/PROYECTO_BACKEND_Django_Docker","sub_path":"loginU/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"42352494192","text":"from collections import Counter\nfrom typing import List, Tuple\n\nfrom Cluster.cluster import Cluster\nfrom resources import constant\n\n\nclass RelationMetrics:\n\n def __init__(self, cluster: Cluster):\n self._unique_relation_participants = {}\n self._value_per_relation = {}\n self._unique_relations_counter = Counter()\n self._cluster: Cluster = cluster\n\n def add_relation(self, relation):\n if relation.name not in self._unique_relation_participants:\n self._unique_relation_participants[relation.name] = set()\n self._value_per_relation[relation.name] = Counter()\n if relation.source 
not in self._unique_relation_participants[relation.name]:\n            self._unique_relations_counter[relation.name] += 1\n            self._unique_relation_participants[relation.name].add(relation.source)\n        self._value_per_relation[relation.name][relation.target] += 1\n\n    def top_relations(self, max_relations, min_occurrence_factor=0.3) -> List[Tuple[str, int]]:\n        return list(filter(lambda x: x[1] > self.number_of_entities * min_occurrence_factor,\n                           self._unique_relations_counter.most_common(max_relations)))\n\n    def top_values(self, relation_name, max_values, min_occurrence_factor=0.1) -> List[Tuple[str, int]]:\n        return list(filter(lambda x: x[1] > self.number_of_entities * min_occurrence_factor,\n                           self._value_per_relation[relation_name].most_common(max_values)))\n\n    def __str__(self):\n        representation = []\n        for relation, relation_count in self.top_relations(constant.MAX_NUMBER_OF_RELATIONS_PER_CLUSTER):\n            relation_percentage = round(relation_count / self.number_of_entities * 100, 2)\n            representation.append(f\"Relation: {relation} {relation_percentage}%\")\n            for value, value_count in self.top_values(relation, constant.MAX_NUMBER_OF_VALUES_PER_RELATION):\n                value_percentage = round(value_count / self.number_of_entities * 100, 2)\n                representation.append(\"\\t↳ {:5.2f}% {}\".format(value_percentage, value))\n        return \"\\n\".join(representation)\n\n    @property\n    def number_of_entities(self) -> int:\n        return len(self._cluster.entities)\n\n    def to_json_object(self) -> object:\n\n        def top_relations_json_object() -> object:\n\n            def relation_json_object(relation: Tuple[str, int]) -> object:\n\n                def relation_value_json_object(value: Tuple[str, int], entities_with_relation: int) -> object:\n                    return {\n                        \"name\": value[0],\n                        \"absolute_occurrence\": value[1],\n                        \"relative_occurrence\": \"{:5.2f}%\".format(value[1] / entities_with_relation * 100)\n                    }\n\n                top_values: List[object] = []\n                for _value in self.top_values(relation[0], constant.MAX_NUMBER_OF_VALUES_PER_RELATION):\n                    top_values.append(relation_value_json_object(_value, relation[1]))\n\n                return {\n                    \"name\": relation[0],\n                    \"absolute_occurrence\": relation[1],\n                    \"relative_occurrence\": \"{:5.2f}%\".format(relation[1] / self.number_of_entities * 100),\n                    \"top_values\": top_values\n                }\n\n            top_relations: List[object] = []\n            for _relation in self.top_relations(constant.MAX_NUMBER_OF_RELATIONS_PER_CLUSTER):\n                top_relations.append(relation_json_object(_relation))\n            return top_relations\n\n        return {\n            \"cluster\": self._cluster.to_json_object(),\n            \"top_relations\": top_relations_json_object()\n        }\n","repo_name":"mpss2019fn1/cluster_interpreter","sub_path":"Relation/relation_metrics.py","file_name":"relation_metrics.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70334019718","text":"import urllib.request\nimport tempfile\nimport tarfile\nimport shutil\nimport sys\nimport os\nimport re\n\nclass SourceCodeManager(object):\n    YOUTUBE_DL_MASTER = \"https://raw.github.com/rg3/youtube-dl/master\"\n    YOUTUBE_DL_VERSION_URL = YOUTUBE_DL_MASTER + \"/youtube_dl/version.py\"\n\n    GITHUB_REPOS_API = \"https://api.github.com/repos\"\n    YOUTUBE_DL_TARBALL_URL = GITHUB_REPOS_API + \"/rg3/youtube-dl/tarball\"\n\n    BLOCKING_TIMEOUT = 5\n    def __init__(self, prefs):\n        \"\"\"\n        prefs - An instance of Preference\n        \"\"\"\n        self._preferences = prefs\n\n    def youtubeDLSourceFolder(self):\n        return self._preferences.sourcepath + \"/youtube-dl\"\n\n    def youtubeDLIsInstalled(self):\n        try:\n            
self.currentYoutubeDLVersion()\n except FileNotFoundError:\n return False\n return True\n\n def addYoutubeDLToPath(self):\n sys.path.append(self.youtubeDLSourceFolder())\n \n def currentYoutubeDLVersion(self):\n \"\"\"Returns the version of youtube-dl installed in the system\"\"\"\n versionFile = self.youtubeDLSourceFolder() + \"/youtube_dl/version.py\"\n\n fh = open(versionFile, \"r\")\n try:\n source = fh.read().strip()\n return self.__extractYoutubeDLVersionFromString(source) \n\n except FileNotFoundError:\n return None\n finally:\n fh.close()\n \n\n def checkForYoutubeDLUpdates(self):\n \"\"\"Checks for updates only when enabled in the preferences.\n\n Raises exceptions on error:\n - urllib.request.URLError\n - ExtractionError: unable to extract the version string\n\n Return value: True or False\n \"\"\"\n if self._preferences.autoupdates == False:\n return False\n\n # check for youtube-dl updates\n fh = urllib.request.urlopen(self.YOUTUBE_DL_VERSION_URL, timeout=5)\n try:\n html = fh.read().decode().strip()\n newVersion = self.__extractYoutubeDLVersionFromString(html)\n \n finally:\n fh.close()\n\n try:\n currentVersion = self.currentYoutubeDLVersion()\n except VersionExtractionError:\n return True\n\n return (currentVersion != newVersion)\n\n def __extractYoutubeDLVersionFromString(self, s):\n \"\"\"Extracts the youtube-dl version string from s.\n\n An example of a version string is 2014.01.08\n\n This method raises an VersionExtractionError when the version\n couldn't be extracted.\n \"\"\"\n m = re.search('((?:\\d+\\.){2}\\d+)',s)\n if m == None:\n raise VersionExtractionError()\n return m.group(0)\n\n def downloadYoutubeDL(self):\n \"\"\"Downloads and updates the youtube-dl source code on the system\n\n Raises exceptions on error:\n - urllib.request.URLError\n \"\"\"\n\n # download to tmp file\n tmp = tempfile.mkstemp()[1]\n fh = open(tmp, 'wb')\n try:\n req = urllib.request.urlopen(self.YOUTUBE_DL_TARBALL_URL, timeout=5)\n chunkSize = 256*10240\n while True:\n chunk = req.read(chunkSize)\n if not chunk:\n break\n fh.write(chunk)\n\n\n except Exception as e:\n fh.close()\n os.remove(tmp)\n raise e\n\n # read in tarball\n try:\n tf = tarfile.open(name=tmp)\n extractDir = tempfile.mkdtemp()\n tf.extractall(path=extractDir)\n\n newCode = extractDir + \"/\" + tf.getmembers()[0].name\n tf.close()\n finally:\n os.remove(tmp)\n\n # delete the previous code and move the new code\n try:\n os.makedirs(self.youtubeDLSourceFolder(), 0o755)\n except:\n pass\n oldCode = self.youtubeDLSourceFolder()\n try:\n shutil.rmtree(oldCode)\n except FileNotFoundError:\n pass\n shutil.move(newCode, oldCode)\n \n\n\n### Exceptions ###\nclass VersionExtractionError(Exception):\n def __init__(self):\n super(Exception, self).__init__(\"could not extract version\")\n \n","repo_name":"hverr/youtube-dl-manager","sub_path":"youtube-dl-manager/SourceCodeManager.py","file_name":"SourceCodeManager.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9697171864","text":"import whisper\n\ndef transcribe(data):\n print(data)\n url = data['url']\n jobID = data['jobID']\n model = whisper.load_model(\"tiny.en\")\n #result = model.transcribe(\"./test/micro-machines.wav\")\n result = model.transcribe(url)\n transcription = result[\"text\"]\n with open(f\"assets/{jobID}.txt\", \"w\") as transcriptFile:\n # Writing data to a file\n 
transcriptFile.write(transcription)","repo_name":"monismehdi/pyWhisper","sub_path":"transcriber.py","file_name":"transcriber.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"859574990","text":"\"\"\"\nPayment related notifications.\n\"\"\"\nfrom rest_framework.reverse import reverse\nfrom notifications.models import Notification\n\n\ndef pay_later_reminder(payment, business_id, request, format=None):\n    \"\"\"\n    Create a reminder notification for payments with\n    mode of payments `CREDIT`.\n    \"\"\"\n    customer_name = payment.order.customer.name\n    action_url = reverse(\n        'business:payment-detail',\n        request=request,\n        format=format,\n        kwargs={'business_id': business_id, 'pk': payment.pk}\n    )\n\n    Notification.objects.get_or_create(\n        notification_type='Payment Reminder',\n        business_account=payment.order.business_account,\n        action_message=f'Receive payment for your order to {customer_name}',\n        action_date=payment.pay_later_date,\n        action_date_label='Payment due date',\n        action_url=action_url,\n        is_seen=False\n    )\n","repo_name":"eyobofficial/Bookkeeping-API","sub_path":"notifications/helpers/payment_notifications.py","file_name":"payment_notifications.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"42156148881","text":"import socket \r\nimport threading \r\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n# Arg 1: can be AF_INET (IP based, for communication between different hosts)\r\n#        or AF_UNIX (file based, for communication on the local machine)\r\n# Arg 2: can be SOCK_STREAM (TCP/IP protocol)\r\n#        or SOCK_DGRAM (UDP protocol)\r\ns.bind((\"127.0.0.1\",8088))   # bind the IP address and port\r\ns.listen(10)    # request queue (backlog) of 10\r\nsock,addr=s.accept() \r\ntemp=True \r\ndef rec(sock): \r\n    global temp   # use the global running flag\r\n    while temp: \r\n        t=sock.recv(1024).decode('utf8')   # the core of this function is this single recv call\r\n        # receive up to 1024 bytes\r\n        if t == \"quit\": \r\n            temp=False \r\n        print(t) \r\ntrd=threading.Thread(target=rec,args=(sock,))   # create a thread\r\n# target is the function to run, args are its arguments\r\ntrd.start() \r\nwhile temp: \r\n    t=input() \r\n    sock.send(t.encode('utf8')) \r\n    if t == \"quit\": \r\n        temp=False \r\ns.close() \r\n","repo_name":"plusyou13/socket_nc","sub_path":"聊天_服务器.py","file_name":"聊天_服务器.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1987118630","text":"import argparse\nfrom dataclasses import dataclass\nfrom experiments.common.setup_experiment import setup_experiment, flush_logs, get_value_logger\nfrom experiments.problems import all_problems, BaseProblem\nfrom core.constraints import BaseConstraint, BoxConstraint\nfrom core.flow.real_nvp import RealNvp\nfrom core.flow.train_flow import update_flow_batch\nfrom core.flow.constrained_distribution import ConstrainedDistribution\nfrom core.common.loadable_module import LoadbleModule\nimport torch as th\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.autograd import Variable\nimport numpy as np\nimport os\n\nclass Generator(nn.Module, LoadbleModule):\n    def __init__(self, latent_dim, conditional_param_count):\n        self.kwargs = {\"conditional_param_count\": conditional_param_count, \"latent_dim\": latent_dim}\n        self.latent_dim = latent_dim\n        super(Generator, self).__init__()\n\n        def block(in_feat, out_feat, normalize=True):\n            layers = [nn.Linear(in_feat, out_feat)]\n            if normalize:\n                layers.append(nn.BatchNorm1d(out_feat, 0.8))\n            layers.append(nn.LeakyReLU(0.2, inplace=True))\n            return layers\n\n        
self.model = nn.Sequential(\n *block(latent_dim+conditional_param_count, 32, normalize=False),\n *block(32, 64),\n nn.Linear(64, latent_dim),\n nn.Tanh()\n )\n\n def forward(self, z):\n return self.model(z)\n\n\n \nclass Discriminator(nn.Module):\n def __init__(self, latent_dim, conditional_param_count):\n super(Discriminator, self).__init__()\n self.kwargs = {\"conditional_param_count\": conditional_param_count, \"latent_dim\": latent_dim}\n self.model = nn.Sequential(\n nn.Linear(latent_dim + conditional_param_count, 32),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(32, 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(64, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, sample):\n return self.model(sample)\n\n\n\ndef main():\n \"\"\"\n Train wgan using generated samples from a file.\n \"\"\"\n \n @dataclass\n class Options:\n problem: str\n data_file: str\n train_sample_count: int = 500_000\n test_sample_count: int = 500_000\n epochs: int = 500\n eval_freq: int = 1\n device: str = 'cpu'\n lr: float = 1e-5\n batch_size: int = 256\n n_critic: int = 10\n sample_interval: float = 10.\n gradient_clip_value: float = 10\n\n args = setup_experiment(\"train_wgan\", Options)\n params: Options = args.params\n\n # Get the constraint\n problem:BaseProblem = all_problems[params.problem]\n constraint = problem.constraint\n constraint = constraint.to(params.device)\n conditional_p_count = getattr(constraint, 'conditional_param_count', 0)\n latent_dim = constraint.var_count\n logger = get_value_logger(args.log_dir)\n \n \n\n\n generator = Generator(latent_dim, conditional_p_count).to(device=params.device)\n discriminator = Discriminator(latent_dim, conditional_p_count).to(device=params.device)\n\n # Optimizers\n optimizer_G = th.optim.RMSprop(generator.parameters(), lr=params.lr)\n optimizer_D = th.optim.RMSprop(discriminator.parameters(), lr=params.lr)\n\n \n # Load dataset\n data = th.from_numpy(np.load(params.data_file)).to(params.device).to(th.float32)\n if params.train_sample_count > len(data):\n raise ValueError(\"Not enough samples in the dataset\")\n train_data = data[:params.train_sample_count]\n dataset = TensorDataset(train_data)\n data_loader = DataLoader(dataset, batch_size=params.batch_size, shuffle=True)\n test_data = data[params.train_sample_count:params.train_sample_count + params.test_sample_count]\n os.makedirs(args.log_dir + \"/figures\", exist_ok=True)\n np.save(f\"{args.log_dir}/figures/test_data.npy\", test_data.cpu().numpy())\n print(\"Test data\", len(test_data))\n batches_done = 0\n for epoch in range(params.epochs):\n losses_G = []\n losses_D = []\n # Update flow for each batch\n for i, (batch,) in enumerate(data_loader):\n\n optimizer_D.zero_grad()\n\n # Sample noise as generator input\n z = Variable(th.Tensor(np.random.uniform(-1, 1, (batch.shape[0], latent_dim), ),).to(params.device))\n\n # Generate a batch of images\n conditional_vars = batch[:, constraint.var_count:] # Get conditional variables for generator\n z = th.concat([z, conditional_vars], dim=1)\n fake_samples = generator(z).detach()\n fake_samples = th.concat([fake_samples, conditional_vars], dim=1)\n # Adversarial loss\n loss_D = -th.mean(discriminator(batch)) + th.mean(discriminator(fake_samples))\n\n loss_D.backward()\n optimizer_D.step()\n losses_D.append(loss_D.item())\n\n # Clip weights of discriminator\n for p in discriminator.parameters():\n p.data.clamp_(-params.gradient_clip_value, params.gradient_clip_value)\n\n # Train the generator every n_critic iterations\n if i % params.n_critic == 0:\n 
optimizer_G.zero_grad()\n # Generate a batch of samples\n gen_samples = generator(z)\n gen_samples = th.concat([gen_samples, conditional_vars], dim=1)\n # Adversarial loss\n loss_G = -th.mean(discriminator(gen_samples))\n\n loss_G.backward()\n optimizer_G.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, params.epochs, batches_done % len(data_loader), len(data_loader), loss_D.item(), loss_G.item())\n ) \n\n\n losses_G.append(loss_G.item())\n batches_done += 1\n\n if (epoch+1)%params.eval_freq == 0:\n # Evaluate\n with th.no_grad():\n # Calculate accuracy (z -(g)-> x)\n z = Variable(th.Tensor(np.random.uniform(-1, 1, (params.test_sample_count, latent_dim)))).to(params.device)\n conditional_vars = test_data[:, constraint.var_count:]\n z = th.concat([z, conditional_vars], dim=1)\n generated_samples_actions = generator(z)\n generated_samples = th.concat([generated_samples_actions, conditional_vars], dim=1)\n validity = constraint.is_feasible(generated_samples.double())\n valid_count = validity.int().sum().item()\n accuracy = valid_count/len(validity)\n fig = problem.plot(generated_samples)\n fig.savefig(f\"{args.log_dir}/figures/{epoch+1}.png\")\n np.save(f\"{args.log_dir}/figures/{epoch+1}.npy\", generated_samples.cpu().numpy())\n\n # sinkhorn_loss = sinkhorn_loss_func(generated_samples_actions, test_data[:, :problem.var_count]).mean().item()\n # Cannot calculate recall\n\n # print(f\"Epoch: {epoch+1}: Descriminator mean loss: {np.mean(losses_D):.4f}, Generator mean loss: {np.mean(losses_G):.4f}, Acc: {accuracy*100: .2f}%, \"\")\n logger.record(\"train/mean_d_loss\", np.mean(losses_D))\n logger.record(\"train/mean_g_loss\", np.mean(losses_G))\n logger.record(\"train/accuracy\", accuracy)\n logger.record(\"train/epoch\", epoch+1)\n logger.dump(epoch)\n generator.save_module(f\"{args.log_dir}/generator.pt\")\n flush_logs()\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"rlr-smu/flow-pg","sub_path":"experiments/train_wgan.py","file_name":"train_wgan.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10178730752","text":"import csv\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\n\nSTART = datetime(year=2016, month=5, day=2)\nEND = datetime(year=2016, month=5, day=10)\n\nPURCHASES_FILENAME = 'purchases.csv'\nINSTALLS_FILENAME = 'installs.csv'\nTS_FORMAT = '%Y-%m-%d %H:%M:%S'\nAPP_TYPE_2 = '2'\n\nINSTALL_TS_IDX = 0\nINSTALL_APP_IDX = 1\nINSTALL_COUNTRY_IDX = 2\n\nPURCHASE_TS_IDX = 0\nPURCHASE_APP_IDX = 1\nPURCHASE_COUNTRY_IDX = 2\nPURCHASE_INSTALL_TS_IDX = 3\nPURCHASE_REVENUE_IDX = 4\n\ninstalls = defaultdict(int)\n\nwith open(INSTALLS_FILENAME) as installs_csv:\n i_reader = csv.reader(installs_csv, delimiter=',')\n\n next(i_reader) # skip the header\n for row in i_reader:\n install_ts = datetime.strptime(row[INSTALL_TS_IDX], TS_FORMAT)\n if START <= install_ts < END and row[INSTALL_APP_IDX] == APP_TYPE_2:\n installs[row[INSTALL_COUNTRY_IDX]] += 1\n\n# revenue = dict.fromkeys(range(1,11), defaultdict(int))\nrevenue = {i: defaultdict(int) for i in range(1, 11)}\nwith open(PURCHASES_FILENAME) as purchases_csv:\n p_reader = csv.reader(purchases_csv, delimiter=',')\n\n next(p_reader) # skip the header\n for row in p_reader:\n install_ts = datetime.strptime(row[PURCHASE_INSTALL_TS_IDX], TS_FORMAT)\n if START <= install_ts < END and row[1] == APP_TYPE_2:\n revenue_ts = datetime.strptime(row[PURCHASE_TS_IDX], 
TS_FORMAT)\n for i in range(10, 0, -1):\n if timedelta(days=i-1) < revenue_ts-install_ts <= timedelta(days=i):\n for j in range(10, i-1, -1):\n revenue[j][row[PURCHASE_COUNTRY_IDX]] += float(row[PURCHASE_REVENUE_IDX])\n\nwith open('result.csv', 'w') as result:\n result_writer = csv.writer(result, delimiter=',')\n result_writer.writerow(['country', 'installs'] + ['RPI{}'.format(i) for i in range(1, 11)])\n for country in installs:\n installs_per_country = installs[country]\n rpi_1_10 = [revenue[i][country]/installs_per_country for i in range(1, 11)]\n result_writer.writerow([country, installs_per_country]+rpi_1_10)\n","repo_name":"AlexDobrushskiy/playrix","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2032130490","text":"\"\"\"\n Testing the folder\n\"\"\"\nimport os\nimport unittest\n\nfrom source import ROOT_DIR\nfrom source.reader.file import file_to_str, str_to_file\nfrom source.reader.get_content import get_content\nfrom source.reader.get_folder import get_folder_of_file\n\n\nclass MyTestCase(unittest.TestCase):\n def test_str_to_file(self):\n test_path = ROOT_DIR + \"/delete.html\"\n content = \"test\"\n str_to_file(test_path, content)\n new_content = file_to_str(test_path)\n self.assertEqual(new_content, content)\n os.remove(test_path)\n\n test_path = ROOT_DIR + \"/fake/\"\n # noinspection PyTypeChecker\n with self.assertRaises(SystemExit) as cm:\n content = \"test\"\n str_to_file(test_path, content)\n\n # noinspection PyUnresolvedReferences\n self.assertEqual(cm.exception.code, 1)\n\n def test_read_file(self):\n test_path = ROOT_DIR + \"/html_tests/test.html\"\n content = file_to_str(test_path)\n self.assertTrue(\"\" in content)\n\n test_path = ROOT_DIR + \"/html_tests/tes.html\"\n # noinspection PyTypeChecker\n with self.assertRaises(SystemExit) as cm:\n file_to_str(test_path)\n\n # noinspection PyUnresolvedReferences\n self.assertEqual(cm.exception.code, 1)\n\n self.assertEqual(file_to_str(test_path, False), None)\n\n def test_path(self):\n folder = ROOT_DIR + \"/html_tests\"\n test_path = folder + \"/test.html\"\n folder_result = get_folder_of_file(test_path)\n self.assertEqual(folder, folder_result)\n\n folder = \"html_tests\"\n test_path = folder + \"/test.html\"\n folder_result = get_folder_of_file(test_path)\n self.assertEqual(folder, folder_result)\n\n def test_get_content(self):\n path_root = ROOT_DIR + \"/html_tests/test.html\"\n root_dir = os.path.dirname(\n path_root\n )\n js_path = root_dir + \"/js/test.j\"\n content = get_content(js_path, root_dir)\n self.assertEqual(content, None)\n js_path = root_dir + \"/js/test.js\"\n content = get_content(js_path, root_dir)\n self.assertNotEqual(content, None)\n js_path = \"js/test.j\"\n content = get_content(js_path, root_dir)\n self.assertEqual(content, None)\n\n js_path = \"js/test.js\"\n content = get_content(js_path, root_dir)\n self.assertNotEqual(content, None)\n","repo_name":"moranabadie/OneFileOnly","sub_path":"source/reader/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4936900084","text":"#!/usr/bin/python\n\ndef iscomplextype(atype):\n return atype.find('Complex') >= 0\n\ndef isintegertype(atype):\n return atype.find('Int') >= 0\n\ndef issingletype(atype):\n return atype.find('Single') >= 0\n\ndef computeTypes(atype,btype):\n 
outputcomplex = iscomplextype(atype) or iscomplextype(btype)\n    singleprecisioncase = issingletype(atype) and issingletype(btype)\n    if (singleprecisioncase):\n        if (outputcomplex):\n            viatype = 'ComplexSingle'\n        else:\n            viatype = 'Single'\n    else:\n        if (outputcomplex):\n            viatype = 'ComplexDouble'\n        else:\n            viatype = 'Double'\n    return viatype\n\ntypelist = ['Double','ComplexDouble','Single','ComplexSingle','UInt64','ComplexUInt64']\ntypemap = {}\nfor t in typelist:\n    typemap[t] = str.lower(t)\n\nf = open(\"DotOperator.hpp\",\"w\")\nf.write('template \\n')\nf.write('FMObject3 DotOperator(const FMObject3 &a, const FMObject3 &b) {\\n')\nf.write('  if (a.type() == b.type()) {\\n')\nf.write('    FMObject3 c = a.type()->zeroArrayOfSize(FMTuple::computeDotOperatorSize(a.dims(),b.dims()));\\n')\nf.write('    switch (a.type()->code())\\n')\nf.write('      {\\n')\nfor t in typelist:\n    inputType = t\n    outputType = t\n    f.write('      case ' + t + ':\\n')\n    f.write('        Op::template func_st<'+typemap[t]+'>(c.readWriteData(),a.readOnlyData(),b.readOnlyData(),a.isScalar(),b.isScalar(),c.elementCount());\\n')\n    f.write('        break;\\n')\nf.write('      }\\n')\nf.write('  }\\n')\nf.write('  if (a.type()->code() == Double) {\\n')\nf.write('    FMObject3 c = b.type()->zeroArrayOfSize(FMTuple::computeDotOperatorSize(a.dims(),b.dims()));\\n')\nf.write('    switch (a.type()->code())\\n')\nf.write('      {\\n')\nfor t in typelist:\n    inputType = t\n    outputType = t\n    f.write('      case ' + t + ':\\n')\n    f.write('        Op::template func_st<'+typemap[t]+'>(c.readWriteData(),a.readOnlyData(),b.readOnlyData(),a.isScalar(),b.isScalar(),c.elementCount());\\n')\n    f.write('        break;\\n')\nf.write('      }\\n')\nf.write('  }\\n')\n\nf.write('}\\n')\n","repo_name":"ParasInternKhushPatil/FreeMaT","sub_path":"libs/libFreeMat/dotop.py","file_name":"dotop.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25652536289","text":"import os\nimport cv2\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--hsv_low', help='Comma separated HSV LOW values after calibration', default='0,48,176')\nparser.add_argument('--hsv_high', help='Comma separated HSV HIGH values after calibration', default='17,255,255')\nparser.add_argument('--offset_x', help='Sensitivity in the X direction (in pixels)', default='0')\nparser.add_argument('--offset_y', help='Sensitivity in the Y direction (in pixels)', default='0')\nargs = parser.parse_args()\n\nOUTPUT_PATH = os.path.join(os.getcwd(), 'Output')\nHSV_LOW = [int(i) for i in args.hsv_low.split(',')]\nHSV_HIGH = [int(i) for i in args.hsv_high.split(',')]\n\nif __name__ == '__main__':\n\n    def rectangle_values(frame_center, frame_xy, factor):\n\n        top_left = int((frame_xy[0] / factor) * 1.4)\n        top_right = int((frame_xy[1] / factor) * 1.4)\n        bottom_left = 2*frame_center[0] - top_left\n        bottom_right = 2*frame_center[1] - top_right\n\n        return top_left, top_right, bottom_left, bottom_right\n\n    # Creating output folder if it doesn't exist\n    if not os.path.exists(OUTPUT_PATH):\n        os.mkdir(OUTPUT_PATH)\n\n    # Capture video from the stream\n    cap = cv2.VideoCapture(0)\n\n    frame_center = (320, 240)\n    frame_xy = (640, 480)\n    offset_x = int(args.offset_x)\n    offset_y = int(args.offset_y)\n    count = 0\n\n    while(1):\n\n        _, frame=cap.read()\n        \n        # convert from a BGR stream to an HSV stream\n        hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n        # Enter the calibrated values in the below function. 
\n # Replace (0, 48, 176) by new LOW values and (17, 255, 255) by new HIGH values.\n\n mask = cv2.inRange(hsv, HSV_LOW, HSV_HIGH) \n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2) \n\n contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n string = ''\n\n # Guide rectangles\n cv2.circle(frame, frame_center, 5, (255, 255, 255), -1)\n top_leftx, top_lefty, bottom_rightx, bottom_righty = rectangle_values(frame_center, frame_xy, factor = 2.3 * 2)\n breadth_small = frame_xy[1] - 2 * top_lefty\n cv2.rectangle(frame, (top_leftx, top_lefty), (bottom_rightx, bottom_righty), (255, 255, 255), 3)\n\n top_leftx, top_lefty, bottom_rightx, bottom_righty = rectangle_values(frame_center, frame_xy, factor = 3.5 * 2)\n breadth_large = frame_xy[1] - 2 * top_lefty\n cv2.rectangle(frame, (top_leftx, top_lefty), (bottom_rightx, bottom_righty), (255, 255, 255), 3)\n\n # only proceed if at least one contour was found\n if len(contours) > 0:\n\n # Finding the largest contour\n c = max(contours, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n \n # (x,y) are the coordinates of the circle's center\n # We only proceed further if the radius is greater than 10\n\n if radius > 10:\n cv2.circle(frame, (int(x), int(y)), int(radius), (0, 0, 255), 2)\n cv2.circle(frame, (int(x),int(y)), 5, (0, 255, 0), -1)\n\n # Movement calculations\n offset_x = frame_center[0] - x\n offset_y = frame_center[1] - y\n\n # Here, we set a threshold displacement of 20\n if(abs(offset_x) > 20):\n if(offset_x < 0):\n string += 'RIGHT '\n elif(offset_x > 0):\n string += 'LEFT '\n\n if(abs(offset_y) > 20):\n if(offset_y < 0):\n string += 'UP '\n elif(offset_y > 0):\n string += 'DOWN '\n\n # Move front/back wrt ball's diameter\n if(2 * radius > breadth_large):\n string += 'BACK '\n elif(2 * radius < breadth_small):\n string += 'FRONT '\n\n cv2.putText(frame, string, (10,450), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 215, 255), 2, cv2.LINE_AA)\n\n cv2.imshow('Track', frame)\n cv2.imwrite(os.path.join(OUTPUT_PATH, str(count) + '.jpg'), frame)\n count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()","repo_name":"thatbrguy/Ball-Tracking-Bot","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"17888529080","text":"import pytest\nimport xmlsec\n\nfrom inge6.saml import AuthNRequest, ArtifactResolveRequest\nfrom inge6.saml.metadata import SPMetadata\n\n\ndef test_artifact_value():\n expected = \"some_artifact_code\"\n saml_req = ArtifactResolveRequest(expected, sso_url='test_url', issuer_id='test_id')\n artifact_node = saml_req.root.find('.//samlp:Artifact', {'samlp': 'urn:oasis:names:tc:SAML:2.0:protocol'})\n\n assert artifact_node.text == expected\n\n@pytest.mark.parametrize(\"saml_request\", [\n AuthNRequest(sso_url='test_url', issuer_id='test_id'),\n ArtifactResolveRequest('some_artifact_code', sso_url='test_url', issuer_id='test_id'),\n SPMetadata()])\ndef test_verify_requests(saml_request):\n getroot =saml_request.saml_elem\n # xmlsec.tree.add_ids(getroot, [\"ID\"])\n signature_node = xmlsec.tree.find_node(getroot, xmlsec.constants.NodeSignature)\n # Create a digital signature context (no key manager is needed).\n ctx = xmlsec.SignatureContext()\n key = xmlsec.Key.from_file('saml/certs/sp.crt', xmlsec.constants.KeyDataFormatCertPem)\n # Set the key on the 
context.\n ctx.key = key\n ctx.register_id(getroot)\n ctx.verify(signature_node)\n assert True\n","repo_name":"minvws/nl-covid19-coronacheck-authentication-service","sub_path":"tests/saml/test_saml_request_builder.py","file_name":"test_saml_request_builder.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70159605706","text":"import copy\nimport datetime\nimport socket\nimport subprocess\nimport threading\nimport time\n\ntry:\n from queue import Queue\nexcept ImportError:\n from Queue import Queue\n\nimport plumbum\nimport rpyc\nfrom sqlalchemy import sql\nfrom zeroconf import ServiceBrowser, Zeroconf\n\nimport config\nfrom web import db_schema\nfrom common import auto_restart, get_routed_ip, max_mtime\n\n\nclass Box(object):\n def __init__(self, name, ip, port, properties, deregister_callback):\n self.name = name # name of remote box\n self.ip = ip # IP address\n self.port = port # port on which atles_remote.py is accepting connections\n\n # callback function for this box to deregister itself w/ zeroconf\n self.deregister_callback = deregister_callback\n\n # information on git commit status for remote code\n self.gitshort = properties[b'gitshort'].decode()\n self.gitlong = properties[b'gitlong'].decode()\n\n # does this box have a display for \"background images\"\n self.hasdisplay = properties[b'hasdisplay']\n # username for SSH login to remote box\n self.user = properties[b'user'].decode()\n\n # paths to track data directories on remote box\n self.appdir = properties[b'appdir'].decode()\n # build useful paths (assumes same directory structure on remote)\n self.trackdir = self.appdir / config.TRACKDIR.relative_to(config.BASEDIR)\n self.archivedir = self.appdir / config.ARCHIVEDIR.relative_to(config.BASEDIR)\n self.dbgframedir = self.appdir / config.DBGFRAMEDIR.relative_to(config.BASEDIR)\n\n self.error = None # Internal error message, if any\n self.local = None # True if box is actually the local machine\n\n self._tunnel = None # SSH tunnel instance\n self._rpc = None # RPC connection instance\n\n def get_info(self):\n ret = {\n 'name': self.name,\n 'ip': self.ip,\n 'port': self.port,\n 'user': self.user,\n 'hasdisplay': self.hasdisplay,\n 'connected': self.connected,\n 'gitshort': self.gitshort,\n 'gitlong': self.gitlong,\n 'local': self.local,\n 'error': self.error,\n }\n\n if self.connected:\n # verify that we actually are connected\n self._ping(timeout=2)\n\n lock_data = self.lock_data()\n ret.update({\n 'exp_running': lock_data.get('running'),\n 'exp_pid': lock_data.get('pid'),\n 'exp_starttime': lock_data.get('starttime'),\n 'exp_runtime': lock_data.get('runtime')\n })\n else:\n ret['exp_running'] = False\n\n return ret\n\n def connect(self, done_callback=None):\n self.error = \"connecting...\"\n self.local = (self.ip == get_routed_ip())\n if not self.local:\n # only connect if it's a separate machine\n try:\n # -oBatchMode=yes to disable password auth and just fail if key auth fails\n self._tunnel = plumbum.SshMachine(self.ip, user=self.user, ssh_opts=['-oBatchMode=yes'])\n except (plumbum.machines.session.SSHCommsChannel2Error, plumbum.machines.session.SSHCommsError):\n self.error = \"SSH connection failure\"\n self._tunnel = None\n return\n\n self._rpc = rpyc.ssh_connect(self._tunnel, self.port)\n\n else:\n self._rpc = rpyc.connect(\"localhost\", self.port)\n\n self.error = None\n if done_callback is not None:\n done_callback(self.name)\n\n def down(self, error=None):\n if 
self._rpc:\n try:\n self._rpc.close()\n except AttributeError:\n pass # always throws one in Session.close()... bug?\n self._rpc = None\n if self._tunnel:\n self._tunnel.close()\n self._tunnel = None\n\n self.error = error\n\n def sync_data(self):\n ''' Copy/sync track data from this box to the local track directory.'''\n print(\"{}: Starting sync.\".format(self.name))\n\n assert self.connected\n\n # If data is already local, no need to sync\n assert not self.local\n\n # Double-check that this box isn't running an experiment\n if self.lock_exists():\n return\n\n # Copy remote files into an archive dir, then have rsync\n # delete the originals after the transfer\n cp_cmd = self._tunnel[\"cp\"]\n res = cp_cmd(\"-r\", self.trackdir, self.archivedir)\n print(\"{}: cp command got: {}\".format(self.name, res))\n # Currently does *not* copy the debugframes (following line is\n # commented), so they will be removed from remote entirely.\n #cp_cmd(\"-r\", self.dbgframedir, self.archivedir)\n\n # NOTE: Source must end with / to copy the *contents* of the folder\n # instead of copying the source folder into the destination as a new\n # folder there.\n cmd = ['rsync', '-rvt', '--remove-source-files', '%s@%s:%s/' % (self.user, self.ip, self.trackdir), str(config.TRACKDIR / self.name)]\n res = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n print(\"{}: rsync trackfiles command got: {}\".format(self.name, res))\n\n cmd = ['rsync', '-rvt', '--remove-source-files', '%s@%s:%s/' % (self.user, self.ip, self.dbgframedir), str(config.DBGFRAMEDIR / self.name)] # '' to ensure trailing /\n res = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n print(\"{}: rsync dbgframes command got: {}\".format(self.name, res))\n\n def _ping(self, timeout):\n '''\n Attempt to call a function on the server over this connection with a\n given timeout (in seconds).\n '''\n def timeout_close():\n self.down(\"server timed out; connection closed\")\n # The server didn't deregister itself, so we need to here\n # so that zeroconf will properly add the service when it returns.\n self.deregister_callback()\n\n timer = threading.Timer(timeout, timeout_close)\n timer.start()\n try:\n self.lock_exists() # any simple function call; return ignored\n finally:\n timer.cancel()\n\n @property\n def connected(self):\n return self._rpc and not self._rpc.closed\n\n def __getattr__(self, name):\n '''Return something from self.rpc if it wasn't found in this object\n directly. Lets us use one object namespace to access both \"local\"\n methods like sync_data() and remote RPC methods.'''\n if self.connected and hasattr(self._rpc.root, name):\n return getattr(self._rpc.root, name)\n else:\n # default behavior\n raise AttributeError\n\n\nclass BoxManager(object):\n def __init__(self, engine):\n self._engine = engine\n\n self._boxes = dict()\n self._boxlock = threading.Lock()\n\n self._updatequeue = Queue()\n\n # work around a bug in zeroconf on Cygwin\n try:\n zeroconf = Zeroconf()\n except socket.error:\n zeroconf = Zeroconf([\"0.0.0.0\"])\n self._browser = ServiceBrowser(zeroconf, \"_atlesbox._tcp.local.\", self) # starts its own daemon thread\n\n # start separate thread for:\n # - polling boxes\n t = threading.Thread(target=auto_restart(self._poll_boxes))\n t.daemon = True\n t.start()\n # - handling the explicit update queue\n t = threading.Thread(target=auto_restart(self._watch_queue))\n t.daemon = True\n t.start()\n\n def add_service(self, zeroconf, type, name):\n ''' Called automatically by ServiceBrowser. 
'''\n info = zeroconf.get_service_info(type, name)\n print(\"Service %s added, service info: %s\" % (name, info))\n boxname = info.properties[b'name'].decode()\n assert boxname == name.split('.')[0]\n\n # make a function for deregistering this box\n def deregister():\n # Do just enough to make zeroconf register the service\n # when it returns.\n # (This is used when a box losesconnection without\n # deregistering itself.)\n del self._browser.services[info.name.lower()]\n\n newbox = Box(name=boxname,\n ip=socket.inet_ntoa(info.address),\n port=info.port,\n properties=info.properties,\n deregister_callback=deregister\n )\n\n # connect in a separate thread so we don't have to wait for the connection here\n threading.Thread(target=newbox.connect, args=[self._updatequeue.put]).start()\n with self._boxlock:\n self._boxes[boxname] = newbox\n self._updatequeue.put(boxname)\n\n def remove_service(self, zeroconf, type, name):\n ''' Called automatically by ServiceBrowser. '''\n print(\"Service %s removed\" % name)\n boxname = name.split('.')[0]\n with self._boxlock:\n self._boxes[boxname].down()\n self._updatequeue.put(boxname)\n\n def get_boxes(self):\n with self._boxlock:\n return copy.copy(self._boxes)\n\n def _update_box_db(self, box, boxinfo, conn):\n # add current time to boxinfo\n boxinfo['last_updated'] = time.time()\n\n boxes = db_schema.boxes\n # check whether this box is in the database yet\n select = sql.select([boxes.c.name]).where(boxes.c.name == box)\n box_exists = conn.execute(select).scalar()\n if box_exists:\n # if so, update\n update = boxes.update().where(boxes.c.name == box).values(boxinfo)\n conn.execute(update)\n else:\n # if not, insert\n insert = boxes.insert(boxinfo)\n conn.execute(insert)\n\n def _update_box_datafiles(self, box, boxinfo, conn):\n ''' Checks for newer datafiles; syncs if any are found. '''\n box_rpc = self._boxes[box]\n # Get mtimes of latest remote and local data files\n latest_remote = box_rpc.max_datafile_mtime()\n if latest_remote is None:\n # No files present on remote\n return\n boxtrackdir = config.TRACKDIR / box\n latest_local = max_mtime(boxtrackdir)\n\n # *Ugly* hack to \"de-netref\" the rpyc-returned object\n # Otherwise we can't compare it to a real datetime object...\n timetuple = list(latest_remote.timetuple())[:6]\n timetuple.append(latest_remote.microsecond)\n latest_remote = datetime.datetime(*timetuple)\n\n # If remote has newer, sync and update latest local time\n if latest_local is None or latest_local < latest_remote:\n box_rpc.sync_data()\n\n # check that update occurred\n diff = abs(latest_remote - max_mtime(boxtrackdir))\n if diff > datetime.timedelta(seconds=1):\n # warn w/ simple print for now\n print(\"Warning: sync may not have occurred for box {}. 
Got time delta {}.\".format(box, diff))\n\n def _update_box(self, box, conn):\n # get updated box data\n with self._boxlock:\n if box in self._boxes:\n boxinfo = self._boxes[box].get_info()\n else:\n boxinfo = {'connected': False}\n self._update_box_db(box, boxinfo, conn)\n if boxinfo['connected'] \\\n and not boxinfo['local'] \\\n and not boxinfo['exp_running'] \\\n and self._boxes[box].connected \\\n and not self._boxes[box].lock_exists():\n self._update_box_datafiles(box, boxinfo, conn)\n\n def _watch_queue(self):\n # Runs in its own thread\n # Needs a separate sqlite connection for a separate thread\n conn = self._engine.connect()\n while True:\n box = self._updatequeue.get()\n self._update_box(box, conn)\n\n def _poll_boxes(self):\n # Runs in its own thread\n # Needs a separate sqlite connection for a separate thread\n conn = self._engine.connect()\n boxes = db_schema.boxes\n select = sql.select([boxes.c.name])\n while True:\n # Poll/update all boxes every 2 seconds\n box_names = [row['name'] for row in conn.execute(select)]\n # quick sanity check: all boxes in our list of RPC objects must be registered in the DB\n for box in self._boxes:\n assert box in box_names\n for box in box_names:\n self._updatequeue.put(box)\n time.sleep(2)\n","repo_name":"liffiton/ATLeS","sub_path":"src/web/box_rpc.py","file_name":"box_rpc.py","file_ext":"py","file_size_in_byte":12558,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"20280331735","text":"# -*- coding: utf-8 -*-\n\"\"\"Recipe shelloutput\"\"\"\n\nimport subprocess\n\n\nclass Recipe(object):\n\n def __init__(self, buildout, name, options):\n cmds = options[\"commands\"].strip()\n output = {}\n if cmds:\n cmds = cmds.split('\\n')\n for cmd in cmds:\n if cmd:\n name, command = cmd.split('=')\n name = name.strip()\n command = command.strip()\n output[name] = self._execute_cmd(name, command)\n options.update(output)\n\n def _execute_cmd(self, name, command):\n if not command:\n return \"Empty command '%s', no output generated.\" % name\n process = subprocess.Popen([command],\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = process.communicate()\n if err:\n return \"Error '%s' for command '%s'.\" % (err.strip(), name)\n return out.strip()\n\n def install(self):\n return tuple()\n\n def update(self):\n self.install()\n","repo_name":"collective/collective.recipe.shelloutput","sub_path":"collective/recipe/shelloutput/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72580444105","text":"import sys\nimport argparse\nfrom lair.project_generator import ProjectGenerator\n\n\ndef main(args=sys.argv[1:]):\n parser = argparse.ArgumentParser(description='Service Example')\n parser.add_argument('--project-name', help='Name of the project',\n type=str, default=None, required=True)\n parser.add_argument('--with-db',\n help='With database', nargs='?',\n const=True, default=False, required=False)\n args = parser.parse_args(args=args)\n\n dependencies = []\n\n if args.with_db:\n dependencies.extend(['flask_migrate',\n 'flask_sqlalchemy',\n 'sqlalchemy'])\n\n # Make sure flask is below flask_sqlalchemy in the list\n # otherwise it fails to install because 'flask is missing'\n dependencies.extend(['lair', 'flask', 'click', 'konfig'])\n\n app = ProjectGenerator()\n app.set('project-name', args.project_name)\n app.set('with-db', 
args.with_db)\n app.set('dependencies', dependencies)\n\n try:\n app.initialize()\n except FileExistsError as e:\n print(\"!!!\", e)\n sys.exit(1)\n app.create()\n","repo_name":"yitsushi/lair","sub_path":"lair/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30290491625","text":"from __future__ import print_function\nimport argparse\nimport json\n\nimport yaml\nimport requests\n\nimport cnrclient\nfrom cnrclient.client import CnrClient\nfrom cnrclient.utils import parse_package_name, parse_version, split_package_name\n\n\ndef _set_package(parser, namespace, dest, package_parts):\n parsed_version = parse_version(package_parts['version'])\n setattr(namespace, \"registry_host\", package_parts['host'])\n setattr(namespace, 'version', parsed_version['value'])\n setattr(namespace, 'version_parts', parsed_version)\n package = \"%s/%s\" % (package_parts['namespace'], package_parts['package'])\n setattr(namespace, dest, package)\n setattr(namespace, \"package_parts\", package_parts)\n\n\nclass PackageName(argparse.Action):\n def __call__(self, parser, namespace, value, option_string=None):\n try:\n name = value[0]\n package_parts = parse_package_name(name)\n _set_package(parser, namespace, self.dest, package_parts)\n except ValueError as exc:\n raise parser.error(exc.message)\n\n\nclass PackageSplit(argparse.Action):\n def __call__(self, parser, namespace, value, option_string=None):\n name = value\n package_parts = split_package_name(name)\n _set_package(parser, namespace, self.dest, package_parts)\n\n\nclass CommandBase(object):\n name = 'command-base'\n help_message = 'describe the command'\n RegistryClient = CnrClient\n default_media_type = None\n\n def __init__(self, args_options):\n self.args_options = args_options\n self.output = args_options.output\n\n def render(self):\n if self.output == 'none':\n return\n elif self.output == 'json':\n self._render_json()\n elif self.output == 'yaml':\n self._render_yaml()\n else:\n print(self._render_console())\n\n @classmethod\n def call(cls, options):\n try:\n cls(options)()\n except requests.exceptions.RequestException as exc:\n raise argparse.ArgumentTypeError(exc.message)\n\n def __call__(self):\n self._call()\n self.render()\n\n @classmethod\n def add_parser(cls, subparsers):\n parser = subparsers.add_parser(cls.name, help=cls.help_message)\n cls._add_output_option(parser)\n cls._add_arguments(parser)\n parser.set_defaults(func=cls.call)\n\n def _render_json(self):\n print(json.dumps(self._render_dict(), indent=2, separators=(',', ': ')))\n\n def _render_dict(self):\n raise NotImplementedError\n\n def _render_console(self):\n raise NotImplementedError\n\n def _render_yaml(self):\n print(yaml.safe_dump(self._render_dict()))\n\n def _call(self):\n raise NotImplementedError\n\n @classmethod\n def _add_arguments(cls, parser):\n raise NotImplementedError\n\n @classmethod\n def _add_registryhost_option(cls, parser):\n parser.add_argument(\"-H\", \"--registry-host\", default=cnrclient.client.DEFAULT_REGISTRY,\n help='registry API url')\n\n @classmethod\n def _add_output_option(cls, parser):\n parser.add_argument(\"--output\", default=\"text\", choices=['text',\n 'none',\n 'json',\n 'yaml'],\n help=\"output format\")\n\n @classmethod\n def _add_mediatype_option(cls, parser, default=None, required=True):\n if default is None:\n default = cls.default_media_type\n if default is not None:\n required = False\n\n 
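# A class-level default_media_type makes the -t/--media-type flag optional rather than required.\n 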
parser.add_argument(\"-t\", \"--media-type\", default=default, required=required,\n help='package format: [kpm, kpm-compose, helm]')\n\n @classmethod\n def _add_packagename_option(cls, parser):\n parser.add_argument('package', nargs=1, default=None,\n action=PackageName, help=\"package-name\")\n\n @classmethod\n def _add_packagesplit_option(cls, parser):\n parser.add_argument('package', nargs=\"?\", default=None,\n action=PackageSplit, help=\"registry-host.com/namespace/name\")\n\n @classmethod\n def _add_packageversion_option(cls, parser):\n parser.add_argument(\"-v\", \"--version\",\n help=\"package VERSION\", default='default')\n\n @classmethod\n def _add_registryhost_arg(cls, parser):\n parser.add_argument(\"registry_host\", nargs='?',\n default=cnrclient.client.DEFAULT_REGISTRY,\n help='registry API url')\n","repo_name":"philips/cnr-cli","sub_path":"cnrclient/commands/command_base.py","file_name":"command_base.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35383923158","text":"# формирует интервал времени о τ, по истечении которого завершится обслуживание требования\nimport math\nimport random\nimport Condition\nfrom Constant import *\nfrom Device import Device\nfrom Requirement import Requirement\n\n\ndef random_exp(coef):\n R = random.random()\n return (-1 / coef) * math.log(R)\n\n\n# определяет момент выполнения сегмента процесса\ndef next_moment(n, coef):\n return n + random_exp(coef)\n\n\ndef requirements_segment(t):\n global N_requirement, device, fail_req\n requirement = Requirement() # создает новое требовани\n if device.get_Q_size() < queue_max_size: # проверка наличия места в очереди\n requirement.set_t_postyp(t) # наделяет это требование значениями атрибутов\n device.put_into_Q(requirement) # ставит требование в очередь\n N_requirement += 1\n #device.set_s_start(next_moment(t, nu)) # nu\n # иначе отказ в обслуживании\n device.set_s_postyp(next_moment(t, lambd)) # определяет очередной момент выполнения сегмента процесса\n fail_req += 1\n\n\ndef set_start_segment(t):\n if not device.is_Q_Empty():\n device.set_s_start(next_moment(t, nu)) # nu\n else:\n device.set_s_start(infinity)\n\ndef start_service_segment(t):\n global device\n if device.is_Q_Empty(): # Если очередь Q пуста\n device.set_s_start(infinity) # сегмент завершает свою работу\n elif device.get_condition() != Condition.Condition.BUSY: # если устройство не занято\n requirement = device.take_Q_elem() # удалили из очереди задачу\n device.set_condition(Condition.Condition.BUSY) # переводит прибор в состояние «занят»\n requirement.set_t_start(t) # наделяет выбранное требование значениями атрибутов\n\n device.set_s_end(next_moment(t, nu)) # момент выполнения сегмента процесса, связанного с уходом требования\n device.set_s_start(infinity) # момент активизации сегмента начала устанавливается в бесконечность\n device.set_actual_requirement(requirement) # требование над которым будет работать сегмент процесса\n else:\n device.set_s_start(infinity) # если устройство занято, то завершаем процесс. 
Ждем момента от сегмента завершения\n\ndef end_service_segment(t):\n global N_requirement, device\n\n requirement = device.get_actual_requirement()\n requirement.set_t_end(t) # изменяет значения атрибутов требования\n device.put_into_Q_served(requirement) # ставит требование, завершившее обслуживание, в очередь\n N_requirement -= 1\n device.set_condition(Condition.Condition.FREE) # переводит прибор в состояние «свободен»\n device.set_s_start(t) #активизируется сегмента процесса начала обслуживания\n device.set_s_end(infinity) # сегмент ухода завершает работу\n return requirement.get_t_end() - requirement.get_t_start() + requirement.get_t_start() - requirement.get_t_postyp()\n\n\n# счетчик модельного времени\nn_n = 0\ndevice = Device()\nN_requirement = 0\nfail_req = 0\n\ninterval_0_start = infinity\ninterval_0_end = infinity\ninterval_1_start = infinity\ninterval_1_end = infinity\ninterval_2_start = infinity\ninterval_2_end = infinity\ninterval_3_start = infinity\ninterval_3_end = infinity\ninterval_4_start = infinity\ninterval_4_end = infinity\ninterval_5_start = infinity\ninterval_5_end = infinity\n\ninterval_0 = []\ninterval_1 = []\ninterval_2 = []\ninterval_3 = []\ninterval_4 = []\ninterval_5 = []\ni = 0\n\n\ndef saveInterval(interval_list, interval_start, interval_end):\n interval_list.append(interval_end - interval_start)\n interval_start = infinity\n interval_end = infinity\n return interval_list, interval_start, interval_end\n\ndef redirect_timer(interval_0_start, interval_0_end, interval_0_list,\n interval_1_start, interval_1_end, interval_1_list,\n interval_2_start, i):\n if interval_0_start != infinity:\n interval_0_end = i\n interval_0_list, interval_0_start, interval_0_end = saveInterval(interval_0_list, interval_0_start, interval_0_end)\n\n elif interval_1_start != infinity:\n interval_1_end = i\n interval_1_list, interval_1_start, interval_1_end = saveInterval(interval_1_list, interval_1_start, interval_1_end)\n\n if interval_2_start == infinity:\n interval_2_start = i\n # иначе продолжаем счет интервала\n return interval_0_start, interval_0_end, interval_0_list, interval_1_start, \\\n interval_1_end, interval_1_list, interval_2_start\n\ndef time_fixation(i):\n global interval_0_start, interval_0_end, interval_0, interval_1_start, interval_1_end, interval_1, \\\n interval_2_start, interval_2_end, interval_2, interval_3_start, interval_3_end, interval_3, \\\n interval_4_start, interval_4_end, interval_4, interval_5_start, interval_5_end, interval_5, N_requirement\n if N_requirement == 0:\n if interval_0_start == infinity:\n interval_0_start = i\n if len(interval_0) > 0:\n interval_1_end = i\n saveInterval(interval_1, interval_1_start, interval_1_end)\n # иначе продолжаем счет для нулевого интервала\n\n elif N_requirement == 1:\n interval_0_start, interval_0_end, interval_0, interval_2_start, interval_2_end, interval_2, \\\n interval_1_start = redirect_timer(interval_0_start, interval_0_end, interval_0, interval_2_start,\n interval_2_end, interval_2, interval_1_start, i)\n\n elif N_requirement == 2:\n interval_1_start, interval_1_end, interval_1, interval_3_start, interval_3_end, interval_3, \\\n interval_2_start = redirect_timer(interval_1_start, interval_1_end, interval_1, interval_3_start,\n interval_3_end, interval_3, interval_2_start, i)\n\n elif N_requirement == 3:\n interval_2_start, interval_2_end, interval_2, interval_4_start, interval_4_end, interval_4, \\\n interval_3_start = redirect_timer(interval_2_start, interval_2_end, interval_2, interval_4_start,\n 
interval_4_end, interval_4, interval_3_start, i)\n\n elif N_requirement == 4:\n interval_3_start, interval_3_end, interval_3, interval_5_start, interval_5_end, interval_5, \\\n interval_4_start = redirect_timer(interval_3_start, interval_3_end, interval_3, interval_5_start,\n interval_5_end, interval_5, interval_4_start, i)\n\n elif N_requirement == 5:\n if interval_5_start == infinity:\n interval_5_start = i\n interval_4_end = i\n interval_4, interval_4_start, interval_4_end = saveInterval(interval_4, interval_4_start, interval_4_end)\n # return interval_0_start, interval_0_end, interval_0, interval_1_start, interval_1_end, interval_1, \\\n # interval_2_start, interval_2_end, interval_2, interval_3_start, interval_3_end, interval_3, \\\n # interval_4_start, interval_4_end, interval_4, interval_5_start, interval_5_end, interval_5, N_requirement\n\n\nwhile device.get_Q_served_size() < N:\n i = min(device.get_s_postyp(), device.get_s_start(), device.get_s_end())\n time_fixation(i)\n if device.get_s_postyp() == i:\n requirements_segment(i)\n\n if device.get_s_start() == i:\n start_service_segment(i)\n\n if device.get_s_end() == i:\n n_n += end_service_segment(i)\n\n if (device.get_s_start() < i) | (device.get_s_start() == infinity): # лишние установки времени\n set_start_segment(i)\n\n######################################################################################################\n\nK = device.get_Q_served_size()\nT = i\nmat_exp = 1/T\nq = device.get_Q_served()\nsum_ = 0\nwhile not q.empty():\n end = q.get().get_t_end()\n queue_moment = q.get().get_t_start()\n sum_ += (end - queue_moment)\n\nprint(\"оценка математического ожидания длительности пребывания требований в системе обслуживания =\", sum_ / K)\n\n\ninterval1_sum = sum(interval_1) / T\ninterval2_sum = sum(interval_2) / T\ninterval3_sum = sum(interval_3) / T\ninterval4_sum = sum(interval_4) / T\ninterval5_sum = sum(interval_5) / T\n\nn_ = interval1_sum + 2 * interval2_sum + 3 * interval3_sum + 4 * interval4_sum + 5 * interval5_sum\n\nprint(\"оценка математического ожидания числа требований в системе обслуживания =\", n_)\n\n\nflow_intensivity = (N_requirement + fail_req) / i\nflow_service = K / i\nprint(\"вероятность отказа в обслуживании требования =\", flow_intensivity / (flow_intensivity + flow_service))","repo_name":"Meowchine1/pythonProject","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":9567,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27366910860","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nfrom pip.req import parse_requirements\nimport pip.download\n\ntry:\n long_description = open(\"README.rst\").read()\nexcept IOError:\n long_description = \"\"\n\nrequirements = list(parse_requirements('requirements.txt',\n session=pip.download.PipSession()))\n\ninstall_requires = [str(r.req) for r in requirements]\n\nsetup(\n name=\"inoket-email\",\n version=\"0.0.6\",\n description=\"AWS SES Email helpers for Inoket\",\n license=\"MIT\",\n author=\"pebble {code}\",\n packages=find_packages(),\n install_requires=install_requires,\n long_description=long_description,\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n ]\n)\n","repo_name":"pebblecode/cirrus-email","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"18515624656","text":"'''문자열 반복'''\nimport sys\nread = sys.stdin.readline\n\nfor _ in range(int(read())):\n n, s = map(str, read().split())\n string = ''\n for ss in s:\n string += ss*int(n)\n print(string)","repo_name":"hanqpark/coding_test","sub_path":"boj/강의/7. 문자열/boj_2675.py","file_name":"boj_2675.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18391900795","text":"import math\n# WORK INPROGRESS\ndef rgb(r, g, b):\n# 100, 10, 1\n# 16, 1\n\n# divide number by 16\n# rounded down the number that returns\n# print(math.floor(r / 16))\n# get remainder of the division\n# print(r % 16)\n quotient = r\n final = \"\"\n while True:\n if quotient == 0:\n break\n final = final + str(quotient % 16)\n quotient = math.floor(quotient / 16)\n print(final)","repo_name":"ZacRayTho/Leetcode-Codewars-Solutions","sub_path":"Codewars/Python/RgbToHex.py","file_name":"RgbToHex.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74975363464","text":"import torch\nimport matplotlib.pyplot as plt\nfrom util import u0Presets, regular_sample\n\n# load model\nmodel = torch.load(\"../models/advection1d.pt\")\n\n# plot attempted solution\nt, x = regular_sample(n=101, d=1, requires_grad=False)\nfx = model(t, x).detach()\nplt.plot(x[t == 0].numpy(), u0Presets().square(x[t == 0]).numpy(), label=\"u0\")\nplt.plot(x[t == 0].numpy(), fx[t == 0].numpy(), label=\"t=0\")\nplt.plot(x[t == 0.5].numpy(), fx[t == 0.5].numpy(), label=\"t=0.5\")\nplt.plot(x[t == 1].numpy(), fx[t == 1].numpy(), label=\"t=1\")\nplt.legend()\nplt.show()\n","repo_name":"jpalafou/dgmNet","sub_path":"src/advection1d_plot.py","file_name":"advection1d_plot.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42494193195","text":"\"\"\"Problem\"\"\"\n# A common substring of a collection of strings is a substring of every member of the\n# collection. We say that a common substring is a longest common substring if there does\n# not exist a longer common substring. For example, \"CG\" is a common substring of\n# \"ACGTACGT\" and \"AACCGTATA\", but it is not as long as possible; in this case,\n# \"CGTA\" is a longest common substring of \"ACGTACGT\" and \"AACCGTATA\".\n\n# Note that the longest common substring is not necessarily unique; for a simple example,\n# \"AA\" and \"CC\" are both longest common substrings of \"AACC\" and \"CCAA\".\n\n# Given: A collection of k (k≤100) DNA strings of length at most 1 kbp each in FASTA format.\n\n# Return: A longest common substring of the collection. 
(If multiple solutions exist,\n# you may return any single solution.)\n\n# Sample Dataset\n# >Rosalind_1\n# GATTACA\n# >Rosalind_2\n# TAGACCA\n# >Rosalind_3\n# ATACA\n\n# Sample Output\n# AC\n\n# ------Version 1.0---------\n\n\n# DNA = open(\"C:/Users/nayan/Desktop/new 7.txt\", \"r\")\n# Fasta_dict = {}\n# Fasta_label = \"\"\n# DNA = DNA.readlines()\n# def dict_mk():\n# for line in DNA:\n# line = line.rstrip()\n# if line.startswith(\">\"):\n# Fasta_label = line[1:] \n# Fasta_dict[Fasta_label] =\"\"\n# else:\n# Fasta_dict[Fasta_label] += line\n# Fasta_seqs = list(Fasta_dict.values())\n# return Fasta_seqs\n\n# Substring = []\n# for Fasta_seq_num in range(0,len(dict_mk())):\n# Fasta_seq = dict_mk()[Fasta_seq_num]\n# Substring.append([])\n# for n in range(0,len(Fasta_seq)):\n# for i in range(0,len(Fasta_seq)):\n# if (i+n+200) - (0+i) == len(Fasta_seq[0+i:i+n+200]):\n# Substring[Fasta_seq_num].append(Fasta_seq[0+i:i+n+200])\n\n# for o in Substring:\n# for p in o:\n# print(p)\n# # ls = [set(l) for l in Substring]\n\n# # exec_string = \"ls[{}]\"\n# # string = \"\"\n# # for seq in range(0,len(Substring)):\n# # string += exec_string.format(seq) + \" & \"\n\n# # string = \"print(max(\"+string[0:len(string)-3]+\",key=len))\"\n# # exec(string)\n\n\n\n# # ------Version 2.0---------\n\n# DNA = open(\"C:/Users/nayan/Downloads/rosalind_lcsm.txt\", \"r\")\n\n# Fasta_dict = {}\n# Fasta_label = \"\"\n# DNA = DNA.readlines()\n# def dict_mk():\n# for line in DNA:\n# line = line.rstrip()\n# if line.startswith(\">\"):\n# Fasta_label = line[1:] \n# Fasta_dict[Fasta_label] =\"\"\n# else:\n# Fasta_dict[Fasta_label] += line\n# Fasta_seqs = list(Fasta_dict.values())\n# return Fasta_seqs\n\n# def frist_string_separetion():\n# Substring = []\n# Frist_string = dict_mk()[0]\n# for n in range(0,len(Frist_string)):\n# for i in range(0,len(Frist_string)):\n# if (i+n+200) - (0+i) == len(Frist_string[0+i:i+200+n]):\n# Substring.append(Frist_string[0+i:i+n+200])\n# #print(Frist_string[0+i:i+n+2])\n# return Substring\n \n\n# def motif(DNA,substring):\n# substring_location = []\n# y = len(substring)\n# for i in range(0,len(DNA)):\n# x = DNA.find(substring,i,i+y)\n# if x != -1:\n# substring_location.append(x+1)\n# else:\n# exit\n# if not substring_location:\n# substring_location = None\n# return substring_location\n\n\n\n# generally_share_motif = []\n# longest_string = ''\n# for sami_sub in frist_string_separetion():\n# counter = 0\n# for Seq_num,Seq in enumerate(dict_mk()): \n# if Seq_num == 0:\n# continue\n# if motif(Seq,sami_sub) is not None:\n# counter += 1\n \n# if counter == len(dict_mk())-1:\n# generally_share_motif.append(sami_sub)\n# #print(motif(Seq,sami_sub),sami_sub,Seq)\n# if sami_sub>longest_string:\n# longest_string = sami_sub\n# print(longest_string)\n \n# # print(max(generally_share_motif,key=len))\n\n\n\n\n\n# # ------Version 3.0---------\nfrom Bio import SeqIO\n\n\nFasta_seqs = []\n#for record in SeqIO.parse(\"C:/Users/nayan/Desktop/new 7.txt\", 'fasta'):\nfor record in SeqIO.parse(\"C:/Users/nayan/Downloads/rosalind_lcsm.txt\", 'fasta'):\n Fasta_seqs.append(str(record.seq))\n \n\n\ndef frist_string_separetion():\n Substring = []\n Frist_string = Fasta_seqs[0]\n for n in range(0,len(Frist_string)):\n for i in range(0,len(Frist_string)):\n if (i+n+2) - (0+i) == len(Frist_string[0+i:i+2+n]):\n Substring.append(Frist_string[0+i:i+n+2])\n return Substring\n \n\nprint(len(frist_string_separetion()))\nlongest_string = ''\nresult = frist_string_separetion()\nfinal_result = []\nfor sami_sub_num,sami_sub in 
enumerate(result):\n counter = 0\n for Seq_num,Seq in enumerate(Fasta_seqs): \n if Seq_num == 0:\n continue\n if str(sami_sub) in str(Seq):\n counter += 1\n if counter == len(Fasta_seqs)-1:\n # print(sami_sub)\n if sami_sub not in final_result:\n final_result.append(sami_sub)\n #print(len(final_result))\n \n\n\nprint(max(final_result,key=len))","repo_name":"Tharindakarawita/rosalind","sub_path":"Finding a Shared Motif.py","file_name":"Finding a Shared Motif.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18558363379","text":"import os\nimport argparse\nimport torch\nimport numpy as np\n\nif __name__ == '__main__':\n torch.multiprocessing.freeze_support()\n\n parser = argparse.ArgumentParser(description='Sentiment Deep Metric Learning')\n\n parser.add_argument(\"--module\", default=\"ml\", type=str)\n parser.add_argument(\"--data\", type=str, default=\"SS-Youtube\")\n parser.add_argument(\"--device\", type=str, default=\"cuda:0\")\n parser.add_argument(\"--experiment\", type=str, default=\"default\")\n\n parser.add_argument(\"--model\", default=\"lstm\", type=str)\n parser.add_argument(\"--pretrained\", type=str, default=\"xlm-roberta-large\")\n\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--lr_center\", type=float, default=0.5)\n parser.add_argument(\"--dropout\", type=float, default=0.3)\n parser.add_argument(\"--rec_dropout\", type=float, default=0)\n parser.add_argument(\"--weight_decay\", type=float, default=1e-8)\n parser.add_argument(\"--batch_size\", type=int, default=32)\n\n parser.add_argument(\"--pooling\", type=str, default=\"attention\")\n parser.add_argument(\"--hidden_size\", type=int, default=128)\n parser.add_argument(\"--num_layers\", type=int, default=1)\n parser.add_argument(\"--no_bidirectional\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--num_epochs\", type=int, default=100)\n parser.add_argument(\"--patience\", type=int, default=5)\n\n parser.add_argument(\"--no_train\", action=\"store_true\", default=False)\n parser.add_argument(\"--finetune\", type=str, default=None)\n parser.add_argument(\"--finetune_type\", type=str, default=\"change_final\")\n parser.add_argument(\"--freeze_bert\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--loss\", type=str, default=\"focal\")\n\n parser.add_argument(\"--weights\", type=str, default=\"dynamic_loss_size\")\n parser.add_argument(\"--thresholding\", type=str, default=\"class_specific\")\n\n parser.add_argument(\"--num_bins\", type=int, default=1000)\n parser.add_argument(\"--alpha\", type=float, default=1.0)\n parser.add_argument(\"--lambda\", type=float, default=0.003)\n parser.add_argument(\"--weight_smoothing\", type=float, default=0.05)\n parser.add_argument(\"--beta\", type=float, default=0.9999)\n parser.add_argument(\"--gamma\", type=float, default=2.0)\n \n parser.add_argument(\"--prepro\", type=str, default=\"all\")\n\n cfg = vars(parser.parse_args())\n\n cfg[\"bidirectional\"] = not cfg[\"no_bidirectional\"]\n\n data = ['SCv2-GEN', 'PsychExp']\n cfg['all_datasets'] = ['SS-Twitter', 'SS-Youtube', 'SCv1', 'SCv2-GEN', 'SE0714', 'Olympic', 'SemEval_Arabic', 'SemEval_English', 'SemEval_Spanish', 'SemEval_Arabic_English', 'SemEval_Arabic_Spanish', 'SemEval_English_Spanish', 'SemEval_English_Turkish', 'SemEval_Arabic_English_Spanish', 'SemEval_Turkish', 'SemEval_Tran_Spanish']\n cfg['ml_datasets'] = ['SE0714', 'Olympic', 'emoji-tweets']\n\n 
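# Unless --data is \"all\", restrict this run to the single requested dataset.\n 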
if cfg[\"data\"] != \"all\":\n data = [cfg[\"data\"]]\n\n if not torch.cuda.is_available():\n cfg[\"device\"] = \"cpu\"\n\n for datum in data:\n np.random.seed(61)\n torch.manual_seed(61)\n cfg[\"data\"] = datum\n\n if cfg[\"module\"] == \"feature_extraction\":\n from feature_extraction import FeatureExtraction\n\n fe = FeatureExtraction(cfg)\n fe.extract()\n elif cfg[\"module\"] == \"ml\":\n print(f\"Experimenting with the data {datum}\")\n from experiment_ml import ExperimentMultiLabelClassification\n experiment = ExperimentMultiLabelClassification(cfg)\n experiment.run()\n elif cfg[\"module\"] == \"etp\":\n from utils.emoji_tweets_preprocessing import EmojiTweetsPreprocessing\n etp = EmojiTweetsPreprocessing()\n etp.run()\n break\n elif cfg[\"module\"] == \"dataset_table\":\n from asset_generators.latex_handler import LatexHandler\n LatexHandler(cfg)\n elif cfg[\"module\"] == \"bert\":\n from experiment_bert import ExperimentBERT\n experiment = ExperimentBERT(cfg)\n experiment.run()\n elif cfg[\"module\"] == \"random\":\n from experiment_random import ExperimentRandom\n experiment = ExperimentRandom(cfg)\n experiment.run()\n elif cfg[\"module\"] == \"emojis_zipf\":\n from asset_generators.zipf_emojis import EmojisZipfLaw\n emojis_zipf = EmojisZipfLaw()\n emojis_zipf.run()\n elif cfg[\"module\"] == \"fasttext\":\n from experiment_fasttext import ExperimentFasttext\n experiment = ExperimentFasttext(cfg)\n experiment.run()\n elif cfg[\"module\"] == \"fasttext-auto\":\n from experiment_fasttext import ExperimentFasttext\n experiment = ExperimentFasttext(cfg)\n experiment.run_auto()\n elif cfg[\"module\"] == \"get_best_results\":\n from asset_generators.get_best_results import get_best_results\n get_best_results()\n else:\n raise Exception(\"Unknown module!\")\n","repo_name":"selimfirat/multilingual-sentiment-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28262858311","text":"import re\r\nfrom prettytable import PrettyTable\r\n\r\n\r\nclass Scanner:\r\n def __init__(self, file_path):\r\n self.file = open(file_path, 'r')\r\n self.tokens = []\r\n self.errors = []\r\n self.tokenType=[]\r\n self.tokenName=[]\r\n self.keywords = [\r\n 'False', 'None', 'True', 'and', 'as', 'assert', 'async', 'await', 'break', 'class', 'continue', 'def',\r\n 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is',\r\n 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield'\r\n ]\r\n\r\n def scan(self):\r\n for line_num, line in enumerate(self.file, start=1):\r\n line = line.strip()\r\n # Remove comments starting with '#'\r\n comment_index = line.find('#')\r\n if comment_index != -1:\r\n line = line[:comment_index]\r\n\r\n column_num = 1 # Initialize column number \r\n\r\n while line:\r\n if re.match(r'^[a-zA-Z][a-zA-Z0-9_]*', line):\r\n identifier = re.match(r'^[a-zA-Z][a-zA-Z0-9_]*', line).group()\r\n if identifier in self.keywords:\r\n self.tokens.append(('KEYWORD', identifier, line_num, column_num))\r\n self.tokenType.append('KEYWORD')\r\n self.tokenName.append(identifier)\r\n \r\n else:\r\n self.tokens.append(('IDENTIFIER', identifier, line_num, column_num))\r\n self.tokenType.append('IDENTIFIER')\r\n if len(identifier) ==1 :\r\n self.tokenName.append('ID')\r\n else:\r\n self.tokenName.append(identifier)\r\n line = line[len(identifier):].strip()\r\n elif 
re.match(r'^\"[^\\x00-\\x1F\\x7F]+\"', line):\r\n string_literal = re.match(r'^\"[^\\x00-\\x1F\\x7F]+\"', line).group()\r\n self.tokens.append(('STRING_LITERAL', string_literal, line_num, column_num))\r\n self.tokenType.append('STRING_LITERAL')\r\n self.tokenName.append(string_literal)\r\n line = line[len(string_literal):].strip()\r\n elif re.match(r'^[0-9]+', line):\r\n integer_literal = re.match(r'^[0-9]+', line).group()\r\n if int(integer_literal) > 2147483647:\r\n self.errors.append(('Lexical_Error', 'Integer value exceeds limit', line_num, column_num))\r\n else:\r\n self.tokens.append(('INTEGER_LITERAL', integer_literal, line_num, column_num))\r\n self.tokenType.append('INTEGER_LITERAL')\r\n self.tokenName.append(integer_literal)\r\n line = line[len(integer_literal):].strip()\r\n elif re.match(r'^\\+', line):\r\n self.tokens.append(('PLUS', '+', line_num, column_num))\r\n self.tokenType.append('OPERATOR_PLUS')\r\n self.tokenName.append('+')\r\n line = line[1:].strip()\r\n elif re.match(r'^-', line):\r\n self.tokens.append(('MINUS', '-', line_num, column_num))\r\n self.tokenType.append('OPERATOR_MINUS')\r\n self.tokenName.append('-')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\*', line):\r\n self.tokens.append(('ASTERISK', '*', line_num, column_num))\r\n self.tokenType.append('OPERATOR_ASTERISK')\r\n self.tokenName.append('*')\r\n line = line[1:].strip()\r\n elif re.match(r'^//', line):\r\n self.tokens.append(('DOUBLE_SLASH', '//', line_num, column_num))\r\n self.tokenType.append('OPERATOR_D_SLASH')\r\n self.tokenName.append('//')\r\n line = line[2:].strip()\r\n elif re.match(r'^%', line):\r\n self.tokens.append(('PERCENT', '%', line_num, column_num))\r\n self.tokenType.append('OPERATOR_PERCENT')\r\n self.tokenName.append('%')\r\n line = line[1:].strip()\r\n elif re.match(r'^<=', line):\r\n self.tokens.append(('LESS_THAN_OR_EQUAL', '<=', line_num, column_num))\r\n self.tokenType.append('OPERATOR_LTE')\r\n self.tokenName.append('<=')\r\n line = line[2:].strip()\r\n elif re.match(r'^>=', line):\r\n self.tokens.append(('GREATER_THAN_OR_EQUAL', '>=', line_num, column_num))\r\n self.tokenType.append('OPERATOR_GTE')\r\n self.tokenName.append('>=')\r\n line = line[2:].strip()\r\n elif re.match(r'^==', line):\r\n self.tokens.append(('EQUAL_EQUAL', '==', line_num, column_num))\r\n self.tokenType.append('OPERATOR_EE')\r\n self.tokenName.append('==')\r\n line = line[2:].strip()\r\n elif re.match(r'^!=', line):\r\n self.tokens.append(('NOT_EQUAL', '!=', line_num, column_num))\r\n self.tokenType.append('OPERATOR_NE')\r\n self.tokenName.append('!=')\r\n line = line[2:].strip()\r\n elif re.match(r'^<', line):\r\n self.tokens.append(('LESS_THAN', '<', line_num, column_num))\r\n self.tokenType.append('OPERATOR_LT')\r\n self.tokenName.append('<')\r\n line = line[1:].strip()\r\n elif re.match(r'^>', line):\r\n self.tokens.append(('GREATER_THAN', '>', line_num, column_num))\r\n self.tokenType.append('OPERATOR_GT')\r\n self.tokenName.append('>')\r\n line = line[1:].strip()\r\n elif re.match(r'^=', line):\r\n self.tokens.append(('EQUAL', '=', line_num, column_num))\r\n self.tokenType.append('OPERATOR_E')\r\n self.tokenName.append('=')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\(', line):\r\n self.tokens.append(('LEFT_PAREN', '(', line_num, column_num))\r\n self.tokenType.append('OPERATOR_LP')\r\n self.tokenName.append('(')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\)', line):\r\n self.tokens.append(('RIGHT_PAREN', ')', line_num, column_num))\r\n 
self.tokenType.append('OPERATOR_RP')\r\n self.tokenName.append(')')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\[', line):\r\n self.tokens.append(('LEFT_BRACKET', '[', line_num, column_num))\r\n self.tokenType.append('OPERATOR_LB')\r\n self.tokenName.append('[')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\]', line):\r\n self.tokens.append(('RIGHT_BRACKET', ']', line_num, column_num))\r\n self.tokenType.append('OPERATOR_RB')\r\n self.tokenName.append(']')\r\n line = line[1:].strip()\r\n elif re.match(r'^,', line):\r\n self.tokens.append(('COMMA', ',', line_num, column_num))\r\n self.tokenType.append('OPERATOR_CM')\r\n self.tokenName.append(',')\r\n line = line[1:].strip()\r\n elif re.match(r'^:', line):\r\n self.tokens.append(('COLON', ':', line_num, column_num))\r\n self.tokenType.append('OPERATOR_CL')\r\n self.tokenName.append(':')\r\n line = line[1:].strip()\r\n elif re.match(r'^\\.', line):\r\n self.tokens.append(('DOT', '.', line_num, column_num))\r\n self.tokenType.append('OPERATOR_DOT')\r\n self.tokenName.append('.')\r\n line = line[1:].strip()\r\n elif re.match(r'^->', line):\r\n self.tokens.append(('ARROW', '->', line_num, column_num))\r\n self.tokenType.append('OPERATOR_ARROW')\r\n self.tokenName.append('->')\r\n line = line[2:].strip()\r\n else:\r\n self.errors.append(('Lexical_Error', f'Invalid token: {line[0]}', line_num, column_num))\r\n line = line[1:].strip()\r\n\r\n column_num += 1\r\n\r\n\r\n\r\n def print_tokenTypes(self):\r\n for tokenType in self.tokenType:\r\n print(tokenType)\r\n\r\n def print_tokenNames(self):\r\n for tokenName in self.tokenName:\r\n print(tokenName) \r\n\r\n def print_tokens(self):\r\n table = PrettyTable(['Type', 'Value', 'Row','Column'])\r\n for token in self.tokens:\r\n table.add_row(token)\r\n print(table)\r\n\r\n def print_errors(self):\r\n for error in self.errors:\r\n print(error)\r\n\r\n def close_file(self):\r\n self.file.close()\r\n\r\n def printter_to_parser(self):\r\n lista_tokens_parser = [token_type for _, token_type, _, _ in self.tokens]\r\n return lista_tokens_parser\r\n \r\n\r\n\r\n# Example usage\r\nscanner = Scanner('ejemploMis.txt')\r\nscanner.scan()\r\n\r\n#print(\"Tokens:\")\r\n#scanner.print_tokens()\r\n\r\n#print(\"Errors Scanner :\")\r\n#scanner.print_errors()\r\n\r\n#scanner.print_tokenTypes()\r\n#scanner.print_tokenNames()\r\n\r\ntokensFinal = scanner.tokenName.copy()\r\n\r\nclass Parser:\r\n def __init__(self, tokens):\r\n self.tokens = tokens\r\n self.current_token = None\r\n self.errors = []\r\n\r\n def parse(self):\r\n self.next_token()\r\n self.program()\r\n if not self.errors:\r\n print(\"Parser funciona sin errores\")\r\n else:\r\n for error in self.errors:\r\n print(error)\r\n\r\n def next_token(self):\r\n if self.tokens:\r\n self.current_token = self.tokens.pop(0)\r\n else:\r\n self.current_token = None\r\n\r\n def match(self, expected_token):\r\n if self.current_token == expected_token:\r\n self.next_token()\r\n else:\r\n self.errors.append(f\"Expected {expected_token}, found {self.current_token}\")\r\n\r\n def program(self):\r\n self.def_list()\r\n\r\n def def_list(self):\r\n if self.current_token == \"def\":\r\n self.def_()\r\n self.def_list()\r\n\r\n def def_(self):\r\n self.match(\"def\")\r\n self.match(\"ID\")\r\n self.match(\"(\")\r\n self.typed_var_list()\r\n self.match(\")\")\r\n self.return_()\r\n self.match(\":\")\r\n self.block()\r\n\r\n def typed_var_list(self):\r\n if self.current_token == \"ID\":\r\n self.typed_var()\r\n self.typed_var_list_tail()\r\n\r\n def 
typed_var_list_tail(self):\r\n if self.current_token == \",\":\r\n self.match(\",\")\r\n self.typed_var()\r\n self.typed_var_list_tail()\r\n\r\n def typed_var(self):\r\n self.match(\"ID\")\r\n self.match(\":\")\r\n self.type_()\r\n\r\n def type_(self):\r\n if self.current_token in [\"int\", \"str\"]:\r\n self.match(self.current_token)\r\n elif self.current_token == \"[\":\r\n self.match(\"[\")\r\n self.type_()\r\n self.match(\"]\")\r\n\r\n def return_(self):\r\n if self.current_token == \"->\":\r\n self.match(\"->\")\r\n self.type_()\r\n\r\n def block(self):\r\n self.statement_list()\r\n\r\n def statement_list(self):\r\n if self.current_token in [\"if\", \"while\", \"for\", \"ID\", \"pass\", \"return\"]:\r\n self.statement()\r\n self.statement_list()\r\n\r\n def statement(self):\r\n if self.current_token == \"if\":\r\n self.match(\"if\")\r\n self.expr()\r\n self.match(\":\")\r\n self.block()\r\n\r\n elif self.current_token == \"while\":\r\n self.while_loop()\r\n \r\n elif self.current_token == \"for\":\r\n self.match(\"for\")\r\n self.match(\"ID\")\r\n self.match(\"in\")\r\n self.expr()\r\n self.match(\":\")\r\n self.block()\r\n\r\n elif self.current_token == \"ID\":\r\n self.match(\"ID\")\r\n elif self.current_token == \"pass\":\r\n self.match(\"pass\")\r\n elif self.current_token == \"return\":\r\n self.match(\"return\")\r\n self.return_expr()\r\n\r\n def return_expr(self):\r\n if self.current_token != \"NEWLINE\":\r\n self.expr()\r\n \r\n def while_loop(self):\r\n self.match(\"while\")\r\n self.expr()\r\n self.match(\":\")\r\n self.block()\r\n\r\n def expr(self):\r\n self.or_expr()\r\n\r\n def or_expr(self):\r\n self.and_expr()\r\n\r\n def and_expr(self):\r\n self.not_expr()\r\n\r\n def not_expr(self):\r\n if self.current_token == \"not\":\r\n self.match(\"not\")\r\n self.comp_expr()\r\n else:\r\n self.comp_expr()\r\n\r\n def comp_expr(self):\r\n self.int_expr()\r\n\r\n def int_expr(self):\r\n self.term()\r\n\r\n def term(self):\r\n self.factor()\r\n\r\n def factor(self):\r\n if self.current_token == \"-\":\r\n self.match(\"-\")\r\n self.factor()\r\n elif self.current_token == \"ID\":\r\n self.match(\"ID\")\r\n elif self.current_token in [\"None\", \"True\", \"False\", \"INTEGER\", \"STRING\"]:\r\n self.match(self.current_token)\r\n elif self.current_token == \"[\":\r\n self.match(\"[\")\r\n self.expr_list()\r\n self.match(\"]\")\r\n elif self.current_token == \"(\":\r\n self.match(\"(\")\r\n self.expr()\r\n self.match(\")\")\r\n\r\n def expr_list(self):\r\n if self.current_token != \"]\":\r\n self.expr()\r\n self.expr_list_tail()\r\n\r\n def expr_list_tail(self):\r\n if self.current_token == \",\":\r\n self.match(\",\")\r\n self.expr()\r\n self.expr_list_tail()\r\n\r\n\r\n# Lista de tokens\r\n#tokens = [\"def\", \"foo\", \"(\", \"x\", \":\", \"int\", \")\", \"->\", \"str\", \":\", \"ID\", \"=\", \"ID\", \"NEWLINE\", \"def\", \"bar\", \"(\", \"y\", \":\", \"str\", \")\", \"->\", \"None\", \":\", \"pass\", \"NEWLINE\", \"if\", \"x\", \"==\", \"1\", \":\", \"pass\", \"else\", \":\", \"pass\", \"NEWLINE\", \"INVALID\"]\r\n\r\ntokens = [\"ID\", \"+\", \"ID\"]\r\n\r\n# Crear una instancia del parser\r\n#parser = Parser(tokens)\r\n\r\n# Ejecutar el análisis sintáctico\r\n#parser.parse()\r\n#scanner.close_file()\r\n\r\n","repo_name":"6162636465/Compilador","sub_path":"scannerLast.py","file_name":"scannerLast.py","file_ext":"py","file_size_in_byte":14705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34478991016","text":"\nade = 
0\nbayo = 0\nleg_1 = \"l\"\nleg_2 = \"r\"\n\nfor i in range(10):\n ade_1 = input(\"Put a foot forward: \")\n bayo_1 = input(\"Put a foot forward: \")\n if ade_1 == \"l\" and bayo_1 == \"r\":\n ade += 1\n print(\"Ade Wins!!!\")\n elif ade_1 == \"r\" and bayo_1 == \"l\":\n print(\"Ade Wins!!!\")\n ade += 1\n elif ade_1 == bayo_1:\n print(\"Bayo Wins!!!\")\n bayo += 1\nprint(\"Ade wins\", ade, \"times, while\", \"Bayo wins\", bayo, \"times\")\n\n\n\n\n","repo_name":"everybees/python_with_cohorts","sub_path":"nine/fola/ten_ten.py","file_name":"ten_ten.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"69981454665","text":"from collections import defaultdict\nimport os\nimport sys # for the command-line params\nimport time\nimport xml.etree.ElementTree as ET\n\nimport requests\n\nimport config\nimport db\n\nconnection = db.Connection()\n\ndef load_xml(taxon, save_samples=True, save_tags=False):\n \"\"\"\n Loads the \"full text XML\" exported from a search of BioSamples and adds\n them to the database.\n\n Inputs:\n - taxon: The taxon ID from the NCBI taxonomy browser associated with the samples.\n \"\"\"\n print(f'\\n\\n\\n===================\\nProcessing XML for taxon {taxon}\\n==========\\n\\n')\n\n # load the XML file\n print('loading xml...')\n tree = ET.parse(f'./{taxon}.xml')\n biosamples = tree.getroot()\n print('processing samples!')\n # iterate through each entry in the file\n done = -1\n for sample in biosamples:\n done += 1\n if done % 10000 == 0:\n print(f' {done} out of {len(biosamples)} complete.')\n # find SRA ID of sample\n # example: SRS5588834 \n sra = None\n for x in sample.iter('Id'):\n if 'db' in x.attrib.keys() and x.attrib['db'] == 'SRA':\n sra = x.text\n if sra is None:\n continue # skip samples without an SRA sample\n\n # NOTE: we used to check for BioProject ID here,\n # but for some reason half the samples don't list a bioproject\n # even if they have one.\n\n if save_samples:\n # write sample into table\n with connection.db.cursor() as cursor:\n # Possibly beneficial: if we've already recorded this sample, this will error\n cursor.execute('INSERT INTO samples (srs, taxon) VALUES (%s, %s);', (sra, taxon))\n\n if save_tags:\n # go through all the attributes and tally them\n all_tags = {}\n for tag in sample.iter('Attribute'):\n text = tag.text.lower()\n if 'harmonized_name' in tag.attrib.keys():\n all_tags[tag.attrib['harmonized_name']] = text\n elif 'attribute_name' in tag.attrib.keys():\n all_tags[tag.attrib['attribute_name']] = text\n # add all the tags to the tag table\n with connection.db.cursor() as cursor:\n sql = 'INSERT INTO tags (srs, tag, value) VALUES (%s, %s, %s);'\n params = [(sra, tag, value) for (tag, value) in all_tags.items()]\n cursor.executemany(sql, params)\n\n print(f'{len(biosamples)} total samples')\n\ndef find_runs(count, per_query=50):\n \"\"\"\n Queries the NCBI eUtils API to use sample IDs (\"SRS\" codes)\n to get information about runs (\"SRR\" codes) that can then\n be downloaded as FASTQ files.\n\n Inputs:\n - count: int. The upper limit for how many entries to search in total.\n - per_query: int. 
The number of entries to request in each web request\n \"\"\"\n\n todo = connection.read(\"\"\"\n SELECT srs FROM samples\n WHERE srr IS NULL\n LIMIT %s\"\"\", (count,))\n\n todo = [x[0] for x in todo] # each ID is nested inside a tuple of length 1\n print(f'Found {len(todo)} samples to process')\n cursor = 0\n multiple_runs = 0\n while cursor < len(todo):\n if cursor > 0 and cursor % (per_query * 100) == 0:\n time.sleep(30) # chill out every 100 requests\n if cursor % 1000 == 0:\n print(f'COMPLETE: {cursor}')\n\n url = f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?tool={config.tool}&email={config.email}&db=sra&usehistory=y&term='\n for x in range(0, per_query):\n url += f'{todo[cursor]}[accn] or '\n cursor += 1\n if cursor == len(todo): break # in case the total isn't a multiple of \"per_query\"\n url = url[:-4] # trim off trailing \" or \"\n if len(url) >1950:\n print(url)\n print('\\n\\n\\nURL IS TOO LONG! Bailing to avoid cutting off request.')\n exit(1)\n try:\n r = requests.get(url)\n except:\n print('ERROR: Error sending request for webenv data. Skipping.')\n time.sleep(3)\n continue\n\n try:\n tree = ET.fromstring(r.text)\n except:\n print('ERROR: Couldnt parse response retrieving webenv data. Skipping.')\n time.sleep(3)\n continue\n\n webenv = tree.find('WebEnv')\n if webenv is None:\n print('\\n---------\\n')\n print(r.text)\n print(\"WARNING: Got response without a 'webenv' field. Moving on.\")\n print('\\n---\\n')\n time.sleep(10)\n continue\n time.sleep(1)\n url = f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?tool={config.tool}&email={config.email}&db=sra&query_key=1&WebEnv={webenv.text}'\n if len(url) >1950:\n print(url)\n print('\\n\\n\\nURL IS TOO LONG! Bailing to avoid cutting off request.')\n exit(1)\n\n r = requests.get(url)\n try:\n tree = ET.fromstring(r.text)\n except ET.ParseError:\n print(\"WARNING: Misformed response from call to eFetch. Skipping.\")\n time.sleep(10)\n continue\n multiple_runs += _record_data(tree)\n time.sleep(1)\n print(f\"\\n\\nTOTAL SAMPLES WITH MULTIPLE RUNS: {multiple_runs}.\\n\\n\")\n\ndef _record_data(data):\n \"\"\"Parses a response from the efetch endpoint that has info about\n all the samples in the query.\"\"\"\n multiple_runs = 0\n\n for package in data.findall('EXPERIMENT_PACKAGE'):\n sample = None\n tosave = {'run': []}\n\n for x in package.iter('SAMPLE'):\n if 'accession' in x.attrib.keys():\n sample = x.attrib['accession']\n for x in package.iter('RUN'):\n if 'accession' in x.attrib.keys():\n tosave['run'].append(x.attrib['accession'])\n if 'published' in x.attrib.keys():\n tosave['pubdate'] = x.attrib['published']\n if 'total_bases' in x.attrib.keys():\n tosave['total_bases'] = x.attrib['total_bases']\n for x in package.iter('EXTERNAL_ID'):\n if 'namespace' in x.attrib.keys():\n if x.attrib['namespace'] == 'BioProject':\n tosave['project'] = x.text\n break\n for x in package.iter('LIBRARY_STRATEGY'):\n tosave['library_strategy'] = x.text\n for x in package.iter('LIBRARY_SOURCE'):\n tosave['library_source'] = x.text\n\n # If we found multiple runs, combine them into a single string\n if len(tosave['run']) == 0:\n tosave['run'] = None\n elif len(tosave['run']) == 1:\n tosave['run'] = tosave['run'][0]\n else:\n print(f\"MULTIPLE RUNS! 
{len(tosave['run'])}\")\n multiple_runs += 1\n delim = ';'\n print(f\"{sample}: {tosave['run']} ({delim.join(tosave['run'])})\")\n tosave['run'] = delim.join(tosave['run'])\n\n with connection.db.cursor() as cursor:\n # If there is no SRA run identified, SKIP this entry.\n # Sometimes a sample will have multiple entries, one with\n # a run (and lots of metadata) and another without any info\n # but DIFFERENT metadata. We only want ones that have a run.\n if tosave.get('run') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET srr=%s\n WHERE srs=%s\n \"\"\", (tosave.get('run'), sample))\n else:\n continue\n\n if tosave.get('project') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET project=%s\n WHERE srs=%s\n \"\"\", (tosave.get('project'), sample))\n if tosave.get('library_strategy') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET library_strategy=%s\n WHERE srs=%s\n \"\"\", (tosave.get('library_strategy'), sample))\n if tosave.get('library_source') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET library_source=%s\n WHERE srs=%s\n \"\"\", (tosave.get('library_source'), sample))\n if tosave.get('pubdate') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET pubdate=%s\n WHERE srs=%s\n \"\"\", (tosave.get('pubdate'), sample))\n if tosave.get('total_bases') is not None:\n cursor.execute(\"\"\"\n UPDATE samples\n SET total_bases=%s\n WHERE srs=%s\n \"\"\", (tosave.get('total_bases'), sample))\n return multiple_runs\n\ndef write_lists(min_samples=10):\n \"\"\"\n Fetches a list of SRA projects from the local database and generates a list\n of samples for each project.\n\n Inputs:\n - min_samples: int. The minimum number of samples that a project needs to\n have to get a list generated.\n \"\"\"\n\n todo = connection.read(\"\"\"\n SELECT samplecount.project\n FROM (\n SELECT project, COUNT(srs) AS tally\n FROM samples\n WHERE srr IS NOT NULL AND project IS NOT NULL\n AND library_source IN ('GENOMIC','METAGENOMIC')\n AND library_strategy='AMPLICON'\n GROUP BY 1\n ORDER BY 2 ASC\n ) AS samplecount\n WHERE samplecount.tally >= %s\n AND samplecount.tally < 50\"\"\", (min_samples,))\n todo = [x[0] for x in todo] # each ID is nested inside a tuple of length 1\n\n project_samples = []\n\n for project in todo:\n samples = connection.read(\"\"\"\n SELECT s.srr\n FROM SAMPLES s\n WHERE srr IS NOT NULL\n AND library_source IN ('GENOMIC','METAGENOMIC')\n AND library_strategy='AMPLICON'\n AND project=%s\"\"\", (project,))\n samples = [x[0] for x in samples]\n project_samples.append((project, len(samples)))\n\n if os.path.exists(f'accession_lists/{project}/SraAccList.txt'):\n print(f'Project already recorded: {project}')\n continue\n\n os.mkdir(f'accession_lists/{project}')\n\n with open(f'accession_lists/{project}/SraAccList.txt','w') as f:\n for sample in samples:\n for run in sample.split(';'):\n f.write(f'{run}\\n')\n\n with connection.db.cursor() as cursor:\n sql = \"\"\"\n UPDATE samples SET exported=true\n WHERE srr IN (\n SELECT s.srr\n FROM SAMPLES s\n WHERE srr IS NOT NULL\n AND library_source IN ('GENOMIC','METAGENOMIC')\n AND library_strategy='AMPLICON'\n AND project=%s\n )\n \"\"\"\n cursor.execute(sql, (project,))\n\n with open(f'samples_per_project.csv','a') as f:\n for x in project_samples:\n f.write(f'{x[0]}, {x[1]}\\n')\n\n\nif __name__ == \"__main__\":\n load_xml('txid408170', save_samples=True, save_tags=False)\n load_xml('txid408170', save_samples=False, save_tags=True)\n\n # only command-line param is how many to do in this session\n todo 
= 2000 if len(sys.argv) < 2 else sys.argv[1]\n find_runs(todo, per_query=80)\n write_lists(min_samples=10)\n","repo_name":"blekhmanlab/compendium_v1","sub_path":"pipeline/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41120571423","text":"# Use the requests library to make GET requests\n\nimport requests\nimport json\n\n\nclass SendGetRequest:\n\n def __init__(self, host=None, endpoint=None):\n self.host = \"http://127.0.0.1:9999/\"\n self.endpoint = \"get\"\n\n # 1. GET request without parameters\n def request_getnoparam(self):\n # str.join(item)\n # join() takes exactly one argument\n # it concatenates every member into one string, separated by str\n # the members can come from a string, list, tuple, or dict\n url = ''.join([self.host, self.endpoint])\n # send a request to the page and get back the status code\n # the get() method returns a requests.Response object.\n response_data = requests.get(url)\n print(\"Response of the GET request without parameters:\\n\", response_data)\n\n # r.text returns the HTTP response body as a string\n print(type(response_data.text))\n print(response_data.text)\n # eval is a built-in function that evaluates a string as an expression and returns the result.\n print(eval(response_data.text))\n\n # 2. GET request with parameters\n def request_getwithparam(self, host=None, endpoint=None):\n url = ''.join([self.host, self.endpoint])\n params = {\"show_env\": \"1\"}\n r = requests.get(url=url, params=params)\n print(r.text)\n\n def request_getwithheader(self):\n url = ''.join([self.host, self.endpoint])\n headers = {\"User-Agent\": \"test request headers\"}\n\n r = requests.get(url, headers=headers)\n print(eval(r.text)['headers']['User-Agent'])\n\n def request_getwithheaderandparam(self):\n url = ''.join([self.host, self.endpoint])\n headers = {\"User-Agent\": \"test request headers\"}\n params = {\"test_param1\": \"111\", \"test_param2\": \"222\"}\n\n r = requests.get(url, headers=headers, params=params)\n print(r.text)\n\n\nclass SendPostRequest:\n def __init__(self):\n self.host = \"http://127.0.0.1:9999/\"\n self.end_point = \"post\"\n\n def sendpost_with_dataandheader(self):\n url = ''.join([self.host, self.end_point])\n data = {\"key1\": \"value1\", \"key2\": \"value2\"}\n headers = {\"User-Agent\": \"test post headers\"}\n r = requests.post(url, data=data, headers=headers)\n print(r.text)\n\n def sendpost_uploadfile(self):\n url = ''.join([self.host, self.end_point])\n files = {\n 'file': open('testfile.txt', 'rb')\n }\n # what happens if the '=' is left out here? and what mode is 'rb'?\n r = requests.post(url, files=files)\n print(r.text)\n\nclass Cookie:\n def get_baidu_cookie(self):\n url = \"https://baidu.com\"\n # what is get() used for\n r = requests.get(url)\n # convert the RequestsCookieJar into a dict\n print(requests.utils.dict_from_cookiejar(r.cookies))\n\nclass Session:\n def __init__(self):\n self.host = \"http://127.0.0.1:9999/\"\n self.end_point = \"headers\"\n\n def send_session(self):\n url = ''.join([self.host, self.end_point])\n # url1 = \"http://127.0.0.1:9999/cookies/set/sessioncookie/123456789\"\n header1 = {\"test1\":\"111\"}\n header2 = {\"test2\":\"222\"}\n\n # initialize a session object\n s = requests.session()\n # cookie info is stored in the session\n s.headers.update(header1)\n r = s.get(url, headers = header2)\n print(r.text, \"--------------------\")\n\n\n# send_get_request = SendGetRequest()\n# send_get_request.request_getwithheaderandparam()\n#\n# send_post_request = SendPostRequest()\n# send_post_request.sendpost_uploadfile()\n#\n# cookie= Cookie()\n# baidu_cookie = cookie.get_baidu_cookie()\n\nsession_one = 
Session()\nsession_one.send_session()","repo_name":"ednahlili/send_requests_douban","sub_path":"send_requests.py","file_name":"send_requests.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12949535838","text":"import numpy as np\n\ny = np.array([3, 3/4, 1, 0, -9/4])\ny_predict = np.array([3, 2, 1, 0, -1])\nmean_y = np.mean(y)\n\nTSS = np.sum([(y[i]-y_predict[i])**2 for i in range(len(y))])\nRSS = np.sum([(elem - mean_y)**2 for elem in y])\n\n\n\n# 3d\n\nx = np.array([-2, -1, 0, 1, 2])\n\nb0 = 0.5 \nb1 = -9/8\n\nnew_y = [b0 + b1*x_i for x_i in x]\ns2 = np.sum([(new_y - y_i)**2 for y_i in y])/len(y)\n\nse1 = np.sqrt(s2*1/5)\n\nse2 = np.sqrt(s2*1/10)\n\n\n#2a\nprint(1/(1+np.exp(0.5)))\nprint(1/(1+np.exp(6-2.5-3.5)))","repo_name":"Starcrysis/WS_23_coding_abgaben","sub_path":"SDS/Ex3_1a.py","file_name":"Ex3_1a.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20782436551","text":"#!/usr/bin/python3\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.netutil\nimport tornado.httpserver\n\nimport webbrowser\n\nimport argparse\nimport os\n\n\n\ndef load_asset(asset):\n progpath = os.path.dirname(os.path.realpath(__file__))\n \n f = open(progpath+\"/assets/\"+asset, \"r\")\n asset = f.read()\n f.close()\n \n return asset\n\nclass AssetHandler(tornado.web.RequestHandler):\n def get(self):\n path = self.request.uri[len(\"/assets/\"):]\n \n if path.endswith(\".html\"):\n self.set_header(\"Content-Type\", \"text/html\")\n elif path.endswith(\".css\"):\n self.set_header(\"Content-Type\", \"text/css\")\n elif path.endswith(\".js\"):\n self.set_header(\"Content-Type\", \"text/javascript\")\n else:\n self.set_header(\"Content-Type\", \"text/plain\")\n \n try:\n self.write(load_asset(path))\n \n except FileNotFoundError:\n self.clear()\n self.set_status(404)\n self.finish(\"404 - File {0} cannot be found!\".format(path))\n \n return\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(load_asset(\"index.html\"))\n\n\nclass DataHandler(tornado.web.RequestHandler):\n def get(self):\n global PATH_PREFIX\n path = PATH_PREFIX + self.request.uri[len(\"/data\"):]\n \n try:\n f = open(path, \"r\")\n \n self.write(f.read())\n \n f.close()\n \n except FileNotFoundError:\n self.clear()\n self.set_status(404)\n self.finish(\"404 - File {0} cannot be found!\".format(path))\n\n\nclass StructureHandler(tornado.web.RequestHandler):\n def get(self):\n global PATH_PREFIX\n \n self.set_header(\"Content-Type\", \"text/html\")\n \n self.write(\"
+{"seq_id":"20782436551","text":"#!/usr/bin/python3\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.netutil\nimport tornado.httpserver\n\nimport webbrowser\n\nimport argparse\nimport os\n\n\n\ndef load_asset(asset):\n    progpath = os.path.dirname(os.path.realpath(__file__))\n    \n    f = open(progpath+\"/assets/\"+asset, \"r\")\n    asset = f.read()\n    f.close()\n    \n    return asset\n\nclass AssetHandler(tornado.web.RequestHandler):\n    def get(self):\n        path = self.request.uri[len(\"/assets/\"):]\n        \n        if path.endswith(\".html\"):\n            self.set_header(\"Content-Type\", \"text/html\")\n        elif path.endswith(\".css\"):\n            self.set_header(\"Content-Type\", \"text/css\")\n        elif path.endswith(\".js\"):\n            self.set_header(\"Content-Type\", \"text/javascript\")\n        else:\n            self.set_header(\"Content-Type\", \"text/plain\")\n        \n        try:\n            self.write(load_asset(path))\n        \n        except FileNotFoundError:\n            self.clear()\n            self.set_status(404)\n            self.finish(\"404 - File {0} cannot be found!\".format(path))\n        \n        return\n\n\nclass MainHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.write(load_asset(\"index.html\"))\n\n\nclass DataHandler(tornado.web.RequestHandler):\n    def get(self):\n        global PATH_PREFIX\n        path = PATH_PREFIX + self.request.uri[len(\"/data\"):]\n        \n        try:\n            f = open(path, \"r\")\n            \n            self.write(f.read())\n            \n            f.close()\n        \n        except FileNotFoundError:\n            self.clear()\n            self.set_status(404)\n            self.finish(\"404 - File {0} cannot be found!\".format(path))\n\n\nclass StructureHandler(tornado.web.RequestHandler):\n    def get(self):\n        global PATH_PREFIX\n        \n        self.set_header(\"Content-Type\", \"text/html\")\n        \n        self.write(\"<html><head>\\n    <title>Structure</title>\\n</head>\\n<body>\")\n        self.write(\"<ul>\\n\")\n        \n        for root, dirs, files in os.walk(PATH_PREFIX):\n            for f in files:\n                if f.endswith(\".md\"):\n                    href = os.path.join(root, f)[len(PATH_PREFIX):]\n                    link = \"<a href=\\\"/wiki/{0}\\\">{0}</a>\".format(href)\n                    self.write(\"<li>\")\n                    self.write(link)\n                    self.write(\"</li>\\n\")\n        \n        self.write(\"</ul>\\n</body></html>\\n\")\n\ndef make_app():\n    return tornado.web.Application([\n        (r\"/\", MainHandler),\n        (r\"/wiki/.*\", MainHandler),\n        (r\"/assets/.*\", AssetHandler),\n        (r\"/data/.*\", DataHandler),\n        (r\"/structure\", StructureHandler)\n    ])\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"Show Markdown files as Wiki\")\n    parser.add_argument(\"--data\", help=\"Data directory\",\n                        default=os.getcwd())\n    parser.add_argument(\"--port\", help=\"use this port for the HTTP server\",\n                        default=0)\n    args = parser.parse_args()\n    \n    global PATH_PREFIX\n    PATH_PREFIX = args.data\n    \n    print(\"Data directory is {0}\".format(PATH_PREFIX))\n    \n    app = make_app()\n    sockets = tornado.netutil.bind_sockets(args.port, '')\n    server = tornado.httpserver.HTTPServer(app)\n    server.add_sockets(sockets)\n    \n    port = None\n    \n    for s in sockets:\n        print('Listening on %s, port %d' % s.getsockname()[:2])\n        if port is None:\n            port = s.getsockname()[1]\n    \n    url = \"http://localhost:{0}/\".format(port)\n    print(\"If the browser does not open automatically, please go to {0}.\".format(url))\n    \n    webbrowser.open(url)\n    \n    tornado.ioloop.IOLoop.current().start()\n\n\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python; indend-pasted-text false; remove-trailing-space off\n","repo_name":"penguineer/MarkdownGitWikiViewer","sub_path":"MarkdownGitWikiViewer.py","file_name":"MarkdownGitWikiViewer.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"71898504264","text":"import ckanapi\nimport time\n\nclass CKANConnector:\n    def __init__(self, server, apikey, resource_id = None, package_id = None):\n        self.server = server\n        self.apikey = apikey\n        self.resource_id = resource_id\n        self.package_id = package_id\n        self.ckan = ckanapi.RemoteCKAN(self.server, apikey=self.apikey)\n\n    def UpdateResource(self, path_of_input_file):\n        resourceinfo = self.ckan.action.resource_show(id=self.resource_id)\n        self.ckan.action.resource_update(id=self.resource_id,upload=open(path_of_input_file,'rb'), format=resourceinfo[\"format\"])\n\n    def UpdateDataStore(self, records):\n        if type(records) is dict:\n            result = self.ckan.action.resource_show(id=self.resource_id)\n            if result[\"datastore_active\"] is True:\n                self.ckan.action.datastore_upsert(resource_id=self.resource_id, records=[records], force=True)\n            else: \n                self.ckan.action.datastore_create(resource_id=self.resource_id, records=[records], \n                primary_key=[list(records.keys())[-1]], force=True)\n\n    def CreateResource(self, path_of_input_file, name):\n        if self.package_id:\n            self.ckan.action.resource_create(package_id=self.package_id,upload=open(path_of_input_file,'rb'), name=name)\n\n# if __name__ == '__main__':\n#     tmp = CKANConnector(\"http://localhost:5000\", \"b3c5e73a-52ef-4156-96eb-ef022746ea74\", \n#         \"c6e659e0-4450-49f3-833d-a388b966f1b4\")\n\n#     tmp.UpdateDataStore({'a' : 'b'})\n\n# ckan.UpdateDataStore(json_data)\n","repo_name":"congbk92/opendds-ckan","sub_path":"source/CKANConnector.py","file_name":"CKANConnector.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"73432085705","text":"import plotly.express as px\nimport plotly.graph_objects as go\n\nimport json\nfrom scipy.spatial.distance import euclidean\nfrom sklearn.preprocessing import QuantileTransformer\n\nfrom smap.coloring.Colorscale import get_colorscale\nfrom smap.coloring.Colors import str_to_rgb\n
from smap.smap import get_smap\n\n\ndef get_seriation_map(order_method):\n    smap = get_smap()\n    df = smap.get_feature_df()\n\n    order = order_method.get_order(df)\n    df['order'] = order\n    df = df.sort_values(by='order')\n    df.drop(['order'], axis=1, inplace=True)\n\n    bsu_order = [0 for _ in range(len(order))]\n    for idx, bsu in enumerate(order):\n        bsu_order[bsu] = idx\n\n    geojson = json.loads(smap.regions.to_json())\n\n    fig = go.Figure()\n\n    if order_method.clustering:\n        z = [str(x) for x in order]\n        colorbar = {\n            'title': 'Cluster',\n        }\n    else:\n        colorscheme = px.colors.sequential.Rainbow\n        colorscheme = [str_to_rgb(x) for i, x in enumerate(colorscheme) if i not in {1,3,6}]\n        distances = [0.0] + [euclidean(df.iloc[order[i-1]], df.iloc[order[i]]) for i in range(1, len(bsu_order))]\n        colorscale = get_colorscale(distances, colorscheme)\n        colorbar = {\n            'title': 'Order'\n        }\n\n    fig.add_trace(go.Choroplethmapbox(geojson=geojson, locations=df.region_id,\n                                      z=order,\n                                      colorscale=colorscale,\n                                      marker_opacity=0.8, marker_line_width=0,\n                                      colorbar=colorbar))\n    fig.update_layout(mapbox={\n        'accesstoken': open('./mapbox.tk').read(),\n        'style': 'mapbox://styles/aldubray/ckkmt2c1x52bj17qt3kqtlxi8',\n        'center': {\n            'lat': 50.50000,\n            'lon': 4.441393\n        },\n    },\n        mapbox_zoom=7,\n        height=800,\n        width=1000,\n    )\n\n\n    scaler = QuantileTransformer()\n    feats = []\n    for f in smap.features:\n        if f.categorical:\n            for v in f.values:\n                feats.append(v)\n        else:\n            feats.append(f.name)\n    hm = go.Figure(go.Heatmap(z=scaler.fit_transform(df[feats]),\n                              x=feats,\n                              colorscale='Greys'))\n    return fig, hm\n","repo_name":"AlexandreDubray/seriation-map","sub_path":"smap/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14415935363","text":"def readDFA(file):\n    with open(file, \"r\") as f:\n        content = f.read()\n        content = content.split(\"\\n\")\n        state = content[0]\n        input = content[1]\n        start = content[2]\n        final = content[3]\n        transisi = {}\n        for i in range(4, len(content)):\n            # NOTE: the original left this loop unfinished; this completion\n            # assumes each remaining line has the form \"state,type=next_state\"\n            if content[i]:\n                key, value = content[i].split(\"=\")\n                transisi[key.strip()] = value.strip()\n    return state, input, start, final, transisi\n\n\ndef validated(var):\n    state, input, start, final, transisi = readDFA(\"dfa.txt\")\n    stateNow = start\n    i = 0\n    while(i < len(var) and stateNow != 'x'):\n        if var[i] in (['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',\n                        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']):\n            tipe = \"huruf\"\n        elif var[i] in ([\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]):\n            tipe = \"angka\"\n        elif var[i] == \"_\":\n            tipe = \"underscore\"\n        else:\n            tipe = \"tidak diketahui\"\n        \n        if tipe in input:\n            stateNow = transisi[f\"{stateNow},{tipe}\"]\n        else:\n            return False\n        \n        i += 1\n    \n    if stateNow == final:\n        return True\n    else:\n        return False","repo_name":"kibare/Python-Compiler","sub_path":"FA.py","file_name":"FA.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"24442423648","text":"def show(chessboard):\n    \"\"\"Shows the chessboard in the console.\n    DOES NOT WORK UNTIL ALL CLASSES: Pawn, Knight, Queen, King, Rook, Bishop ARE CREATED!!!\n    \"\"\"\n    WHITE = {\n        Pawn: chr(9817),\n        Knight: chr(9816),\n        Queen: chr(9813),\n        King: chr(9812),\n        Rook: chr(9814),\n        Bishop: chr(9815),\n    }\n    BLACK = {\n        Pawn: chr(9823),\n        Knight: chr(9822),\n        Queen: chr(9819),\n        King: chr(9818),\n        Rook: chr(9820),\n        Bishop: chr(9821),\n    }\n    for y in range(7, -1, -1):\n        print(y+1, end='\\t')\n        for x in range(8):\n            if chessboard.board[x][y] is not None:\n                if chessboard.board[x][y].color == 'white':\n                    print(WHITE[type(chessboard.board[x][y])], end='\\t')\n                else:\n                    print(BLACK[type(chessboard.board[x][y])], end='\\t')\n            else:\n                print('\\t', end='')\n        print('\\n')\n    print('\\t', end='')\n    for x in range(8):\n        print(chr(65+x), end='\\t')\n    print()\n\n\ndef filter_moves(moves):  # so that pieces do not move off the board\n    output = []\n    for move in moves:\n        if 0 <= move[0] < 8 and 0 <= move[1] < 8:\n            output.append(move)\n    return output\n\n\nclass Chessboard:\n    def __init__(self):\n        self.color = 'white'\n        self.board = [\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,\n            [None] * 8,  # this trailing comma is allowed and does no harm\n        ]\n\n    def setup(self):\n        self.board[0][0] = Rook('white', 0, 0)\n        self.board[1][0] = Knight('white', 1, 0)\n        self.board[2][0] = Bishop('white', 2, 0)\n        self.board[3][0] = Queen('white', 3, 0)\n        self.board[4][0] = King('white', 4, 0)\n        self.board[5][0] = Bishop('white', 5, 0)\n        self.board[6][0] = Knight('white', 6, 0)\n        self.board[7][0] = Rook('white', 7, 0)\n        for x in range(0, 8):\n            self.board[x][1] = Pawn('white', x, 1)\n            self.board[x][6] = Pawn('black', x, 6)\n        self.board[0][7] = Rook('black', 0, 7)\n        self.board[1][7] = Knight('black', 1, 7)\n        self.board[2][7] = Bishop('black', 2, 7)\n        self.board[3][7] = Queen('black', 3, 7)\n        self.board[4][7] = King('black', 4, 7)\n        self.board[5][7] = Bishop('black', 5, 7)\n        self.board[6][7] = Knight('black', 6, 7)\n        self.board[7][7] = Rook('black', 7, 7)\n\n    def list_allowed_moves(self, x, y):\n        if self.board[x][y] == None:  # check whether square x, y is empty\n            return None\n        if self.board[x][y].color != self.color:  # check whether the player's colour matches the piece's colour\n            return None\n        return self.board[x][y].list_allowed_moves(self)  # piece at x, y, what are your allowed moves\n        # within this class, self is the chessboard\n\n    def move(self, from_x, from_y, to_x, to_y):\n        allowed_moves = self.list_allowed_moves(from_x, from_y)  # ask for the allowed moves from from_x, from_y\n        if allowed_moves == None:\n            raise ValueError('There is no piece of yours here!')\n\n        if (to_x, to_y) not in allowed_moves:  # check whether the piece may move to to_x, to_y\n            raise ValueError('This move is impossible!')\n\n        self.board[to_x][to_y] = self.board[from_x][from_y]  # the piece moves onto the new square\n\n        self.board[from_x][from_y] = None  # the old square becomes empty\n\n        self.board[to_x][to_y].move(to_x, to_y)  # tell the piece at its new address about its new position\n\n        if self.color == 'white':  # switch the active colour\n            self.color = 'black'\n        else:\n            self.color = 'white'\n\n    def is_enemy(self, x, y, my_color):  # to see whether there are enemies we can capture, or squares we cannot move to\n        if self.board[x][y] is None:\n            return False  # empty - no enemy\n        if self.board[x][y].color != my_color:\n            return True  # enemy\n        return False  # it is a friend\n\n    def is_friend(self, x, y, my_color):  # to check whether one of our own pieces stands there\n        if self.board[x][y] is None:\n            return False  # empty\n        if self.board[x][y].color == my_color:\n            return True  # friend\n        return False  # enemy\n\n    def is_anyone(self, x, y):\n        return self.board[x][y] is not None  # if someone is standing there, it is not None, hence True\n\n    def is_empty(self, x, y):\n        return not self.is_anyone(x, y)  # if it is not is_anyone, it is empty\n\nclass Piece:\n    def __init__(self, color, x, y):\n        self.color = color\n        self.x = x\n        self.y = y\n\n    def move(self, x, y):\n        self.x = x\n        self.y = y\n\n\nclass Pawn(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = []\n\n        if self.color == 'white' and self.y == 7:\n            return []\n        elif self.color == 'black' and self.y == 0:\n            return []\n        # the edge of the board definitively ends all moves, so the return stays\n\n        if self.color == \"white\":\n            if chessboard.is_empty(self.x, self.y + 1):\n                moves.append((self.x, self.y + 1))\n                if self.y == 1 and chessboard.is_empty(self.x, self.y + 2):  # the two-square move also needs the square ahead to be free\n                    moves.append((self.x, self.y + 2))\n        elif self.color == \"black\":\n            if chessboard.is_empty(self.x, self.y - 1):\n                moves.append((self.x, self.y - 1))\n                if self.y == 6 and chessboard.is_empty(self.x, self.y - 2):\n                    moves.append((self.x, self.y - 2))\n\n        # elif self.color == 'white' and self.y == 1:  # because the pawn has not moved yet\n        #     moves += [(self.x, self.y + 1), (self.x, self.y + 2)]\n        # elif self.color == 'black' and self.y == 6:  # because the pawn has not moved yet\n        #     moves += [(self.x, self.y - 1), (self.x, self.y - 2)]\n        # elif self.color == 'white':\n        #     moves += [(self.x, self.y + 1)]\n        # elif self.color == 'black':\n        #     moves += [(self.y, self.y - 1)]\n\n        if self.color == 'white':\n            if chessboard.is_enemy(self.x+1, self.y+1, self.color):\n                moves.append( (self.x+1, self.y+1) )\n            if chessboard.is_enemy(self.x-1, self.y+1, self.color):\n                moves.append( (self.x-1, self.y+1) )\n        elif self.color == 'black':\n            if chessboard.is_enemy(self.x + 1, self.y - 1, self.color):\n                moves.append((self.x + 1, self.y - 1))\n            if chessboard.is_enemy(self.x - 1, self.y - 1, self.color):\n                moves.append((self.x - 1, self.y - 1))\n\n        return filter_moves(moves)\n\n\nclass Knight(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = [\n            (self.x + 1, self.y + 2),\n            (self.x + 2, self.y + 1),\n            (self.x - 1, self.y + 2),\n            (self.x - 2, self.y + 1),\n            (self.x + 1, self.y - 2),\n            (self.x + 2, self.y - 1),\n            (self.x - 1, self.y - 2),\n            (self.x - 2, self.y - 1),\n        ]\n        return filter_moves(moves)\n\n\nclass Rook(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = []\n        for i in range(1, 8):  # because the piece has to actually move\n            moves.append((self.x + i, self.y))  # append to moves, as a tuple\n            moves.append((self.x - i, self.y))\n            moves.append((self.x, self.y + i))\n            moves.append((self.x, self.y - i))\n        good_moves = filter_moves(moves)  # optional, but it does not hurt\n        return good_moves\n\n\nclass King(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = [\n            (self.x, self.y + 1),\n            (self.x, self.y - 1),\n            (self.x + 1, self.y),\n            (self.x - 1, self.y),\n            (self.x + 1, self.y + 1),\n            (self.x - 1, self.y + 1),\n            (self.x + 1, self.y - 1),\n            (self.x - 1, self.y - 1),\n        ]\n        return filter_moves(moves)\n\n\nclass Bishop(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = []\n        for i in range(1, 8):\n            moves.append((self.x + i, self.y + i))\n            moves.append((self.x - i, self.y - i))\n            moves.append((self.x - i, self.y + i))\n            moves.append((self.x + i, self.y - i))\n        return filter_moves(moves)\n\n\nclass Queen(Piece):\n    def list_allowed_moves(self, chessboard):\n        moves = []\n        for i in range(1, 8):\n            moves.append((self.x + i, self.y))\n            moves.append((self.x - i, self.y))\n            moves.append((self.x, self.y + i))\n            moves.append((self.x, self.y - i))\n            moves.append((self.x + i, self.y + i))\n            moves.append((self.x - i, self.y - i))\n            moves.append((self.x - i, self.y + i))\n            moves.append((self.x + i, self.y - i))\n        return filter_moves(moves)\n","repo_name":"MarekAlan/Python_Basics_2","sub_path":"Chess/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":8940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11772375611","text":"from sqlalchemy import Column,Integer,String\n'''\nArticle table: article title / click count\n'''\nfrom .base import db\n# define the data models (one class is one table / one attribute is one column)\nclass Article(db.Model):\n\tid = Column(Integer,primary_key=True,autoincrement=True)\n\ttitle = Column(String(128),nullable=True)\n\tcount = Column(Integer,nullable=True)\n\tcount2 = Column(Integer, nullable=True)\n\n# code -> data model -> database\n\nclass Banner(db.Model):\n\tid = Column(Integer,primary_key=True,autoincrement=True)\n\ttitle = Column(String(128),nullable=True)\n\tcover_url = Column(String(255),nullable=True)\n\nclass News(db.Model):\n\tid = Column(Integer,primary_key=True,autoincrement=True)\n\ttitle = Column(String(128),nullable=False)\n\tsub_title = Column(String(128),nullable=True)\n\tcontent = Column(String(255),nullable=True)\n\n","repo_name":"EltricLee/2020-xgll-sanchuang","sub_path":"flask_project_ex/models/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16968497969","text":"import dolfin as dl\nimport ufl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\nimport sys\nimport os\nsys.path.append( os.environ.get('HIPPYLIB_BASE_DIR', \"../../\") )\nfrom hippylib import *\n\n\ndef u_boundary(x, on_boundary):\n    return on_boundary\n\nclass Poisson:\n    def __init__(self, mesh, Vh, prior):\n        \"\"\"\n        Construct a model by providing\n        - the mesh\n        - the finite element spaces for the STATE/ADJOINT variable and the PARAMETER variable\n        - the prior information\n        \"\"\"\n        self.mesh = mesh\n        self.Vh = Vh\n        \n        # Initialize Expressions\n        mtrue_exp = dl.Expression('std::log(2 + 7*(std::pow(std::pow(x[0] - 0.5,2) + std::pow(x[1] - 0.5,2),0.5) > 0.2))',\n                                  element=Vh[PARAMETER].ufl_element())\n        self.mtrue = dl.interpolate(mtrue_exp, self.Vh[PARAMETER]).vector()\n        self.f = dl.Constant(1.0)\n        self.u_o = dl.Vector()\n        \n        self.u_bdr = dl.Constant(0.0)\n        self.u_bdr0 = dl.Constant(0.0)\n        self.bc = dl.DirichletBC(self.Vh[STATE], self.u_bdr, u_boundary)\n        self.bc0 = dl.DirichletBC(self.Vh[STATE], self.u_bdr0, u_boundary)\n        \n        # Assemble constant matrices \n        self.prior = prior\n        self.Wuu = self.assembleWuu()\n        \n\n        self.computeObservation(self.u_o)\n        \n        self.A = None\n        self.At = None\n        self.C = None\n        self.Wmm = None\n        self.Wmu = None\n        \n        self.gauss_newton_approx=False\n        \n        self.solver = PETScKrylovSolver(mesh.mpi_comm(), \"cg\", amg_method())\n        self.solver_fwd_inc = PETScKrylovSolver(mesh.mpi_comm(), \"cg\", amg_method())\n        self.solver_adj_inc = PETScKrylovSolver(mesh.mpi_comm(), \"cg\", amg_method())\n        \n        self.solver.parameters[\"relative_tolerance\"] = 1e-15\n        self.solver.parameters[\"absolute_tolerance\"] = 1e-20\n        self.solver_fwd_inc.parameters = self.solver.parameters\n        self.solver_adj_inc.parameters = self.solver.parameters\n        \n    def generate_vector(self, component=\"ALL\"):\n        \"\"\"\n        Return the list x=[u,m,p] where:\n        - u is any object that describes the state variable\n        - m is a Vector object that describes the parameter variable.\n          (Need to support linear algebra operations)\n        - p is any object that describes the adjoint variable\n        \n        If component is STATE, PARAMETER, or ADJOINT return x[component]\n        \"\"\"\n        if component == \"ALL\":\n            x 
= [dl.Vector(), dl.Vector(), dl.Vector()]\n self.Wuu.init_vector(x[STATE],0)\n self.prior.init_vector(x[PARAMETER],0)\n self.Wuu.init_vector(x[ADJOINT], 0)\n elif component == STATE:\n x = dl.Vector()\n self.Wuu.init_vector(x,0)\n elif component == PARAMETER:\n x = dl.Vector()\n self.prior.init_vector(x,0)\n elif component == ADJOINT:\n x = dl.Vector()\n self.Wuu.init_vector(x,0)\n \n return x\n \n def init_parameter(self, m):\n \"\"\"\n Reshape m so that it is compatible with the parameter variable\n \"\"\"\n self.prior.init_vector(m,0)\n \n def assembleA(self,x, assemble_adjoint = False, assemble_rhs = False):\n \"\"\"\n Assemble the matrices and rhs for the forward/adjoint problems\n \"\"\"\n trial = dl.TrialFunction(self.Vh[STATE])\n test = dl.TestFunction(self.Vh[STATE])\n m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])\n Avarf = ufl.inner(ufl.exp(m)*ufl.grad(trial), ufl.grad(test))*ufl.dx\n if not assemble_adjoint:\n bform = ufl.inner(self.f, test)*ufl.dx\n Matrix, rhs = dl.assemble_system(Avarf, bform, self.bc)\n else:\n # Assemble the adjoint of A (i.e. the transpose of A)\n u = vector2Function(x[STATE], self.Vh[STATE])\n obs = vector2Function(self.u_o, self.Vh[STATE])\n bform = ufl.inner(obs - u, test)*ufl.dx\n Matrix, rhs = dl.assemble_system(dl.adjoint(Avarf), bform, self.bc0)\n \n if assemble_rhs:\n return Matrix, rhs\n else:\n return Matrix\n \n def assembleC(self, x):\n \"\"\"\n Assemble the derivative of the forward problem with respect to the parameter\n \"\"\"\n trial = dl.TrialFunction(self.Vh[PARAMETER])\n test = dl.TestFunction(self.Vh[STATE])\n u = vector2Function(x[STATE], Vh[STATE])\n m = vector2Function(x[PARAMETER], Vh[PARAMETER])\n Cvarf = ufl.inner(ufl.exp(m) * trial * ufl.grad(u), ufl.grad(test)) * ufl.dx\n C = dl.assemble(Cvarf)\n# print ( \"||m||\", x[PARAMETER].norm(\"l2\"), \"||u||\", x[STATE].norm(\"l2\"), \"||C||\", C.norm(\"linf\") )\n self.bc0.zero(C)\n return C\n \n def assembleWuu(self):\n \"\"\"\n Assemble the misfit operator\n \"\"\"\n trial = dl.TrialFunction(self.Vh[STATE])\n test = dl.TestFunction(self.Vh[STATE])\n varf = ufl.inner(trial, test)*ufl.dx\n Wuu = dl.assemble(varf)\n Wuu_t = Transpose(Wuu)\n self.bc0.zero(Wuu_t)\n Wuu = Transpose(Wuu_t)\n self.bc0.zero(Wuu)\n return Wuu\n \n def assembleWmu(self, x):\n \"\"\"\n Assemble the derivative of the parameter equation with respect to the state\n \"\"\"\n trial = dl.TrialFunction(self.Vh[STATE])\n test = dl.TestFunction(self.Vh[PARAMETER])\n p = vector2Function(x[ADJOINT], self.Vh[ADJOINT])\n m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])\n varf = ufl.inner(ufl.exp(m)*ufl.grad(trial),ufl.grad(p))*test*ufl.dx\n Wmu = dl.assemble(varf)\n Wmu_t = Transpose(Wmu)\n self.bc0.zero(Wmu_t)\n Wmu = Transpose(Wmu_t)\n return Wmu\n \n def assembleWmm(self, x):\n \"\"\"\n Assemble the derivative of the parameter equation with respect to the parameter (Newton method)\n \"\"\"\n trial = dl.TrialFunction(self.Vh[PARAMETER])\n test = dl.TestFunction(self.Vh[PARAMETER])\n u = vector2Function(x[STATE], self.Vh[STATE])\n m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])\n p = vector2Function(x[ADJOINT], self.Vh[ADJOINT])\n varf = ufl.inner(ufl.grad(p),ufl.exp(m)*ufl.grad(u))*trial*test*ufl.dx\n return dl.assemble(varf)\n\n \n def computeObservation(self, u_o):\n \"\"\"\n Compute the synthetic observation\n \"\"\"\n x = [self.generate_vector(STATE), self.mtrue, None]\n A, b = self.assembleA(x, assemble_rhs = True)\n \n A.init_vector(u_o, 1)\n dl.solve(A, u_o, b, \"cg\", amg_method())\n \n # Create 
noisy data, ud\n MAX = u_o.norm(\"linf\")\n parRandom.normal_perturb(.01 * MAX, u_o)\n \n def cost(self, x):\n \"\"\"\n Given the list x = [u,m,p] which describes the state, parameter, and\n adjoint variable compute the cost functional as the sum of \n the misfit functional and the regularization functional.\n \n Return the list [cost functional, regularization functional, misfit functional]\n \n Note: p is not needed to compute the cost functional\n \"\"\" \n assert x[STATE] is not None\n \n diff = x[STATE] - self.u_o\n Wuudiff = self.Wuu*diff\n misfit = .5 * diff.inner(Wuudiff)\n \n Rm = dl.Vector()\n self.prior.init_vector(Rm,0)\n self.prior.R.mult(x[PARAMETER], Rm)\n reg = .5 * x[PARAMETER].inner(Rm)\n \n cost = misfit + reg\n \n return cost, reg, misfit\n \n def solveFwd(self, out, x):\n \"\"\"\n Solve the forward problem.\n \"\"\"\n A, b = self.assembleA(x, assemble_rhs = True)\n A.init_vector(out, 1)\n\n self.solver.set_operator(A)\n self.solver.solve(out,b)\n\n \n def solveAdj(self, out, x):\n \"\"\"\n Solve the adjoint problem.\n \"\"\"\n At, badj = self.assembleA(x, assemble_adjoint = True,assemble_rhs = True)\n At.init_vector(out, 1)\n \n self.solver.set_operator(At)\n self.solver.solve(out,badj)\n \n# print (\"ADJ\", (self.At*out - badj).norm(\"l2\")/badj.norm(\"l2\"), nit)\n \n def evalGradientParameter(self,x, mg, misfit_only=False):\n \"\"\"\n Evaluate the gradient for the variation parameter equation at the point x=[u,m,p].\n Parameters:\n - x = [u,m,p] the point at which to evaluate the gradient.\n - mg the variational gradient (g, mtest) being mtest a test function in the parameter space\n (Output parameter)\n \n Returns the norm of the gradient in the correct inner product g_norm = sqrt(g,g)\n \"\"\" \n C = self.assembleC(x)\n\n self.prior.init_vector(mg,0)\n C.transpmult(x[ADJOINT], mg)\n if misfit_only == False:\n Rm = dl.Vector()\n self.prior.init_vector(Rm,0)\n self.prior.R.mult(x[PARAMETER], Rm) \n mg.axpy(1., Rm)\n \n g = dl.Vector()\n self.prior.init_vector(g,1)\n \n self.prior.Msolver.solve(g, mg)\n g_norm = np.sqrt( g.inner(mg) )\n \n return g_norm\n \n \n def setPointForHessianEvaluations(self, x, gauss_newton_approx=False): \n \"\"\"\n Specify the point x = [u,m,p] at which the Hessian operator (or the Gauss-Newton approximation)\n need to be evaluated.\n \"\"\" \n self.gauss_newton_approx = gauss_newton_approx \n self.A = self.assembleA(x)\n self.At = self.assembleA(x, assemble_adjoint=True )\n self.C = self.assembleC(x)\n if gauss_newton_approx:\n self.Wmu = None\n self.Wmm = None\n else:\n self.Wmu = self.assembleWmu(x)\n self.Wmm = self.assembleWmm(x)\n \n self.solver_fwd_inc.set_operator(self.A)\n self.solver_adj_inc.set_operator(self.At)\n\n \n def solveFwdIncremental(self, sol, rhs):\n \"\"\"\n Solve the incremental forward problem for a given rhs\n \"\"\"\n self.A.init_vector(sol,1)\n self.solver_fwd_inc.solve(sol,rhs)\n \n def solveAdjIncremental(self, sol, rhs):\n \"\"\"\n Solve the incremental adjoint problem for a given rhs\n \"\"\" \n self.At.init_vector(sol,1)\n self.solver_adj_inc.solve(sol, rhs)\n# print (\"AdjInc\", (self.At*sol-rhs).norm(\"l2\")/rhs.norm(\"l2\"), nit)\n \n def applyC(self, dm, out):\n self.C.mult(dm,out)\n \n def applyCt(self, dp, out):\n self.C.transpmult(dp,out)\n \n def applyWuu(self, du, out, gn_approx=False):\n self.Wuu.mult(du, out)\n \n def applyWum(self, dm, out):\n if self.gauss_newton_approx:\n out.zero()\n else:\n self.Wmu.transpmult(dm,out)\n\n \n def applyWmu(self, du, out):\n if self.gauss_newton_approx:\n 
out.zero()\n else:\n self.Wmu.mult(du, out)\n \n def applyR(self, dm, out):\n self.prior.R.mult(dm, out)\n \n def Rsolver(self): \n return self.prior.Rsolver\n \n def applyWmm(self, dm, out):\n if self.gauss_newton_approx:\n out.zero()\n else:\n self.Wmm.mult(dm, out)\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Model Continuous observations')\n parser.add_argument('--nx',\n default=64,\n type=int,\n help=\"Number of elements in x-direction\")\n parser.add_argument('--ny',\n default=64,\n type=int,\n help=\"Number of elements in y-direction\")\n args = parser.parse_args()\n try:\n dl.set_log_active(False)\n except:\n pass\n nx = args.nx\n ny = args.ny\n mesh = dl.UnitSquareMesh(nx, ny)\n \n rank = dl.MPI.rank(mesh.mpi_comm())\n nproc = dl.MPI.size(mesh.mpi_comm())\n \n Vh2 = dl.FunctionSpace(mesh, 'Lagrange', 2)\n Vh1 = dl.FunctionSpace(mesh, 'Lagrange', 1)\n Vh = [Vh2, Vh1, Vh2]\n \n prior = LaplacianPrior(Vh[PARAMETER], gamma=1e-8, delta=1e-9)\n model = Poisson(mesh, Vh, prior)\n \n m0 = dl.interpolate(dl.Expression(\"sin(x[0])\", element=Vh[PARAMETER].ufl_element()), Vh[PARAMETER])\n modelVerify(model, m0.vector(), is_quadratic = False, verbose = (rank==0))\n\n m0 = dl.interpolate(dl.Constant(0.0),Vh[PARAMETER])\n parameters = ReducedSpaceNewtonCG_ParameterList()\n parameters[\"rel_tolerance\"] = 1e-9\n parameters[\"abs_tolerance\"] = 1e-12\n parameters[\"max_iter\"] = 25\n parameters[\"globalization\"] = \"LS\"\n parameters[\"GN_iter\"] = 6\n if rank != 0:\n parameters[\"print_level\"] = -1\n \n solver = ReducedSpaceNewtonCG(model, parameters)\n\n \n x = solver.solve([None, m0.vector(), None])\n \n if rank == 0:\n if solver.converged:\n print (\"\\nConverged in \", solver.it, \" iterations.\")\n else:\n print (\"\\nNot Converged\")\n\n print (\"Termination reason: \", solver.termination_reasons[solver.reason])\n print (\"Final gradient norm: \", solver.final_grad_norm)\n print (\"Final cost: \", solver.final_cost)\n \n model.setPointForHessianEvaluations(x, gauss_newton_approx=False)\n Hmisfit = ReducedHessian(model, misfit_only=True)\n p = 20\n k = 50\n Omega = MultiVector(x[PARAMETER], k+p)\n parRandom.normal(1., Omega)\n\n d, U = doublePassG(Hmisfit, prior.R, prior.Rsolver, Omega, k, s=1, check=False)\n \n xxname = [\"state\", \"parameter\", \"adjoint\"]\n xx = [vector2Function(x[i], Vh[i], name=xxname[i]) for i in range(len(Vh))]\n \n with dl.XDMFFile(mesh.mpi_comm(), \"results/results.xdmf\") as fid:\n fid.parameters[\"functions_share_mesh\"] = True\n fid.parameters[\"rewrite_function_mesh\"] = False \n \n fid.write(xx[STATE],0)\n fid.write(xx[PARAMETER],0)\n fid.write(vector2Function(model.mtrue, Vh[PARAMETER], name = \"true parameter\"), 0)\n fid.write(vector2Function(prior.mean, Vh[PARAMETER], name = \"prior mean\"), 0)\n fid.write(xx[ADJOINT],0)\n fid.write(vector2Function(model.u_o, Vh[STATE], name = \"observation\"), 0)\n \n U.export(Vh[PARAMETER], \"results/evect.xdmf\", varname = \"gen_evects\", normalize = True)\n if rank == 0:\n np.savetxt(\"results/eigevalues.dat\", d)\n\n if rank == 0:\n plt.figure()\n plt.plot(range(0,k), d, 'b*',range(0,k), np.ones(k), '-r')\n plt.yscale('log')\n plt.show()\n \n","repo_name":"hippylib/hippylib","sub_path":"applications/poisson/model_continuous_obs.py","file_name":"model_continuous_obs.py","file_ext":"py","file_size_in_byte":14655,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"81"} +{"seq_id":"44275769352","text":"import shutil # All credits for this 
function goes to Nick Stinemates on stackoverflow\nimport os # Link can be found here https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder\n\n\ndef deleter():\n folder = 'finalPDF'\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\ndeleter()\n","repo_name":"GarretAnd/Parting-Wishes","sub_path":"Cleaner.py","file_name":"Cleaner.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7930385885","text":"# encoding: utf-8\n\n\n\"\"\"\n@author: yp\n@software: PyCharm\n@file: fakerDemo.py\n@time: 2019/8/16 0016 09:33\n\"\"\"\nfrom faker import Faker\n\n\"\"\"\n Faker是一个Python包,开源的GITHUB项目,主要用来创建伪数据,使用Faker包,无需再手动生成或者手\n写随机数来生成数据,只需要调用Faker提供的方法,即可完成数据的生成\n zh_CN en_AU\n \n https://www.jianshu.com/p/6bd6869631d9 ->>faker的方法一览\n\"\"\"\n\nfakerdata = Faker(locale='zh_CN')\n\n#姓名\nfaker_name=fakerdata.name()\n# print(faker_name)\n\n#地址\nfaker_add=fakerdata.address()\n# print(faker_add)\n\n\"\"\"\ndistrict():区geo_coordinate():地理坐标latitude():地理坐标(纬度)\nlongitude():地理坐标(经度)lexify():替换所有问号(“?”)带有随机字母的事件。\nnumerify():三位随机数字postcode():邮编province():省份\nstreet_address():街道地址street_name():街道名street_suffix():街、路\nrandom_digit():0~9随机数random_digit_not_null():1~9的随机数\nrandom_element():随机字母random_int():随���数字,默认0~9999,可以通过设置min,max来设置\nrandom_letter():随机字母random_number():随机数字,参数digits设置生成的数字位数\ncolor_name():随机颜色名hex_color():随机HEX颜色rgb_color():随机RGB颜色\nsafe_color_name():随机安全色名safe_hex_color():随机安全HEX颜色\nbs():随机公司服务名company():随机公司名(长)company_prefix():随机公司名(短)\ncompany_suffix():公司性质credit_card_expire():随机信用卡到期日\ncredit_card_full():生成完整信用卡信息credit_card_number():信用卡号\ncredit_card_provider():信用卡类型credit_card_security_code():信用卡安全码\ncurrency_code():货币编码am_pm():AM/PMcentury():随机世纪date():随机日期\ndate_between():随机生成指定范围内日期,参数:start_date,end_date取值:具体日期或者today,-30d,-30y类似\ndate_between_dates():随机生成指定范围内日期,用法同上date_object():随机生产从1970-1-1到指定日期的随机日期。\ndate_this_month():date_this_year():date_time():随机生成指定时间(1970年1月1日至今)\ndate_time_ad():生成公元1年到现在的随机时间date_time_between():用法同\ndatesfuture_date():未来日期future_datetime():未来时间month():随机月份\nmonth_name():随机月份(英文)past_date():随机生成已经过去的日期\npast_datetime():随机生成已经过去的时间time():随机24小时时间\ntimedelta():随机获取时间差time_object():随机24小时时间,time对象\ntime_series():随机TimeSeries对象timezone():随机时区\nunix_time():随机Unix时间year():随机年份file_extension():随机文件扩展名\nfile_name():随机文件名(包含扩展名,不包含路径)file_path():随机文件路径(包含文件名,扩展名)\nmime_type():随机mime Typeascii_company_email():随机ASCII公司邮箱名\nascii_email():随机ASCII邮箱ascii_free_email():ascii_safe_email():company_email():domain_name():生成域名\ndomain_word():域词(即,不包含后缀)email():free_email():free_email_domain():f.safe_email():安全邮箱\nf.image_url():随机URL地址ipv4():随机IP4地址ipv6():随机IP6地址\nmac_address():随机MAC地址tld():网址域名后缀(.com,.net.cn,等等,不包括.)\nuri():随机URI地址uri_extension():网址文件后缀\nuri_page():网址文件(不包含后缀)uri_path():网址文件路径(不包含文件名)\nurl():随机URL地址user_name():随机用户名isbn10():随机ISBN(10位)\nisbn13():随机ISBN(13位)job():随机职位paragraph():随机生成一个段落\nparagraphs():随机生成多个段落,通过参数nb来控制段落数,返回数组\nsentence():随机生成一句话sentences():随机生成多句话,与段落类似\ntext():随机生成一篇文章(不要幻想着人工智能了,至今没完全看懂一句话是什么意思)\nword():随机生成词语words():随机生成多个词语,用法与段落,句子,类似\nbinary():随机生成二进制编码\nboolean():True/Falselanguage_code():随机生成两位语言编码\nlocale():随机生成语言/国际 
信息md5():随机生成MD5\nnull_boolean():NULL/True/Falsepassword():随机生成密码,可选参数:length:密码长度;special_chars:是否能使用特殊字符;digits:是否包含数字;upper_case:是否包含大写字母;lower_case:是否包含小写字母\nsha1():随机SHA1sha256():随机SHA256uuid4():随机UUIDfirst_name():\nfirst_name_female():女性名first_name_male():男性名\nfirst_romanized_name():罗马名last_name():last_name_female():女姓\nlast_name_male():男姓last_romanized_name():name():随机生成全名\nname_female():男性全名name_male():女性全名\nromanized_name():罗马名\nmsisdn():移动台国际用户识别码,即移动用户的ISDN号码\nphone_number():随机生成手机号phonenumber_prefix():随机生成手机号段\nprofile():随机生成档案信息simple_profile():随机生成简单档案信息\n\n\"\"\"\n#市,县\nfakerdata.city_suffix()\n\n#国家\nfakerdata.country()\n\n#:国家编码\nfakerdata.country_code()","repo_name":"CypHelp/Memory","sub_path":"newWorld/TestToolLibraryDemo/fakerDemo.py","file_name":"fakerDemo.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74358342983","text":"\"\"\"Models for Blogly.\"\"\"\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n\ndef connect_db(app):\n \"\"\"Connect to database.\"\"\"\n\n db.app = app\n db.init_app(app)\n\n\nclass User(db.Model):\n \"\"\"User class for blogly\"\"\"\n\n __tablename__ = \"users\"\n\n id = db.Column(db.Integer,\n primary_key=True,\n autoincrement=True)\n first_name = db.Column(db.String(50),\n nullable=False)\n last_name = db.Column(db.String(50),\n nullable=False)\n img_url = db.Column(db.String(5000),\n nullable=False,\n default=\"/static/blank-profile-picture-973460_960_720.webp\")\n\n \n posts = db.relationship('Post')\n\n\nclass Post(db.Model):\n\n __tablename__ = \"posts\"\n\n id = db.Column(db.Integer,\n primary_key=True,\n autoincrement=True)\n title = db.Column(db.String(200),\n nullable=False)\n content = db.Column(db.String(1000000),\n nullable=False)\n created_at = db.Column(db.DateTime,\n nullable=False)\n\n user_id = db.Column(\n db.Integer,\n db.ForeignKey('users.id'),\n nullable=False) \n\n user = db.relationship('User')\n\n\n\n","repo_name":"Cheis415/blogly","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71216339784","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#Get original data\nf = open('he2022_real_refrac.txt', \"r\")\nlines = f.readlines()[1:]\n#Wavenumber:\nfreq = []\nreal = []\nimaginary = []\n\nfor i in lines:\n freq.append(float(i.split(' ')[0]))\n #print(wavel)\n real.append(float(i.split(' ')[1]))\n #print(real)\nf.close()\n\n#Real and imagianry indices stored in two different files for this data set\nf = open('he2022_imag_refrac.txt', \"r\")\nlines = f.readlines()[1:]\nfor i in lines:\n imaginary.append(float(i.split(' ')[1]))\nf.close()\n\n#print(\"Real (unextrapolated): \", real)\n#print(\"Imaginary (unextrapolated): \", imaginary)\n\n#Calculate wavelength in microns from frequency in cm^-1\nwavel = [None]*len(freq)\nfor i in range(0, len(freq)):\n wavel[i] = (1/freq[i])*10000\n\nprint(\"Beginning of wavelength range: \", wavel[0])\nprint(\"End of wavelength range: \", wavel[-1])\n#print(\"Real (unextrapolated): \", real)\n#print(\"Imaginary (unextrapolated): \", imaginary)\n\n#Plot original data\nplt.figure()\nplt.scatter(wavel, real, s=4, label = \"Real (original)\")\nplt.savefig('he2022_unextrapolated_real.png')\n\nplt.figure()\nplt.scatter(wavel, imaginary, s=4, label = \"Imaginary 
(original)\")\nplt.savefig('he2022_unextrapolated_imaginary.png')\n\nprint(\"Wavenumber closest to laser: \", freq[22])\nprint(\"Wavelength closest to laser: \", wavel[22])\nprint(\"Real index near laser wavelength: \", real[22])\nprint(\"Imaginary index near laser wavelength: \", imaginary[22])\n\n#He 2022 covers our laser wavelength. No need to extrapolate\n\"\"\"\n#Fit 1D poly to last two points\nlast2real = real[0:-1]\nlast2imaginary = imaginary[0:-1]\nlast2wavel = wavel[0:-1]\n#REMEMBER: change to first or last two points depending on range\nreal_fit = np.polyfit(last2wavel, last2real, 1)\nimaginary_fit = np.polyfit(last2wavel, last2imaginary, 1) #y = Nx + b\n\nreal_line = np.poly1d(real_fit)\nimaginary_line = np.poly1d(imaginary_fit)\n\n#num_extrapolation_pts = int((2.500 - 0.4)/0.005)\n#print(\"Number of points extrapolated from data: \", num_extrapolation_pts)\n#new_wavel = np.linspace(0.400, 2.500, num_extrapolation_pts)\nnew_wavel = np.arange(0.400, 2.500, 0.005)\nreal_extrapolation = real_line(new_wavel)\nimaginary_extrapolation = imaginary_line(new_wavel)\n\n#print(\"New wavelengths: \", new_wavel)\n\n#Get real and imaginary components of refractive index at 0.44 micrometers\nlaser_wavel = float(0.44)\n#i = list((i for i, e in enumerate(new_wavel) if e == laser_wavel))\n#i = np.where(np.isclose(new_wavel, laser_wavel))\n#print(i)\ni = 8 #index of 0.44 from 0.400 at given step size, 0.005\nreal_at_wavel = real_extrapolation[i]\n#print(\"Real (extrapolated): \", real_extrapolation)\nimaginary_at_wavel = imaginary_extrapolation[i]\n#print(\"Imaginary (extrapolated)\", imaginary_extrapolation)\n\nprint(\"Real component at \", laser_wavel, \": \", real_at_wavel)\nprint(\"Imaginary component at \", laser_wavel, \": \", imaginary_at_wavel)\n\n#Plot extrapolation\nplt.figure()\nplt.plot(new_wavel, real_extrapolation, label = \"Extrapolated real\")\nplt.scatter(wavel, real, s=4, label = \"Real (original)\")\n#plt.legend()\n#plt.title('Leger (1983): real index vs. wavelength')\n#plt.savefig('leger1983_extrapolated_real.png')\n\n#plt.figure()\nplt.plot(new_wavel, imaginary_extrapolation, label = \"Extrapolated imaginary\")\nplt.scatter(wavel, imaginary, s=4, label = \"Imaginary (original)\")\nplt.legend()\n#plt.ylim(-6e-9, 1.5e-8)\nplt.title('Mukai and Kraetschmer (1986): imaginary index vs. wavelength')\nplt.savefig('mukai1986_extrapolated_both.png')\n\n\"\"\"\n","repo_name":"hypatiameraviglia/glimmer","sub_path":"lit/he_2022/he2022_extrapolate_refrac.py","file_name":"he2022_extrapolate_refrac.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11378798932","text":"import tensorflow as tf\nimport numpy as np\nimport collections\nimport os\nimport argparse\nimport datetime as dt\n\n\"\"\"To run this code, you'll need to first download and extract the text dataset\n from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. 
Change the\n data_path variable below to your local exraction path\"\"\"\n\ndata_path = \"C:\\\\Users\\Andy\\Documents\\simple-examples\\data\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')\nparser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')\nargs = parser.parse_args()\n\ndef read_words(filename):\n with tf.gfile.GFile(filename, \"rb\") as f:\n return f.read().decode(\"utf-8\").replace(\"\\n\", \"\").split()\n\n\ndef build_vocab(filename):\n data = read_words(filename)\n\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n\n return word_to_id\n\n\ndef file_to_word_ids(filename, word_to_id):\n data = read_words(filename)\n return [word_to_id[word] for word in data if word in word_to_id]\n\n\ndef load_data():\n # get the data paths\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n # build the complete vocabulary, then convert text data to list of integers\n word_to_id = build_vocab(train_path)\n train_data = file_to_word_ids(train_path, word_to_id)\n valid_data = file_to_word_ids(valid_path, word_to_id)\n test_data = file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))\n\n print(train_data[:5])\n print(word_to_id)\n print(vocabulary)\n print(\" \".join([reversed_dictionary[x] for x in train_data[:10]]))\n return train_data, valid_data, test_data, vocabulary, reversed_dictionary\n\n\ndef batch_producer(raw_data, batch_size, num_steps):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0: batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = data[:, i * num_steps:(i + 1) * num_steps]\n x.set_shape([batch_size, num_steps])\n y = data[:, i * num_steps + 1: (i + 1) * num_steps + 1]\n y.set_shape([batch_size, num_steps])\n return x, y\n\n\nclass Input(object):\n def __init__(self, batch_size, num_steps, data):\n self.batch_size = batch_size\n self.num_steps = num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = batch_producer(data, batch_size, num_steps)\n\n\n# create the main model\nclass Model(object):\n def __init__(self, input, is_training, hidden_size, vocab_size, num_layers,\n dropout=0.5, init_scale=0.05):\n self.is_training = is_training\n self.input_obj = input\n self.batch_size = input.batch_size\n self.num_steps = input.num_steps\n self.hidden_size = hidden_size\n\n # create the word embeddings\n with tf.device(\"/cpu:0\"):\n embedding = tf.Variable(tf.random_uniform([vocab_size, self.hidden_size], -init_scale, init_scale))\n inputs = tf.nn.embedding_lookup(embedding, self.input_obj.input_data)\n\n if is_training and dropout < 1:\n inputs = tf.nn.dropout(inputs, dropout)\n\n # set up the state storage / extraction\n self.init_state = tf.placeholder(tf.float32, [num_layers, 2, self.batch_size, self.hidden_size])\n\n state_per_layer_list = tf.unstack(self.init_state, axis=0)\n 
rnn_tuple_state = tuple(\n [tf.contrib.rnn.LSTMStateTuple(state_per_layer_list[idx][0], state_per_layer_list[idx][1])\n for idx in range(num_layers)]\n )\n\n # create an LSTM cell to be unrolled\n cell = tf.contrib.rnn.LSTMCell(hidden_size, forget_bias=1.0)\n # add a dropout wrapper if training\n if is_training and dropout < 1:\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)\n if num_layers > 1:\n cell = tf.contrib.rnn.MultiRNNCell([cell for _ in range(num_layers)], state_is_tuple=True)\n\n output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32, initial_state=rnn_tuple_state)\n # reshape to (batch_size * num_steps, hidden_size)\n output = tf.reshape(output, [-1, hidden_size])\n\n softmax_w = tf.Variable(tf.random_uniform([hidden_size, vocab_size], -init_scale, init_scale))\n softmax_b = tf.Variable(tf.random_uniform([vocab_size], -init_scale, init_scale))\n logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)\n # Reshape logits to be a 3-D tensor for sequence loss\n logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])\n\n # Use the contrib sequence loss and average over the batches\n loss = tf.contrib.seq2seq.sequence_loss(\n logits,\n self.input_obj.targets,\n tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),\n average_across_timesteps=False,\n average_across_batch=True)\n\n # Update the cost\n self.cost = tf.reduce_sum(loss)\n\n # get the prediction accuracy\n self.softmax_out = tf.nn.softmax(tf.reshape(logits, [-1, vocab_size]))\n self.predict = tf.cast(tf.argmax(self.softmax_out, axis=1), tf.int32)\n correct_prediction = tf.equal(self.predict, tf.reshape(self.input_obj.targets, [-1]))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n if not is_training:\n return\n self.learning_rate = tf.Variable(0.0, trainable=False)\n\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n # optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.contrib.framework.get_or_create_global_step())\n # self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.cost)\n\n self.new_lr = tf.placeholder(tf.float32, shape=[])\n self.lr_update = tf.assign(self.learning_rate, self.new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self.lr_update, feed_dict={self.new_lr: lr_value})\n\n\ndef train(train_data, vocabulary, num_layers, num_epochs, batch_size, model_save_name,\n learning_rate=1.0, max_lr_epoch=10, lr_decay=0.93, print_iter=50):\n # setup data and models\n training_input = Input(batch_size=batch_size, num_steps=35, data=train_data)\n m = Model(training_input, is_training=True, hidden_size=650, vocab_size=vocabulary,\n num_layers=num_layers)\n init_op = tf.global_variables_initializer()\n orig_decay = lr_decay\n with tf.Session() as sess:\n # start threads\n sess.run([init_op])\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n saver = tf.train.Saver()\n for epoch in range(num_epochs):\n new_lr_decay = orig_decay ** max(epoch + 1 - max_lr_epoch, 0.0)\n m.assign_lr(sess, learning_rate * new_lr_decay)\n # m.assign_lr(sess, learning_rate)\n # print(m.learning_rate.eval(), new_lr_decay)\n current_state = np.zeros((num_layers, 2, batch_size, m.hidden_size))\n curr_time = dt.datetime.now()\n for step in 
range(training_input.epoch_size):\n # cost, _ = sess.run([m.cost, m.optimizer])\n if step % print_iter != 0:\n cost, _, current_state = sess.run([m.cost, m.train_op, m.state],\n feed_dict={m.init_state: current_state})\n else:\n seconds = (float((dt.datetime.now() - curr_time).seconds) / print_iter)\n curr_time = dt.datetime.now()\n cost, _, current_state, acc = sess.run([m.cost, m.train_op, m.state, m.accuracy],\n feed_dict={m.init_state: current_state})\n print(\"Epoch {}, Step {}, cost: {:.3f}, accuracy: {:.3f}, Seconds per step: {:.3f}\".format(epoch,\n step, cost, acc, seconds))\n\n # save a model checkpoint\n saver.save(sess, data_path + '\\\\' + model_save_name, global_step=epoch)\n # do a final save\n saver.save(sess, data_path + '\\\\' + model_save_name + '-final')\n # close threads\n coord.request_stop()\n coord.join(threads)\n\n\ndef test(model_path, test_data, reversed_dictionary):\n test_input = Input(batch_size=20, num_steps=35, data=test_data)\n m = Model(test_input, is_training=False, hidden_size=650, vocab_size=vocabulary,\n num_layers=2)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n # start threads\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n current_state = np.zeros((2, 2, m.batch_size, m.hidden_size))\n # restore the trained model\n saver.restore(sess, model_path)\n # get an average accuracy over num_acc_batches\n num_acc_batches = 30\n check_batch_idx = 25\n acc_check_thresh = 5\n accuracy = 0\n for batch in range(num_acc_batches):\n if batch == check_batch_idx:\n true_vals, pred, current_state, acc = sess.run([m.input_obj.targets, m.predict, m.state, m.accuracy],\n feed_dict={m.init_state: current_state})\n pred_string = [reversed_dictionary[x] for x in pred[:m.num_steps]]\n true_vals_string = [reversed_dictionary[x] for x in true_vals[0]]\n print(\"True values (1st line) vs predicted values (2nd line):\")\n print(\" \".join(true_vals_string))\n print(\" \".join(pred_string))\n else:\n acc, current_state = sess.run([m.accuracy, m.state], feed_dict={m.init_state: current_state})\n if batch >= acc_check_thresh:\n accuracy += acc\n print(\"Average accuracy: {:.3f}\".format(accuracy / (num_acc_batches-acc_check_thresh)))\n # close threads\n coord.request_stop()\n coord.join(threads)\n\n\nif args.data_path:\n data_path = args.data_path\ntrain_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()\nif args.run_opt == 1:\n train(train_data, vocabulary, num_layers=2, num_epochs=60, batch_size=20,\n model_save_name='two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr')\nelse:\n trained_model = args.data_path + \"\\\\two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr-38\"\n test(trained_model, test_data, reversed_dictionary)\n\n","repo_name":"adventuresinML/adventures-in-ml-code","sub_path":"lstm_tutorial.py","file_name":"lstm_tutorial.py","file_ext":"py","file_size_in_byte":11368,"program_lang":"python","lang":"en","doc_type":"code","stars":1042,"dataset":"github-code","pt":"81"} +{"seq_id":"41462873920","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GaussianConv2dReparameterization(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, bias=True, W_std=None, b_std=None,\n prior_per=\"layer\", scaled_variance=True):\n super(GaussianConv2dReparameterization, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n 
self.kernel_size = (kernel_size, kernel_size)\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.bias = bias\n self.scaled_variance = scaled_variance\n eps = 1e-6\n\n if W_std is None:\n if self.scaled_variance:\n W_std = 1.\n else:\n W_std = 1. / math.sqrt(self.in_channels * self.kernel_size[0] * self.kernel_size[1])\n if b_std is None:\n b_std = 1.\n\n # Initialize the parameters\n if prior_per == \"layer\":\n W_shape, b_shape = (1), (1)\n elif prior_per == \"parameter\":\n W_shape = (self.out_channels, self.in_channels, *self.kernel_size)\n b_shape = (self.out_channels)\n\n self.W_mu = 0.\n self.b_mu = 0.\n\n self.W_std = nn.Parameter(\n torch.ones(W_shape) * W_std, requires_grad=True)\n if self.bias:\n self.b_std = nn.Parameter(\n torch.ones(b_shape) * b_std, requires_grad=True)\n else:\n self.register_buffer(\n 'b_std', torch.ones(b_shape))\n\n def forward(self, X):\n W = self.W_mu + F.softplus(self.W_std) *\\\n torch.randn((self.out_channels, self.in_channels,\n *self.kernel_size), device=self.W_std.device)\n if self.scaled_variance:\n W = W / math.sqrt(self.in_channels * self.kernel_size[0] * self.kernel_size[1])\n if self.bias:\n b = self.b_mu + F.softplus(self.b_std) *\\\n torch.randn((self.out_channels), device=self.b_std.device)\n else:\n b = torch.zeros((self.out_channels), device=self.W_std.device)\n\n return F.conv2d(X, W, b, self.stride, self.padding, self.dilation)","repo_name":"tranbahien/you-need-a-good-prior","sub_path":"optbnn/bnn/reparam_layers/gaussian_reparam_conv.py","file_name":"gaussian_reparam_conv.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"30436677078","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Import Library\n\n# In[2]:\n\n\nfrom flask import Flask, render_template, request, redirect, jsonify\nimport pickle\nimport sklearn\nimport numpy as np \nfrom sklearn.preprocessing import LabelEncoder\nenc = LabelEncoder()\n\napp = Flask(__name__)\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n\n dtreepred = pickle.load(open('dtree_pickle','rb'))\n\n nama = str(request.form['nama'])\n umur = float(request.form['umur'])\n jenis_kelamin = float(request.form['jk'])\n jenis_nyeri = float(request.form['jn'])\n tekanan_darah = float(request.form['td'])\n kolesterol = float(request.form['kole'])\n gula_darah = float(request.form['gd'])\n detak_jantung = float(request.form['dj'])\n anginast = float(request.form['angina'])\n kemiringan = float(request.form['kemset'])\n\n datas = np.array([umur,jenis_kelamin,jenis_nyeri,tekanan_darah,kolesterol,gula_darah,detak_jantung,anginast,kemiringan])\n datas = np.reshape(datas, (1, -1))\n\n SeranganJantung = dtreepred.predict(datas)\n\n return render_template('hasil.html', finalData=SeranganJantung, umur=umur, nama=nama)\n else:\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"Afiyatar16/METODOLOGI-DATA-SCIENCE---16-Februari-2022","sub_path":"TR_METDAS/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30905145867","text":"\"\"\"Seed file to make sample data for pets db.\"\"\"\n\nfrom models import User, db, Post, Tag, PostTag\nfrom app import app\n\n# Create all tables\ndb.drop_all() # drop all tables from database\ndb.create_all()\n\n# If table isn't empty, 
empty it\nUser.query.delete() # delete everything\nPost.query.delete()\nTag.query.delete()\n\n\n# Add pets\nwhiskey = User(first_name='Whiskey', last_name=\"dog\", img_url=\"pending\")\nbowser = User(first_name='Bowser', last_name=\"dog\", img_url=\"pending\")\nspike = User(first_name='Spike', last_name=\"porcupine\", img_url=\"pending\")\ndb.session.add_all([whiskey, bowser, spike])\ndb.session.commit()\n\n\n# Add posts\npost1 = Post(title=\"hi\", content=\"hi again\", user_id=1)\npost2 = Post(title=\"how are you\", content=\"All good\", user_id=1)\npost3 = Post(title=\"Goodbye\", content=\"Bye!\", user_id=2)\npost4 = Post(title=\"Small Talk\", content=\"What do you do for fun\", user_id=3)\ndb.session.add_all([post1, post2, post3, post4])\ndb.session.commit()\n\n\n# Add tags\ntag1 = Tag(name='greeting')\ntag2 = Tag(name='farewell')\ntag3 = Tag(name='chat')\ndb.session.add_all([tag1, tag2, tag3])\ndb.session.commit()\n\n\n# Add post_tags\npost_tag1 = PostTag(post_id=1, tag_id=1)\npost_tag2 = PostTag(post_id=2, tag_id=1)\npost_tag3 = PostTag(post_id=3, tag_id=1)\npost_tag4 = PostTag(post_id=4, tag_id=3)\n\n\n# Add new objects to session, so they'll persist\n\n\n\n\n\n\ndb.session.add_all([post_tag1, post_tag2, post_tag3, post_tag4])\n\ndb.session.commit()\n\n\n# Commit--otherwise, this never gets saved!\n","repo_name":"Cxynthia-alt/blogly","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41348175098","text":"import time\n\n\ndef exercise_029(minimum_a, maximum_a, minimum_b, maximum_b):\n \"\"\"\n Consider all integer combinations of ab for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:\n\n 2^2=4, 2^3=8, 2^4=16, 2^5=32\n 3^2=9, 3^3=27, 3^4=81, 3^5=243\n 4^2=16, 4^3=64, 4^4=256, 4^5=1024\n 5^2=25, 5^3=125, 5^4=625, 5^5=3125\n If they are then placed in numerical order, with any repeats removed,\n we get the following sequence of 15 distinct terms:\n\n 4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125\n\n How many distinct terms are in the sequence generated by ab for 2 ≤ a ≤\n 100 and 2 ≤ b ≤ 100?\n\n :param minimum_a: Minimum value for a\n :type minimum_a: int\n :param maximum_a: Maximum value for a\n :type maximum_a: int\n :param minimum_b: Minimum value for b\n :type minimum_b: int\n :param maximum_b: Maximum value for b\n :type maximum_b: int\n\n :return: Sum of the diagonals\n :rtype: int\n \"\"\"\n\n terms = set()\n for a in range(minimum_a, maximum_a + 1):\n for b in range(minimum_b, maximum_b + 1):\n terms.add(a ** b)\n return len(terms)\n\n\nif __name__ == '__main__':\n start_time = time.time()\n print(exercise_029(2, 100, 2, 100))\n end_time = time.time()\n print('{} s'.format(end_time - start_time))\n","repo_name":"psanchezc23/project_euler","sub_path":"exercises/exercise_029.py","file_name":"exercise_029.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4693196670","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\n#\n# from common.Locker import Locker\nfrom common.Tool import Tool\nfrom common.Config import Config\nfrom common.Redis import Redis\nimport datetime\nimport os\nimport subprocess\nimport time\nimport demjson as JSON\n\nclass ControlService():\n\n def __init__(self, cmd, runConfig, globalId):\n self.cmd = cmd\n self.runConfig = JSON.decode(runConfig)\n self.globalId = globalId\n self.sender = Redis.get()\n self.build()\n\n 
def build(self):\n raise Exception(\"rewrite build cmd\")\n\n def start(self):\n fd = open('/tmp/cmd.log', 'w')\n subprocess.Popen(self.cmd, shell=True, stdout=fd)\n\n\n def stop(self):\n raise Exception(\"rewrite stop\")\n\n def status(self):\n raise Exception(\"rewrite status\")\n\n def restart(self):\n if self.status():\n self.stop()\n while self.status():\n time.sleep(1)\n self.start()\n\n else:\n self.start()\n\n\n def run(self):\n now = datetime.datetime.now()\n dateStr = now.strftime(\"%Y%m%d\")\n\n isRunTime = False\n for i in xrange(0, len(self.runConfig), 2):\n startTime = datetime.datetime.strptime(\"%s %s\" % (dateStr, self.runConfig[i]),'%Y%m%d %H:%M')\n endTime = datetime.datetime.strptime(\"%s %s\" % (dateStr, self.runConfig[i+1]),'%Y%m%d %H:%M')\n if endTime < startTime:\n endTime = endTime + datetime.timedelta(days=1)\n if startTime <= now <= endTime:\n isRunTime = True\n\n if isRunTime:\n if not self.status():\n self.start()\n else:\n if self.status():\n self.stop()\n\n\nclass CTPService(ControlService):\n\n def build(self):\n config = Config.get()\n appRoot = config['appRoot']\n ctpConfig = config['ctpConfig']\n\n self.cmd = \"%s/%s %s/%s\" % (appRoot, self.cmd, appRoot, ctpConfig)\n\n def stop(self):\n self.sender.publish(self.globalId, \"2\")\n\n def status(self):\n if self.sender.publish(self.globalId, \"1\"):\n return True\n return False\n\n\nclass ModelService(ControlService):\n\n def build(self):\n config = Config.get()\n appRoot = config['appRoot']\n\n self.cmd = \"CTP_CONFIG_PATH=%s python %s/%s %s\" % (os.environ.get('CTP_CONFIG_PATH'), appRoot, self.cmd, self.globalId)\n\n def stop(self):\n self.sender.publish(self.globalId, \"STOP\")\n\n\n def status(self):\n if self.sender.publish(self.globalId, \"STATUS\"):\n return True\n return False\n","repo_name":"lllzzz/NewCTPService","sub_path":"src/py/service/ControlService.py","file_name":"ControlService.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"44012915915","text":"from src.utils.get_types import get_data_types\n\n\ndef get_schema(data: dict) -> dict:\n '''takes in a dictionary data and returns a schema dictionary. 
'''\n    \n    schema = {}\n    for key, values in data.items():\n        value_types = get_data_types(values)\n        schema[key] = {'type': value_types,\n                       'tag': '', 'description': '', 'required': False}\n    return schema\n","repo_name":"K-Honsu/Data2Bots-Assessment","sub_path":"src/utils/get_schema.py","file_name":"get_schema.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929114505","text":"class Config:\n    def __init__(self):\n        self.embed_dense = True\n        self.embed_dense_dim = 512 # reduce the dimensionality of the BERT embedding\n        self.warmup_proportion = 0.05\n        self.use_bert = True\n        self.keep_prob = 0.9\n        self.relation_num = 21 # number of entity types\n\n        self.decay_rate = 0.1\n        self.decay_step = 400\n        self.num_checkpoints = 200\n\n        self.train_epoch = 20\n        self.sequence_length = 360 # Fine-tune sequence_length\n        self.train_batch_size = 2 # Fine-tune batch_size\n\n        self.learning_rate = 1e-4 # learning rate of the downstream structure 1e-4\n        self.embed_learning_rate = 3e-5 # fine-tuning learning rate for BERT 3e-5 5e-5\n\n        self.model_type = \"ALBERT\" # BERT XLNET\n\n        # path to the pretrained ALBERT model\n        self.bert_file = '/home/hezoujie/Models/albert/model.ckpt-best'\n        self.bert_config_file = '/home/hezoujie/Models/albert/albert_config.json'\n        self.vocab_file = '/home/hezoujie/Models/albert/vocab_chinese.txt'\n\n        # path to the pretrained XLNet model\n        # self.bert_file = '/home/hezoujie/Models/xlnet/xlnet_model.ckpt'\n        # self.xlnet_config_file = '/home/hezoujie/Models/xlnet/xlnet_config.json'\n        # self.vocab_file = '/home/hezoujie/Models/albert/vocab_chinese.txt'\n        # self.dropatt = 0.1\n        # self.clamp_len = -1\n        # self.is_training = True\n        # self.use_tpu = False\n        # self.use_bfloat16 = False\n        # self.init = \"normal\"\n        # self.init_range = 0.1\n        # self.init_std = 0.02\n\n        # path to the pretrained roberta_wwm_large model\n        # self.bert_file = '/home/hezoujie/Models/roberta_wwm_large/bert_model.ckpt'\n        # self.bert_config_file = '/home/hezoujie/Models/roberta_wwm_large/bert_config.json'\n        # self.vocab_file = '/home/hezoujie/Models/roberta_wwm_large/vocab.txt'\n\n        # result paths for predict.py ensemble.py get_ensemble_final_result.py post_ensemble_final_result.py\n        self.ensemble_source_file = '/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/ensemble/source_file/'\n        self.ensemble_result_file = '/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/ensemble/result_file/'\n\n        # name of the saved model, used for prediction\n        self.continue_training = False\n        self.checkpoint_path = \"/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/model/runs_7/1598627829/model_0.89_0.79_0.8378-693\"\n\n        self.model_dir = '/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/model' # model save path\n        self.new_data_process_quarter_final = '/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/clear_csv_data/data_process_quarter_final/' # output path of data preprocessing\n        self.source_data_dir = '/home/hezoujie/Competition/CCKS_Military_NER/data/Military_entity_recog/clear_csv_data/' # raw dataset\n\n        self.lstm_dim = 512\n        self.dropout = 0.4\n        self.use_origin_bert = False # True: use the original BERT, False: use dynamically fused BERT\n","repo_name":"jevishoo/HaihuaMRC","sub_path":"C3/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23669454660","text":"from sys import exit\r\ndef feedback(gl,gf):\r\n    l,i=[],0\r\n    while(i liminf):\r\n        n=int(input(\"Enter a number: \"))\r\n        if (n>liminf and n compute_v1.Image:\n    \n    image_client = compute_v1.ImagesClient()\n    newest_image = image_client.get_from_family(project=project, family=family)\n    return newest_image
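\n\n# (added note) e.g. calling this with project=\"debian-cloud\", family=\"debian-11\" should return the newest non-deprecated image in that family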
\n\n\ndef disk_from_image(\n    disk_type: str,\n    disk_size_gb: int,\n    boot: bool,\n    source_image: str,\n    auto_delete: bool = True,\n) -> compute_v1.AttachedDisk:\n    \n    boot_disk = compute_v1.AttachedDisk()\n    initialize_params = compute_v1.AttachedDiskInitializeParams()\n    initialize_params.source_image = source_image\n    initialize_params.disk_size_gb = disk_size_gb\n    initialize_params.disk_type = disk_type\n    boot_disk.initialize_params = initialize_params\n    # Remember to set auto_delete to True if you want the disk to be deleted when you delete\n    # your VM instance.\n    boot_disk.auto_delete = auto_delete\n    boot_disk.boot = boot\n    return boot_disk\n\n\ndef wait_for_extended_operation(\n    operation: ExtendedOperation, verbose_name: str = \"operation\", timeout: int = 300\n) -> Any:\n    \n\n    result = operation.result(timeout=timeout)\n\n    if operation.error_code:\n        print(\n            f\"Error during {verbose_name}: [Code: {operation.error_code}]: {operation.error_message}\",\n            file=sys.stderr,\n            flush=True,\n        )\n        print(f\"Operation ID: {operation.name}\", file=sys.stderr, flush=True)\n        raise operation.exception() or RuntimeError(operation.error_message)\n\n    if operation.warnings:\n        print(f\"Warnings during {verbose_name}:\\n\", file=sys.stderr, flush=True)\n        for warning in operation.warnings:\n            print(f\" - {warning.code}: {warning.message}\", file=sys.stderr, flush=True)\n\n    return result\n\n\ndef create_instance(\n    project_id: str,\n    zone: str,\n    instance_name: str,\n    disks: list = None,\n    machine_type: str = \"n1-standard-1\",\n    network_link: str = \"global/networks/default\",\n    subnetwork_link: str = None,\n    internal_ip: str = None,\n    external_access: bool = False,\n    external_ipv4: str = None,\n    accelerators: list[compute_v1.AcceleratorConfig] = None,\n    preemptible: bool = False,\n    spot: bool = False,\n    instance_termination_action: str = \"STOP\",\n    custom_hostname: str = None,\n    delete_protection: bool = False,\n) -> compute_v1.Instance:\n    \n    # avoid the mutable-default-argument pitfall: build the default boot disk per call\n    if disks is None:\n        disks = [\n            disk_from_image(\n                disk_type=\"zones/us-central1-c/diskTypes/pd-balanced\",\n                disk_size_gb=10,\n                boot=True,\n                source_image=\"projects/debian-cloud/global/images/debian-11-bullseye-v20230615\",\n            )\n        ]\n\n    instance_client = compute_v1.InstancesClient()\n\n\n    network_interface = compute_v1.NetworkInterface()\n    network_interface.network = network_link\n    if subnetwork_link:\n        network_interface.subnetwork = subnetwork_link\n\n    if internal_ip:\n        network_interface.network_i_p = internal_ip\n\n    if external_access:\n        access = compute_v1.AccessConfig()\n        access.type_ = compute_v1.AccessConfig.Type.ONE_TO_ONE_NAT.name\n        access.name = \"External NAT\"\n        access.network_tier = access.NetworkTier.PREMIUM.name\n        if external_ipv4:\n            access.nat_i_p = external_ipv4\n        network_interface.access_configs = [access]\n\n    # Collect information into the Instance object.\n    instance = compute_v1.Instance()\n    instance.network_interfaces = [network_interface]\n    instance.name = instance_name\n    instance.disks = disks\n    if re.match(r\"^zones/[a-z\\d\\-]+/machineTypes/[a-z\\d\\-]+$\", machine_type):\n        instance.machine_type = machine_type\n    else:\n        instance.machine_type = f\"zones/{zone}/machineTypes/{machine_type}\"\n\n    if accelerators:\n        instance.guest_accelerators = accelerators\n\n    if preemptible:\n        # Set the preemptible setting\n        warnings.warn(\n            \"Preemptible VMs are being replaced by Spot VMs.\", DeprecationWarning\n        )\n        instance.scheduling = compute_v1.Scheduling()\n        instance.scheduling.preemptible = True\n\n    if spot:
\n        # Set the Spot VM setting\n        instance.scheduling = compute_v1.Scheduling()\n        instance.scheduling.provisioning_model = (\n            compute_v1.Scheduling.ProvisioningModel.SPOT.name\n        )\n        instance.scheduling.instance_termination_action = instance_termination_action\n\n    if custom_hostname is not None:\n        # Set the custom hostname for the instance\n        instance.hostname = custom_hostname\n\n    if delete_protection:\n        # Set the delete protection bit\n        instance.deletion_protection = True\n\n    # Prepare the request to insert an instance.\n    request = compute_v1.InsertInstanceRequest()\n    request.zone = zone\n    request.project = project_id\n    request.instance_resource = instance\n\n    # Wait for the create operation to complete.\n    print(f\"Creating the {instance_name} instance in {zone}...\")\n\n    operation = instance_client.insert(request=request)\n\n    wait_for_extended_operation(operation, \"instance creation\")\n\n    print(f\"Instance {instance_name} created.\")\n    return instance_client.get(project=project_id, zone=zone, instance=instance_name)\n\n\n# start instance\ndef start_instance(project_id: str, zone: str, instance_name: str) -> None:\n    \n    instance_client = compute_v1.InstancesClient()\n\n    operation = instance_client.start(\n        project=project_id, zone=zone, instance=instance_name\n    )\n\n    wait_for_extended_operation(operation, \"instance start\")\n    print(f\"Instance {instance_name} started.\")\n\n\n# stop instance\ndef stop_instance(project_id: str, zone: str, instance_name: str) -> None:\n    \n    instance_client = compute_v1.InstancesClient()\n\n    operation = instance_client.stop(\n        project=project_id, zone=zone, instance=instance_name\n    )\n    wait_for_extended_operation(operation, \"instance stopping\")\n    print(f\"Instance {instance_name} stopped.\")\n\n\n# Delete Instance\ndef delete_instance(project_id: str, zone: str, instance_name: str) -> None:\n    \n    \n    instance_client = compute_v1.InstancesClient()\n\n    print(f\"Deleting {instance_name} from {zone}...\")\n    operation = instance_client.delete(\n        project=project_id, zone=zone, instance=instance_name\n    )\n    wait_for_extended_operation(operation, \"instance deletion\")\n    print(f\"Instance {instance_name} deleted.\")\n\n\n# create_instance(project_id=\"sanguine-line-391106\", zone=\"us-central1-c\", instance_name=\"my-instance\");\n# start_instance(project_id=\"sanguine-line-391106\", zone=\"us-central1-c\", instance_name=\"my-instance\");\n# stop_instance(project_id=\"sanguine-line-391106\", zone=\"us-central1-c\", instance_name=\"my-instance\");\n# delete_instance(project_id=\"sanguine-line-391106\", zone=\"us-central1-c\", instance_name=\"my-instance\");","repo_name":"Suprat-Poudel/GCP","sub_path":"Instance/vm_instance_operation.py","file_name":"vm_instance_operation.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"583947382","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 28 18:30:10 2018\n\n@author: Mohammad Doosti Lakhani\n\"\"\"\n\"\"\"\nBe sure not to name your file \"xgboost.py\"; if you do, you will not be able to import the xgboost library!\n\"\"\"
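\n# (added note) accordingly, this repo names the file xgb.py, sidestepping the name clash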
\n# Importing Library\nimport pandas as pd\n\n# Import Dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nx = dataset.iloc[:, 3:-1].values\ny = dataset.iloc[:, -1].values\n\n# Encoding categorical data to one hot encoded form\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_country = LabelEncoder()\nx[:, 1] = labelencoder_country.fit_transform(x[:, 1]) # Encoding 'Geography' to numbers\n\nlabelencoder_gender = LabelEncoder()\nx[:, 2] = labelencoder_gender.fit_transform(x[:, 2]) # Encoding 'Gender' to numbers\n\nonehotencoder = OneHotEncoder(categorical_features = [1])\nx = onehotencoder.fit_transform(x).todense() # Encoding 'Geography' to one hot\n\nx = x[:, 1:] # get rid of dummy variable trap\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nx = sc.fit_transform(x)\n\nfrom sklearn.preprocessing import MinMaxScaler\nscalar = MinMaxScaler()\nx= scalar.fit_transform(x)\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)\n\n# Fitting XGboost model\nfrom xgboost import XGBClassifier\nparam = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic','eval_metric':'auc'}\nclassifier = XGBClassifier(**param)\nclassifier = classifier.fit(x_train,y_train)\n\n# Accuracy on train and test set using K-Fold Cross Validation\nfrom sklearn.model_selection import cross_val_score\naccs_train = cross_val_score(estimator = classifier, X = x_train, y = y_train, cv=10)\naccs_test = cross_val_score(estimator = classifier, X = x_test, y = y_test, cv=10)\n\nacc_train = accs_train.mean()\nacc_test = accs_test.mean()\nprint('Train status = %{} Accuracy'.format(acc_train*100))\nprint('Test status = %{} Accuracy'.format(acc_test*100))\n","repo_name":"Nikronic/Machine-Learning-Models","sub_path":"Part 10 - Model Selection & Boosting/Section 27 - XGBoost/xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"18420796043","text":"import numpy as np\nimport os\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom textbook_5_regression.gauss_basis import gauss_basis_one as gau\n\n# Load data =====\ncurrent_dir = os.path.dirname(__file__)\nparent_dir = str(Path(current_dir).resolve().parent)\nloaded_data = np.load(parent_dir + \"/base_data/base_data_1.npz\")\nX = loaded_data[\"X\"]\nX_min = loaded_data[\"X_min\"]\nX_max = loaded_data[\"X_max\"]\nX_n = loaded_data[\"X_n\"]\nT = loaded_data[\"T\"]\n\n# K-fold cross validation =====\ndef kfold_gauss_func(x, t, m, k):\n    n = x.shape[0]\n    mse_train = np.zeros(k)\n    mse_test = np.zeros(k)\n    for i in range(0, k):\n        x_train = x[np.fmod(range(n), k) != i]\n        t_train = t[np.fmod(range(n), k) != i]\n        x_test = x[np.fmod(range(n), k) == i]\n        t_test = t[np.fmod(range(n), k) == i]\n        wm = gau.fit_gauss_func(x_train, t_train, m)\n        mse_train[i] = gau.mse_gauss_func(x_train, t_train, wm)\n        mse_test[i] = gau.mse_gauss_func(x_test, t_test, wm)\n    return mse_train, mse_test\n\n\nif __name__ == '__main__':\n    M = range(2, 12)\n    K = 16\n    Cv_Gauss_train = np.zeros((K, len(M)))\n    Cv_Gauss_test = np.zeros((K, len(M)))\n    for i in range(0, len(M)):\n        Cv_Gauss_train[:, i], Cv_Gauss_test[:, i] = kfold_gauss_func(X, T, M[i], K)\n    mean_Gauss_train = np.sqrt(np.mean(Cv_Gauss_train, axis=0))
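\n    # (added note) averaging the per-fold MSEs over the K folds and taking the square root gives an RMS error per basis count M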
\n    mean_Gauss_test = np.sqrt(np.mean(Cv_Gauss_test, axis=0))\n\n    # Create graph =====\n    plt.figure(figsize=(4, 3))\n    plt.plot(M, mean_Gauss_train, marker=\"o\", linestyle=\"-\", color=\"k\",\n             markerfacecolor=\"w\", label=\"training\")\n    plt.plot(M, mean_Gauss_test, marker=\"o\", linestyle=\"-\", color=\"cornflowerblue\",\n             markeredgecolor=\"black\", label=\"test\")\n    plt.legend(loc=\"lower left\", fontsize=10)\n    plt.ylim(0, 20)\n    plt.grid(True)\n    plt.show()\n","repo_name":"ayoshimatsu/machine_learn_begin","sub_path":"textbook_5_regression/gauss_basis/verify_kfold_gauss.py","file_name":"verify_kfold_gauss.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2278819063","text":"'''\nParameter file for 3D fakequakes run\n'''\n\n\nfrom mudpy import fakequakes,runslip,forward\nimport numpy as np\nfrom obspy.core import UTCDateTime\n\n\n\n######## GLOBALS ########\nhome='/Users/dmelgarm/FakeQuakes/'\nproject_name='Cascadia3D_test'\nrun_name='cascadia3D'\n################################################################################\n\n\n############## What do you want to do?? ##################\ninit=0\nmake_ruptures=0\nmake_GFs=0\nmake_synthetics=0\nmake_waveforms=1\nmake_hf_waveforms=0\nmatch_filter=0\nmake_statics=0\n# Things that only need to be done once\nload_distances=1\nG_from_file=1\n###############################################################################\n\n\n############# Run-time parameters ##################\nncpus=4\nhot_start=0\nmodel_name='cascadia.mod' # Velocity model\nmoho_depth_in_km=25.0\nfault_name='cascadia30.fault'\nslab_name='cascadia_slab.xyz' # Slab 1.0 Ascii file\nmesh_name='cascadia30.mshout' \ndistances_name=fault_name # Name of distances matrices\nrupture_list='ruptures.list'\nUTM_zone='10T'\nscaling_law='T' # T for thrust, S for strike-slip, N for normal\n\n#Station information\nGF_list='cascadia_test.gflist'\nG_name=run_name #Name of G matrix for waveforms\nG_name_static=run_name+'_statics' #Name of G matrix for statics\n\nNrealizations=4 # Number of fake ruptures to generate per magnitude bin\ntarget_Mw=np.arange(8.2,9.3,0.2) # Of what approximate magnitudes\nmax_slip=100 #Maximum slip (m) allowed in the model\nmax_slip_rule=True #restrict max slip to 3 times Allen & Hayes 2017\n\n# Displacement and velocity waveform parameters\nNFFT=128 ; dt=1.0\n#fk-parameters\ndk=0.1 ; pmin=0 ; pmax=1 ; kmax=20\ncustom_stf=None\n\n#High frequency waveform parameters\nhf_dt=0.01\nduration=120\nPwave=True\n\n#Match filter parameters\nzero_phase=False\norder=4\nfcorner=1.0\n\n# Correlation function parameters\nhurst=0.75 # Melgar and Hayes 2019 found Hurst exponent is probably closer to 0.4\nLdip='auto' # Correlation length scaling, 'auto' uses Melgar & Hayes 2019\nLstrike='auto' # MB2002 uses Mai & Beroza 2002\nlognormal=True # Keep this as true\nslip_standard_deviation=0.46 # Value from Melgar & Hayes 2019\n\n# Rupture parameters\ntime_epi=UTCDateTime('2014-04-01T23:46:47Z')\nhypocenter=None #=None is random hypocenter\nsource_time_function='dreger' # options are 'triangle' or 'cosine' or 'dreger'\nstf_falloff_rate=4 #Only affects Dreger STF, values of 4-8 are reasonable\nnum_modes=200 # The more modes, the better you can model the high frequency stuff\nstress_parameter=50 #measured in bars\nhigh_stress_depth=30 # SMGA must be below this depth (measured in km)\nrake=90 # average rake\nrise_time_depths=[10,15] #Transition depths for rise time scaling (if slip is shallower than the first index, rise times are twice as long as calculated)
\nmean_slip_name=None\nshear_wave_fraction=0.8\n\n#Enforcement of rules on area scaling and hypo location\nforce_area=False\nforce_magnitude=False\nforce_hypocenter=False\nuse_hypo_fraction=False\n###############################################################################\n\n\n\n#Initialize project folders\nif init==1:\n    fakequakes.init(home,project_name)\n    \n#Generate rupture models\nif make_ruptures==1: \n    fakequakes.generate_ruptures(home,project_name,run_name,fault_name,slab_name,\n        mesh_name,load_distances,distances_name,UTM_zone,target_Mw,model_name,\n        hurst,Ldip,Lstrike,num_modes,Nrealizations,rake,\n        rise_time_depths,time_epi,max_slip,source_time_function,lognormal,\n        slip_standard_deviation,scaling_law,ncpus,mean_slip_name=mean_slip_name,\n        force_magnitude=force_magnitude,force_area=force_area,hypocenter=hypocenter,\n        force_hypocenter=force_hypocenter,shear_wave_fraction=shear_wave_fraction,\n        max_slip_rule=max_slip_rule,use_hypo_fraction=use_hypo_fraction)\n\n \n# Prepare waveforms and synthetics \nif make_GFs==1 or make_synthetics==1:\n    runslip.inversionGFs(home,project_name,GF_list,None,fault_name,model_name,\n        dt,None,NFFT,None,make_GFs,make_synthetics,dk,pmin,\n        pmax,kmax,0,time_epi,hot_start,ncpus,custom_stf,impulse=True) \n\n#Make low frequency waveforms\nif make_waveforms==1:\n    forward.waveforms_fakequakes(home,project_name,fault_name,rupture_list,GF_list,\n                model_name,run_name,dt,NFFT,G_from_file,G_name,source_time_function,\n                stf_falloff_rate,hot_start=hot_start)\n\n#Generate static offsets\nif make_statics==1:\n    forward.coseismics_fakequakes(home,project_name,GF_list,G_from_file,G_name_static,\n                model_name,rupture_list)\n\n#Make semistochastic HF waveforms    \nif make_hf_waveforms==1:\n    forward.hf_waveforms(home,project_name,fault_name,rupture_list,GF_list,\n                model_name,run_name,dt,NFFT,G_from_file,G_name,rise_time_depths,\n                moho_depth_in_km,ncpus,source_time_function=source_time_function,\n                duration=duration,stf_falloff_rate=stf_falloff_rate,hf_dt=hf_dt,\n                Pwave=Pwave,hot_start=hot_start,stress_parameter=stress_parameter,\n                high_stress_depth=high_stress_depth)\n\n# Combine LF and HF waveforms with match filter   \nif match_filter==1:\n    forward.match_filter(home,project_name,fault_name,rupture_list,GF_list,\n        zero_phase,order,fcorner)\n    \n    \n","repo_name":"dmelgarm/MudPy","sub_path":"examples/fakequakes/3D/cascadia_3D.fq.py","file_name":"cascadia_3D.fq.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"62"} +{"seq_id":"35422521567","text":"from replay_buffer import DequeReplayBuffer\nfrom agents.DQNAgent import DQNAgent\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# create the buffer\nreplay_buffer = DequeReplayBuffer(capacity=500000)\n# env_name=\"CartPole-v1\" can use SimpleModel\n# try Pong-v0 later\n\n# create agent\nagent = DQNAgent(replay_buffer,\n                 env_name=\"CartPole-v1\",\n                 model_name=\"simple\",\n                 n_episodes=300,\n                 epsilon=0.5,\n                 batch_size=64,\n                 learning_rate=0.0005,\n                 update_interval=150,\n                 gamma=1,\n                 optimizer=\"adam\",\n                 modify_env=False)\nagent.populate_buffer()\nrecord, rolling_avg, loss_record = agent.train(policy_name=\"egreedyexp\")\nx = np.arange(len(record))\n# print(record)\n\nfig, rewards_plot = plt.subplots(figsize=(6, 6))\nrewards_plot.plot(x, record, label=\"rewards\")\nrewards_plot.plot(x, rolling_avg, label=\"rolling_avg\")\nrewards_plot.plot(x, loss_record,
label=\"loss\")\nrewards_plot.legend()\nplt.savefig('train_results.png')\nplt.show()\n\n#%%\n","repo_name":"furrrow/RL_pytorch","sub_path":"main_CartPole_DQN.py","file_name":"main_CartPole_DQN.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15609030668","text":"# Aaron Fienberg\n#\n# Provides utility functions for:\n# building DEgg ADC sample histograms in real time,\n# plotting the histograms,\n# and calculating summary statistics from the histograms\n#\n# Histograms are python dictionaries with\n# the following three key value pairs:\n# 'min': the minimum ADC value seen\n# 'max': the maximum ADC value seen\n# 'counts': an array of length (max-min) + 1; the histogram bin counts\n#\n# the ADC sample value associated with bin index i in the counts array\n# is hist['min'] + i\n\nfrom iceboot.test_waveform import parseTestWaveform\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\n\n\ndef hist_mean(hist):\n bin_vals = np.arange(hist[\"min\"], hist[\"max\"] + 1)\n return np.average(bin_vals, weights=hist[\"counts\"])\n\n\ndef hist_var(hist):\n bin_vals = np.arange(hist[\"min\"], hist[\"max\"] + 1)\n mean = hist_mean(hist)\n\n return np.average((bin_vals - mean) ** 2, weights=hist[\"counts\"])\n\n\ndef hist_std(hist):\n return math.sqrt(hist_var(hist))\n\n\ndef plot_hist(hist, log_y=True):\n bin_vals = np.arange(hist[\"min\"], hist[\"max\"] + 1)\n plt.bar(bin_vals, hist[\"counts\"], width=1, color=\"black\")\n if log_y:\n plt.gca().set_yscale(\"log\")\n plt.xlabel(\"ADU\", fontsize=16)\n plt.ylabel(\"n samples\", fontsize=16)\n plt.show()\n\n\ndef make_sw_trig_histogram(session, channel, wfm_period=3,\n n_waveforms=1000, blocksize=0):\n \"\"\" Acquires software triggered waveforms and returns an\n ADC sample histogram dictionary \"\"\"\n\n session.startDEggSWTrigStream(channel, wfm_period)\n\n total_counts = None\n wfm = None\n inputWfs = []\n for _ in range(n_waveforms):\n if blocksize == 0:\n # Read waveforms one-at-a-time\n wfm = parseTestWaveform(session.readWFMFromStream())\n else:\n # Read waveforms in blocks\n if len(inputWfs) == 0:\n inputWfs = session.readWFBlock(blocksize)\n if len(inputWfs) == 0:\n raise RuntimeError(\"Read empty waveform block\")\n wfm = inputWfs.pop()\n\n if wfm[\"channel\"] != channel:\n raise RuntimeError(\"Read a waveform from the wrong channel!\")\n\n # histogram the ADC samples from this waveform\n wfm_counts = np.bincount(wfm[\"waveform\"], minlength=(1 << 14))\n\n # add this waveform's ADC histogram to the total histogram\n if total_counts is None:\n total_counts = wfm_counts\n else:\n total_counts += wfm_counts\n\n session.endStream()\n\n # prepare the histogram dictionary\n nonzero_args = np.argwhere(total_counts > 0)\n min_samp = nonzero_args[0][0]\n max_samp = nonzero_args[-1][0]\n\n histogram = {\n \"counts\": total_counts[min_samp : max_samp + 1],\n \"min\": min_samp,\n \"max\": max_samp,\n }\n\n return histogram\n\n\ndef calculate_quantiles(hist):\n \"\"\"calculates the one percent and 99 percent quantiles\n Used in D-Egg and mDOM STF tests\n \"\"\"\n cdf = np.cumsum(hist[\"counts\"]) / np.sum(hist[\"counts\"])\n\n # smallest x where p(ADC <= x) >= 0.01\n one_pct_q = hist[\"min\"] + np.argwhere(cdf >= 0.01)[0][0]\n\n # smallest x where p(ADC <= x) >= 0.99\n ninetynine_pct_q = hist[\"min\"] + np.argwhere(cdf >= 0.99)[0][0]\n\n return one_pct_q, ninetynine_pct_q\n\n\ndef print_hist_stats(hist):\n print(\"Hist metrics:\")\n 
print(f'n samples: {hist[\"counts\"].sum()}')\n print(f\"mean: {hist_mean(hist)}\")\n print(f\"std: {hist_std(hist)}\")\n print(f'min: {hist[\"min\"]}')\n print(f'max: {hist[\"max\"]}')\n","repo_name":"toyoyama6/degg_scan","sub_path":"software/STM32Tools/python/DEggTest/adc_histogram.py","file_name":"adc_histogram.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4399321691","text":"#!/usr/bin/env python3\n\"\"\"\nTests for e164convert\n\"\"\"\nimport imp \nimport json\nimport unittest\n\nec = imp.load_source(\"e164convert\", \"e164convert\") #has no .py extension\n\ndef load_cases(filename: str) -> list:\n \"\"\"Load test cases list from the relevant file in testcases/\"\"\"\n try:\n with open(f\"testcases/{filename}.json\", \"r\") as f:\n return json.load(f)\n except FileNotFoundError as e:\n raise FileNotFoundError(f\"Test case file '{filename}.json' not found under testcases/\")\n\nclass TestClean(unittest.TestCase):\n \"\"\"Tests for clean(user_input: str)\"\"\"\n \n def test_clean(self):\n for case in load_cases(\"unit_clean\"):\n self.assertEqual(ec.clean(case[0]), case[1], f\"clean('{case[0]}') should return {case[1]}\")\n\nclass TestIsUKValid(unittest.TestCase):\n \"\"\"Tests for is_uk_valid(phone_number: str)\"\"\"\n \n def test_is_uk_valid(self):\n for case in load_cases(\"unit_isukvalid\"):\n self.assertEqual(ec.is_uk_valid(case[0]), case[1], f\"is_uk_valid('{case[0]}') should return {case[1]}\")\n\nclass TestGetE164(unittest.TestCase):\n \"\"\"Tests for get_e164(phone_number: str)\"\"\"\n \n def test_get_e164(self):\n for case in load_cases(\"unit_gete164\"):\n self.assertEqual(ec.get_e164(case[0]), case[1], f\"get_e164('{case[0]}') should return {case[1]}\")\n\nclass TestDiagnoseCountry(unittest.TestCase):\n \"\"\"Tests for diagnose_country(phone_number: str)\"\"\"\n\n def test_diagnose_country(self):\n for case in load_cases(\"unit_diagnosecountry\"):\n self.assertEqual(ec.diagnose_country(case[0]), case[1], f\"diagnose_country('{case[0]}') should return {case[1]}\")\n\n\ndef main():\n unittest.main()\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"okayvid/e164convert","sub_path":"unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19793427723","text":"\nimport turtle\n\ndef Levy(l, level):\n if level != 0:\n turtle.left(45)\n Levy(((l ** 2)/2)**0.5, level - 1)\n turtle.right(90)\n Levy(((l ** 2)/2)**0.5, level - 1)\n turtle.left(45)\n else:\n turtle.forward(l)\n return\n\nLevy(400, 10)\nturtle.mainloop()","repo_name":"VIadik/School","sub_path":"fractal/levy.py","file_name":"levy.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22421167681","text":"import os\nimport time\n\nimport torch\nfrom config.options_conf import args_parser\nfrom http_request.async_request import AsyncRequest\nfrom loguru import logger\nfrom module.models_module import Darknet\nfrom utils.common_utils import CommonUtils\nfrom utils.tool_utils import set_random_seed, timer, weights_init_normal\n\n# arguments parsing\nargs = args_parser()\n\n\nclass Server(object):\n \"\"\" the server class is responsible for scheduling each client\n to participate in federated training, testing and detecting \"\"\"\n # set random seed for list, numpy, CPU, 
current GPU and all the GPUs\n set_random_seed(args)\n\n # create folders for saving model and log information\n args.model_folder_path = os.path.join(\"./save\")\n args.log_folder_path = os.path.join(\"./log\")\n\n if not os.path.exists(args.model_folder_path):\n os.makedirs(args.model_folder_path)\n if not os.path.exists(args.log_folder_path):\n os.makedirs(args.log_folder_path)\n\n # add device, model and log file path arguments\n args.device = torch.device(\"cpu\")\n\n args.model_file_path = os.path.join(args.model_folder_path,\n \"D_{}_M_{}_SE_{}_CE_{}.pkl\".format(args.dataset, args.model,\n args.server_epoch, args.client_epoch))\n args.log_file_path = os.path.join(args.log_folder_path,\n \"D_{}_M_{}_SE_{}_CE_{}.log\".format(args.dataset, args.model,\n args.server_epoch, args.client_epoch))\n\n # initialize log output configuration\n logger.add(args.log_file_path)\n\n # initiate model and load pretrained model weights\n model = Darknet(config_path=args.model_def, image_size=args.image_size).to(args.device)\n model.apply(weights_init_normal)\n\n if args.pretrained_weights:\n if args.pretrained_weights.endswith(\".pth\"):\n model.load_state_dict(torch.load(args.pretrained_weights))\n else:\n model.load_darknet_weights(args.pretrained_weights)\n\n ip_lst = [ip for ip in args.client_ips.split(\",\")]\n port_lst = [port for port in args.client_ports.split(\",\")]\n federated_train_size_urls = [\"http://{}:{}/federated_train_size\".format(ip, port) for (ip, port) in\n zip(ip_lst, port_lst)]\n federated_train_urls = [\"http://{}:{}/federated_train\".format(ip, port) for (ip, port) in zip(ip_lst, port_lst)]\n federated_test_urls = [\"http://{}:{}/federated_test\".format(ip, port) for (ip, port) in zip(ip_lst, port_lst)]\n federated_detect_urls = [\"http://{}:{}/federated_detect\".format(ip, port) for (ip, port) in zip(ip_lst, port_lst)]\n\n server_model_params = model.state_dict()\n best_model_params = None\n client_ratio_lst = []\n\n @classmethod\n def call_async_request(cls, method: str = \"\", urls: list = [], data: dict = {}):\n \"\"\" async request for different methods\n (call_federated_train_size, call_federated_train, call_federated_test, call_federated_detect) \"\"\"\n async_request = AsyncRequest()\n\n if method == \"call_federated_train_size\":\n url_info_list = [\n {\n \"url\": url,\n \"method\": \"GET\",\n \"task_name\": url,\n \"response_data_type\": \"JSON\",\n \"set_time_out\": \"false\"\n }\n for url in urls\n ]\n else:\n url_info_list = [\n {\n \"url\": url,\n \"method\": \"POST\",\n \"data\": data,\n \"task_name\": url,\n \"response_data_type\": \"READ\",\n \"set_time_out\": \"false\"\n }\n for url in urls\n ]\n\n async_request.add_tasks_func(url_info_list)\n\n if method == \"call_federated_train\":\n async_request_cnt = 0\n while True:\n time.sleep(1)\n if len(async_request.task_info) != len(urls):\n async_request_cnt += 1\n if async_request_cnt % 600 == 0:\n logger.info(\"async request for {} times, {} minutes, not finished!\".format(async_request_cnt,\n async_request_cnt // 60))\n else:\n break\n else:\n while True:\n time.sleep(1e-5)\n if len(async_request.task_info) == len(urls):\n break\n\n async_request_result_dict = {}\n for url in urls:\n if method == \"call_federated_train_size\":\n async_request_result_dict[url] = async_request.task_info[url][\"data\"][\"data\"]\n else:\n async_request_result_dict[url] = CommonUtils.get_object_by_pickle_bytes_func(\n async_request.task_info[url][\"data\"])\n async_request.destroy_task_func(url)\n\n return 
async_request_result_dict\n\n @classmethod\n def call_federated_train_size(cls):\n \"\"\" get the training data ratio of each client \"\"\"\n with timer(\"call federated train size\", logger):\n async_federated_train_size_request_result_dict = cls.call_async_request(method=\"call_federated_train_size\",\n urls=cls.federated_train_size_urls)\n for federated_train_size_url in cls.federated_train_size_urls:\n federated_train_size = async_federated_train_size_request_result_dict[federated_train_size_url][\n \"federated_train_size\"]\n cls.client_ratio_lst.append(federated_train_size)\n\n logger.info(\"before normalization: client_ratio_lst: {}\".format(cls.client_ratio_lst))\n client_ratio_sum = sum(cls.client_ratio_lst)\n cls.client_ratio_lst = [ratio / client_ratio_sum for ratio in cls.client_ratio_lst]\n logger.info(\"after normalization: client_ratio_lst: {}\".format(cls.client_ratio_lst))\n\n @classmethod\n def call_federated_train(cls):\n \"\"\" call the model of each client for federated training \"\"\"\n with timer(\"call federated train\", logger):\n train_loss = []\n best_epoch = None\n best_loss = float(\"inf\")\n\n for epoch in range(1, args.server_epoch + 1):\n with timer(\"train for epoch {}/{}\".format(epoch, args.server_epoch), logger):\n federated_train_param_dict = {\"server_epoch\": CommonUtils.get_pickle_bytes_by_object_func(epoch),\n \"server_model_params\": CommonUtils.get_pickle_bytes_by_object_func(\n cls.server_model_params)}\n async_federated_train_request_result_dict = cls.call_async_request(method=\"call_federated_train\",\n urls=cls.federated_train_urls,\n data=federated_train_param_dict)\n\n avg_loss = 0.0\n client_weight_lst = []\n\n for idx, federated_train_url in enumerate(cls.federated_train_urls):\n returned_client_model_params = async_federated_train_request_result_dict[federated_train_url][\n \"client_model_params\"]\n returned_epo_avg_loss = async_federated_train_request_result_dict[federated_train_url][\n \"epo_avg_loss\"]\n\n # update the average training loss of all clients for the epoch\n avg_loss += (returned_epo_avg_loss - avg_loss) / (idx + 1)\n\n client_weight_lst.append(returned_client_model_params)\n\n for key in client_weight_lst[-1].keys():\n client_weight_lst[-1][key] = cls.client_ratio_lst[-1] * client_weight_lst[-1][key]\n for idx in range(0, len(client_weight_lst) - 1):\n client_weight_lst[-1][key] += cls.client_ratio_lst[idx] * client_weight_lst[idx][key]\n\n cls.server_model_params = client_weight_lst[-1]\n\n logger.info(\"epoch {:3d}, average loss {:.3f}\".format(epoch, avg_loss))\n train_loss.append(avg_loss)\n\n # save the model, loss and epoch with the smallest training average loss for all the epochs\n if avg_loss < best_loss:\n best_loss = avg_loss\n best_epoch = epoch\n cls.best_model_params = cls.server_model_params\n\n logger.info(\"best train loss: {}\".format(best_loss))\n logger.info(\"best epoch: {}\".format(best_epoch))\n CommonUtils.get_pickle_file_by_object_func(target=cls.best_model_params,\n write_file_path=args.model_file_path)\n\n @classmethod\n def call_federated_test(cls):\n \"\"\" send the best model to all the clients for testing after the federated training \"\"\"\n with timer(\"call federated test\", logger):\n federated_test_param_dict = {\n \"best_model_params\": CommonUtils.get_pickle_bytes_by_object_func(cls.best_model_params)}\n cls.call_async_request(method=\"call_federated_test\", urls=cls.federated_test_urls,\n data=federated_test_param_dict)\n\n @classmethod\n def call_federated_detect(cls):\n \"\"\" 
send the best model to all the clients for detecting after the federated training \"\"\"\n with timer(\"call federated detect\", logger):\n federated_detect_param_dict = {\n \"best_model_params\": CommonUtils.get_pickle_bytes_by_object_func(cls.best_model_params)}\n async_federated_detect_request_result_dict = cls.call_async_request(method=\"call_federated_detect\",\n urls=cls.federated_detect_urls,\n data=federated_detect_param_dict)\n\n # receive the log and json files from clients\n for idx, federated_detect_url in enumerate(cls.federated_detect_urls):\n for result_key in async_federated_detect_request_result_dict[federated_detect_url]:\n if \".log\" in result_key:\n client_log_file_path = os.path.join(args.log_folder_path, result_key)\n with timer(\"writing to {}\".format(client_log_file_path), logger):\n with open(client_log_file_path, mode=\"w\", encoding=\"utf-8\") as fl:\n fl.write(async_federated_detect_request_result_dict[federated_detect_url][result_key])\n elif \".json\" in result_key:\n client_submission_file_path = os.path.join(args.log_folder_path, result_key)\n with timer(\"writing to {}\".format(client_submission_file_path), logger):\n CommonUtils.get_json_file_by_object_func(\n target=async_federated_detect_request_result_dict[federated_detect_url][result_key],\n write_file_path=client_submission_file_path)\n\n\nif __name__ == \"__main__\":\n Server.call_federated_train_size()\n Server.call_federated_train()\n Server.call_federated_test()\n Server.call_federated_detect()\n","repo_name":"JinYuLite/finals-helmet-baseline","sub_path":"contestant-server/service/federated/server_service.py","file_name":"server_service.py","file_ext":"py","file_size_in_byte":11782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19146394119","text":"import socket\nimport sys\n\nif len (sys.argv) > 2:\n server = sys.argv[1]\n portno = int(sys.argv[2])\n total_sent = 0;\n total_recv = 0;\n\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Bind the socket to the port\n server_address = ('', portno)\n print >>sys.stderr, 'starting up on %s port %s' % server_address\n sock.bind(server_address)\n\n # Listen for incoming connections\n sock.listen(1)\n\n try:\n\n while True:\n # Wait for a connection\n print >>sys.stderr, 'waiting for a connection'\n connection, client_address = sock.accept()\n\n try:\n print >>sys.stderr, 'connection from', client_address\n\n # Receive the data in small chunks and retransmit it\n pkt_total = 0\n while True:\n data = connection.recv(1024)\n if data:\n pkt_total += len(data)\n total_recv += len(data)\n print >>sys.stderr, 'received %d bytes' % total_recv\n else:\n break\n\n\n # Create a TCP/IP socket\n sendsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (server, portno+1)\n print >>sys.stderr, 'connecting to %s port %s' % server_address\n sendsock.connect(server_address)\n\n try:\n # Send data\n message = bytearray([1 for b in range(pkt_total) ])\n sendsock.sendall(message)\n\n total_sent += len(message)\n print >>sys.stderr, 'sent %d bytes' % total_sent\n\n finally:\n print >>sys.stderr, 'closing socket'\n sendsock.close()\n\n finally:\n # Clean up the connection\n connection.close()\n finally:\n sock.close()\n\n\n 
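\n# usage sketch (assumed from the argv handling above): python client_server.py <server-host> <port>\n# it listens on <port>; for each connection it counts the received bytes, then connects to\n# <server-host>:<port>+1 and sends back the same number of bytes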
\n","repo_name":"jaredivey/ns-3-dce-1.7","sub_path":"myscripts/python/testscripts/client_server.py","file_name":"client_server.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"72028171076","text":"from tests.data_generators import PlanGenerator\n\nfrom .flask import LogInUser, ViewTestCase\n\n\nclass AccountantTests(ViewTestCase):\n def setUp(self) -> None:\n super().setUp()\n self.plan_generator = self.injector.get(PlanGenerator)\n\n def test_that_logged_in_accountant_gets_redirected(self) -> None:\n plan = self.plan_generator.create_plan()\n self.assert_response_has_expected_code(\n url=f\"/accountant/plans/{plan.id}/approve\",\n method=\"post\",\n login=LogInUser.accountant,\n expected_code=302,\n )\n\n def test_that_unauthenticated_user_gets_redirected(self) -> None:\n plan = self.plan_generator.create_plan()\n self.assert_response_has_expected_code(\n url=f\"/accountant/plans/{plan.id}/approve\",\n method=\"post\",\n login=None,\n expected_code=302,\n )\n","repo_name":"arbeitszeit/arbeitszeitapp","sub_path":"tests/flask_integration/test_approve_plan_view.py","file_name":"test_approve_plan_view.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"62"} +{"seq_id":"23557127944","text":"# version 1.0\n# by yidongxu\n#用法:multiwfn处理后,导出的txt会在极大值和极小值前面加一个*,影响脚本16和17的数据提取,需要首先对*号进行删除并用空格代替\nimport os\n\n# 获取当前文件夹下的所有子文件夹\nsubfolders = [f.path for f in os.scandir('./') if f.is_dir()]\n\n# 定义要替换的字符*变空格\nold_char = '*'\nnew_char = ' '\n\n# 遍历每个子文件夹,并读取其中的surfanalysis.txt文档进行替换\nfor folder in subfolders:\n surfanalysis_path = os.path.join(folder, 'surfanalysis.txt')\n if os.path.exists(surfanalysis_path):\n with open(surfanalysis_path, 'r') as f:\n content = f.read()\n content = content.replace(old_char, new_char)\n with open(surfanalysis_path, 'w') as f:\n f.write(content)\n","repo_name":"yidongxu125/Scripts","sub_path":"EPS_MEP/提取surfanalysis文件中的极大极小值/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2266880773","text":"from csv import reader\r\ns=\"Googlebot/2.1;\"\r\nwith open(r\"I:\\CSV files\\Alpha5-master\\files\\access-log.txt\") as f:\r\n count1=0\r\n for line in f:\r\n if line==\" \":\r\n count1+=1\r\n print(line)\r\n if line.strip():\r\n l=line.split()\r\n for word in l:\r\n if word==s:\r\n count1+=l.count(word)\r\n print(count1)\r\n\r\n","repo_name":"Manju-1/python_selenium_project","sub_path":"Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9195820424","text":"import uuid\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass User(AbstractUser):\n id = models.UUIDField(\n verbose_name='id',\n primary_key=True,\n editable=False,\n default=uuid.uuid4\n )\n email = models.EmailField(\n verbose_name='email',\n unique=True,\n blank=False,\n null=True\n )\n profile_img = models.ImageField(\n verbose_name='Profile Picture',\n null=True,\n blank=True,\n default='User.png'\n )\n\n def __str__(self) -> str:\n return str(self.get_full_name())\n\n class Meta:\n verbose_name = 'Usuário'\n verbose_name_plural = 
\n","repo_name":"philip-persan/django-todo-app","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18189961934","text":"# version=2020/11/19\r\nimport sys # to use the argv\r\nimport socket \r\nclose_recv = \"close\" # to close the unused recv\r\n\r\ndef command(cmd, s, u, addr):\r\n\t\r\n\t# use udp\r\n\t\"\"\"\r\n\tif \"register\" in cmd or cmd == \"whoami\":\r\n\t\ts.sendall(close_recv.encode())\r\n\t\tu.sendto(cmd.encode(), addr)\r\n\t\tdata, _ = u.recvfrom(1024)\r\n\t\tprint(str(data, encoding='utf-8'))\r\n\t\"\"\"\r\n\tif \"register\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\t# u.sendto(close_recv.encode(), addr)\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\t# use tcp\r\n\telif \"login\" in cmd or cmd == \"logout\" or cmd==\"list-user\":\r\n\t\ts.sendall(cmd.encode())\r\n\t\t# u.sendto(close_recv.encode(), addr)\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\r\n\t## hw2 command\r\n\telif \"create-board\" in cmd or \"create-post\" in cmd or \"list-board\" in cmd \\\r\n\t\tor \"list-post\" in cmd or \"read\" in cmd or \"delete-post\" in cmd or \"update-post\" in cmd \\\r\n\t\tor \"comment\" in cmd:\r\n\t\ts.sendall(cmd.encode())\r\n\t\t# u.sendto(close_recv.encode(), addr)\r\n\t\tdata = str(s.recv(1024), encoding='utf-8')\r\n\t\tprint(data)\r\n\r\nif __name__ == \"__main__\":\r\n\t## the host and service\r\n\tif len(sys.argv) != 3:\r\n\t\tsys.exit(\"iter_tcp_client.py {host} {portnumber}\")\r\n\telse:\r\n\t\tport = int(sys.argv[2])\r\n\t\thost = sys.argv[1]\r\n\taddr = (host, port)\r\n\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tu = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\ts.connect(addr)\r\n\r\n\t# recv welcome\r\n\twel = str(s.recv(1024), encoding='utf-8')\r\n\tprint(wel)\r\n\t\r\n\twhile True:\r\n\t\tcmd = input(\"% \")\r\n\t\t# print(\"Test input: %s\" % cmd)\r\n\t\t\r\n\t\tif cmd==\"exit\":\r\n\t\t\ts.sendall(cmd.encode())\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcommand(cmd, s, u, addr)\r\n\tu.close()\r\n\ts.close()","repo_name":"KJ-black/Introduce-Network-Programming","sub_path":"HW2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26062819831","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport argparse\nimport time\nimport os\nfrom webdriver_manager.chrome import ChromeDriverManager as CM\nfrom pytube import YouTube\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip\nfrom moviepy.editor import *\nfrom math import *\nimport shutil\n\n#----------------------------------------------------------------------------------------\n\n# ___ ___ _ _ ___ ___ ___ \n# / __| / _ \\ | \\| | | __| |_ _| / __|\n#| (__ | (_) | | .` | | _| | | | (_ |\n# \\___| \\___/ |_|\\_| |_| |___| \\___|\n\ntiktok_sessionID = \"your_tiktok_sessionid\" #tiktok sessionid (go to https://www.tiktok.com/ - log in - grab the value of the \"sessionid\" cookie)\nuploadInterval = 60 #Interval between publishing two videos, in seconds\ntoDownload = [\"https://www.youtube.com/watch?v=2lAe1cqCOXo\",\n              \"https://www.youtube.com/watch?v=8qTQbk2A02M\",\n              \"https://www.youtube.com/watch?v=FzytYMcq-Qg\"] #Links of the videos to publish, as a list, e.g.: [\"https://...\", \"https://...\",\"https://...\"]\nhashtags = \"#tiktok #fyp\" #hashtags under the video\n#----------------------------------------------------------------------------------------\n\nclass TikTokBot:\n    def __init__(self, who_can_view, video_path, caption):\n        path = os.path.dirname(os.path.abspath(__file__))\n        options = webdriver.ChromeOptions()\n        options.add_argument('headless')\n        self.driver = webdriver.Chrome(options=options, executable_path=CM().install())\n        self.driver.set_window_size(1440, 900)\n        self.executor_url = self.driver.command_executor._url\n        self.session_id = self.driver.session_id\n        print(self.executor_url, self.session_id)\n        self.driver.get('https://tiktok.com')\n        self.driver.add_cookie({'name' : 'sessionid', 'value' : tiktok_sessionID, 'domain' : '.tiktok.com'})\n        time.sleep(1)\n        self.driver.get('https://www.tiktok.com/upload/?lang=en')\n        self.url = self.driver.current_url \n        print(self.url)\n\n        # takes you to the upload page\n        time.sleep(2)\n        self.driver.execute_script(\"window.scrollTo(0,500);\")\n\n        # upload video from files\n        while True:\n            try:\n                self.driver.find_element_by_xpath('//input[contains(@name, \"upload-btn\")]').send_keys(video_path)\n                break\n            except NoSuchElementException:\n                time.sleep(2)\n\n        # set caption\n        self.driver.find_element_by_xpath('//div[contains(@class, \"notranslate public-DraftEditor-content\")]').send_keys(caption)\n\n        # make sure video is uploaded\n        while True:\n            try:\n                video = self.driver.find_element_by_css_selector('video[src^=\"blob\"]')\n                source = video.get_attribute('src')\n                print('video uploaded, wait for submitting')\n                break\n            except NoSuchElementException:\n                print('video uploading...')\n                time.sleep(10)\n\n        # click submit\n        print('submitting...')\n        self.driver.find_element_by_xpath('//button[contains(text(), \"Post\")]').click()\n        print('DONE! 
' + caption + \" is published\")\n print(\"Wait \" + str(uploadInterval) + \" seconds before uploading next video...\")\n time.sleep(uploadInterval)\n self.driver.close()\n \ndef validFileName(str):\n prohibedChars = \"'.:#|,/<>\\\"\\\\?*\"\n for prohibedChar in list(prohibedChars):\n str = str.replace(prohibedChar, \"\")\n return str;\n\nfor i in toDownload:\n yt = YouTube(i)\n yt.streams.first().download('./videos/brut')\n\n title = validFileName(yt.title)\n\n clip = VideoFileClip(\"./videos/brut/\" + title +\".mp4\")\n parts = ceil(clip.duration/50)\n for x in range(parts):\n part = x + 1\n if(x == parts-1):\n end = clip.duration\n else:\n end = part*50\n\n start = x*50\n ffmpeg_extract_subclip(\"./videos/brut/\" + title + \".mp4\", start, end, targetname=\"./videos/cut/\" + title +\"$$part\" + str(part).zfill(2) + \".mp4\") \n time.sleep(5)\n titleComplete = title + \" - part \" + str(part)\n TikTokBot(who_can_view=\"Public\", video_path=\"./videos/cut/\" + title + \"$$part\" + str(part).zfill(2) + \".mp4\", caption=titleComplete + \" \" + hashtags)\n time.sleep(1)\n os.remove(\"./videos/cut/\" + title + \"$$part\" + str(part).zfill(2) + \".mp4\")\n\n","repo_name":"CorentinStevens/tiktok-autopost-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"16316670070","text":"from selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom other_folder.drivers import get_driver, login, close_browser\nfrom config import user, password\n\n\ndef open_wm_cnf(driver):\n button = driver.find_element(By.CSS_SELECTOR, \"button[name*='#WM_CNF)']\")\n button.click()\n\n button = driver.find_element(By.CSS_SELECTOR, \"button[name*='#X_I01']\")\n button.click()\n\n\ndef get_data_from_table(driver):\n table = driver.find_element_by_id(\"info_string_table\").text\n data = table.split(\"\\n\")[0].split()\n storage = data[3] + data[4]\n return storage\n\n\ndef confirm_to(driver, pallets):\n for pallet in pallets:\n to_button = driver.find_element(By.CSS_SELECTOR, f\"button[name*='{pallet}']\")\n to_button.click()\n\n storage = get_data_from_table(driver)\n\n pallet_field = driver.find_element_by_id(\"p_field\")\n pallet_field.send_keys(pallet)\n pallet_field.send_keys(Keys.RETURN)\n\n barcode_field = driver.find_element_by_id(\"lv_beep_b\")\n barcode_field.send_keys(f\"LVS{storage}\")\n\n barcode_confirm = driver.find_element_by_id(\"beep_sim\")\n barcode_confirm.click()\n\n print(f\"TO for PALLET {pallet} confirmed\")\n\n\ndef main_confirm_to(pallets):\n driver = get_driver()\n login(driver)\n open_wm_cnf(driver)\n confirm_to(driver, pallets)\n close_browser(driver)\n\n\nif __name__ == '__main__':\n # wd = get_driver()\n # login(wd, user, password)\n palls = [5065606130]\n main_confirm_to(palls)\n","repo_name":"tichymartin/SAPGUIscripting","sub_path":"trx_folder/trx_confirm_to_inbound.py","file_name":"trx_confirm_to_inbound.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"28759216651","text":"\ndef binaryToDecimal(binaryStr):\n t = list(binaryStr)\n p = []\n total = 0\n for i in t:\n i = int(i)\n p.append(i)\n for i in range(1, len(p)+1):\n total += p[i-1] * 2 ** (len(p) - i)\n return total\n \n\n \n\n \n 
\n","repo_name":"zhexxian/SUTD-The-Digital-World","sub_path":"Homework/coding_week6/HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"45300685694","text":"# worst -> O(nlogn)\n# average -> theta(nlogn)\n# auxilary space -> n\n\n# divide and conquer\n# while merging do sorting\n# while merging start comparing from first element of each sub list\n\n\ndef sorting(data):\n # when l and r hae one-one element then this recursion will return and l and r array with one element to above\n # caller stack.\n # then that above function will sort those and return new updated l and r to its above caller stack ad so on.\n if len(data)>1:\n # divide\n mid = len(data)//2\n r = data[mid:]\n l = data[:mid]\n # get the sored left array\n sorting(l)\n # get the right sorted array\n sorting(r)\n\n i = j = t = 0\n # sort and merge\n while i< len(l) and j (give a name) => \"Run with highest privileges\" - Check\n# Triggers Tab => New => At Startup\n# Actions Tab => New => Start A Program => Browse(program)\n# Conditions Tab => on AC Power (Uncheck)\n\n\nhost_file_path = r\"C:\\Windows\\System32\\drivers\\etc\\hosts\"\nlocal_page = \"127.0.0.1\"\naddress_list = [\"sg.yahoo.com\",\"yahoo.com\",\"www.yahoo.com\"]\n","repo_name":"22mn/py","sub_path":"py-samples/block-samples.py","file_name":"block-samples.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"42972348132","text":"\"\"\"Reddit Streamable bot.\r\n\r\nA bot for Reddit that mirror videos from posts of specified domains\r\nto Streamable and replies with the link.\r\n\"\"\"\r\nimport logging\r\nimport traceback\r\nimport time\r\nfrom logging import INFO, ERROR\r\n\r\nfrom prawcore.exceptions import Forbidden, RequestException, ResponseException\r\nfrom requests.exceptions import Timeout, ConnectionError, ReadTimeout, HTTPError\r\nfrom requests import request\r\n\r\n\r\nlog = logging.getLogger('reddit_streamable_bot')\r\n\r\n\r\nclass Bot:\r\n\t_UPLOAD_URL = 'https://api.streamable.com/import?url={url}'\r\n\t_VIDEO_URL = 'https://streamable.com/{shortcode}'\r\n\t_VREDDIT_URL = 'https://vredd.it/ajax_process.php'\r\n\t_RETRY_STATUSES = {500, 502, 503, 504}\r\n\tCE_SLEEP = 10 # Seconds to sleep in case of ConnectionError.\r\n\tREPLY_TEMPLATE = ('[Streamable Mirror]({link})\\n\\n'\r\n\t\t\t\t\t '*I am a bot, and this action was performed '\r\n\t\t\t\t\t 'automatically.*')\r\n\r\n\t@staticmethod\r\n\tdef _log(lvl, post, msg):\r\n\t\tfor string in [f'Submission: {post.shortlink}',\r\n\t\t\t\t\t f'Submission_url: {post.url}',\r\n\t\t\t\t\t f'Mirror: {msg}']:\r\n\t\t\tlog.log(lvl, string)\r\n\t\t\t\t\r\n\tdef __init__(self, reddit, subreddit_name, *, streamable_auth, domains,\r\n\t\t\t\t streamable_user_agent, sticky=False):\r\n\t\t\"\"\"Initialize a Bot instance.\r\n\r\n\t\tParameters:\r\n\t\t\treddit: authenticated (not read-only) praw.Reddit instance.\r\n\t\t\tsubreddit_name: string, name of the subreddit.\r\n\t\t\tstreamable_auth: tuple, streamable credentials (username, password).\r\n\t\t\tdomains: list of domains.\r\n\t\t\tsticky: boolean, if True the comment will be sticky.\r\n\t\t\"\"\"\r\n\t\tself._reddit = reddit\r\n\t\tself._subreddit = self._reddit.subreddit(subreddit_name)\r\n\t\tself._streamable_auth = streamable_auth\r\n\t\tself._streamable_headers = {'User-Agent': streamable_user_agent}\r\n\t\tself._errors = 
0\r\n\t\tself._offline = False\r\n\t\tself.domains = domains\r\n\t\tself.sticky = sticky\r\n\r\n\tdef _request(self, method, url, *, max_retries=3, **kwargs):\r\n\t\tretries = 0\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tr = request(method, url, timeout=10, **kwargs)\r\n\t\t\t\tif (r.status_code not in self._RETRY_STATUSES \r\n\t\t\t\t\t\tor retries >= max_retries):\r\n\t\t\t\t\treturn r\r\n\t\t\t\ttime.sleep(5)\r\n\t\t\texcept (Timeout, ConnectionError) as exception:\r\n\t\t\t\tif retries >= max_retries:\r\n\t\t\t\t\traise\r\n\t\t\t\tif isinstance(exception, ConnectionError):\r\n\t\t\t\t\ttime.sleep(self.CE_SLEEP)\r\n\t\t\tretries += 1\r\n\r\n\tdef _video_url(self, submission):\r\n\t\tif submission.domain == 'v.redd.it':\r\n\t\t\tself._request('POST', self._VREDDIT_URL,\r\n\t\t\t\t\t\t data={'url': submission.url})\r\n\t\t\treturn f'https://vredd.it/files/{submission.url[18:]}.mp4'\r\n\t\treturn submission.url\r\n\r\n\tdef main(self, submission):\r\n\t\ttry:\r\n\t\t\tresp = self.mirror(self._video_url(submission))\r\n\t\t\tif resp is None:\r\n\t\t\t\tmsg = \"Video over 10 min or couldn't be processed\"\r\n\t\t\t\tself._log(INFO, submission, msg)\r\n\t\t\telif isinstance(resp, int):\r\n\t\t\t\tif resp == 404:\r\n\t\t\t\t\tself._log(INFO, submission, 'Invalid video link')\r\n\t\t\t\telse:\r\n\t\t\t\t\traise HTTPError(f'Streamable HTTP {resp} response')\r\n\t\t\telse:\r\n\t\t\t\tself._log(INFO, submission, resp)\r\n\t\t\t\treply = self.REPLY_TEMPLATE.format(link=resp)\r\n\t\t\t\tcomment = submission.reply(reply)\r\n\t\t\t\tlog.info(f'Reply: https://www.reddit.com{comment.permalink}')\r\n\t\t\t\tif self.sticky:\r\n\t\t\t\t\tcomment.mod.distinguish(how='yes', sticky=True)\r\n\t\t\treturn None\r\n\t\texcept HTTPError as http_error:\r\n\t\t\tself._log(ERROR, submission, str(http_error))\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tself._log(INFO, submission, 'User interrupted process')\r\n\t\t\traise\r\n\t\texcept Exception:\r\n\t\t\tself._log(ERROR, submission, 'Error\\n' + traceback.format_exc())\r\n\t\tself._errors += 1\r\n\r\n\tdef mirror(self, url):\r\n\t\t\"\"\"Upload a video from url to Streamable.\r\n\t\t\r\n\t\tParameters:\r\n\t\t\turl: string, the URL of the video.\r\n\r\n\t\tReturns: Streamable video URL if the upload was successful,\r\n\t\t\tNone if the video is over 10 min or the server can't\r\n\t\t\tprocess the video else the HTTP response status code.\r\n\t\t\"\"\"\r\n\t\tr = self._request('GET', self._UPLOAD_URL.format(url=url),\r\n\t\t\t\t\t\t headers=self._streamable_headers,\r\n\t\t\t\t\t\t auth=self._streamable_auth)\r\n\t\tif r.status_code == 200:\r\n\t\t\tvideo_link = self._VIDEO_URL.format(shortcode=r.json()['shortcode'])\r\n\t\t\ttime.sleep(10)\r\n\t\t\tr = self._request('HEAD', video_link)\r\n\t\t\tif r.status_code == 200:\r\n\t\t\t\treturn video_link\r\n\t\t\telif r.status_code == 404:\r\n\t\t\t\treturn None\r\n\t\treturn r.status_code\r\n\r\n\tdef run(self):\r\n\t\tstream = self._subreddit.stream.submissions\r\n\t\tlog.info('Starting')\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself._offline = False\r\n\t\t\t\tfor submission in stream(skip_existing=True):\r\n\t\t\t\t\tif submission.domain in self.domains:\r\n\t\t\t\t\t\tself.main(submission)\r\n\t\t\texcept KeyboardInterrupt:\r\n\t\t\t\tlog.info('User interrupted the program')\r\n\t\t\t\traise\r\n\t\t\texcept Exception as exception:\r\n\t\t\t\tif (isinstance(exception, RequestException) and\r\n\t\t\t\t\t\tisinstance(exception.original_exception,\r\n\t\t\t\t\t\t\t\t (ReadTimeout, ConnectionError))):\r\n\t\t\t\t\toe = 
exception.original_exception\r\n\t\t\t\t\tif isinstance(oe, ConnectionError):\r\n\t\t\t\t\t\tself._offline = True\r\n\t\t\t\t\t\ttime.sleep(self.CE_SLEEP)\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tself._errors += 1\r\n\t\t\t\tif (isinstance(exception, ResponseException) and \r\n\t\t\t\t\t\texception.response.status_code in self._RETRY_STATUSES):\r\n\t\t\t\t\tcode = exception.response.status_code\r\n\t\t\t\t\tlog.error(f'Reddit HTTP {code} response')\r\n\t\t\t\t\tlog.debug('Restarting in 5 s')\r\n\t\t\t\t\ttime.sleep(5)\r\n\t\t\t\telse:\r\n\t\t\t\t\tlog.exception('Error\\n')\r\n\t\t\t\t\tlog.info('Stopping')\r\n\t\t\t\t\tbreak\r\n","repo_name":"Stuart1998/reddit-streamable-bot","sub_path":"reddit_streamable_bot.py","file_name":"reddit_streamable_bot.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"22519269283","text":"'''\nDescription: \nVersion: 1.0\nAuthor: 冰凝水\nDate: 2021-04-22 09:22:37\nLastEditTime: 2021-04-22 09:24:31\nFilePath: \\Leetcode\\LCP 01. 猜数字.py\n'''\n\n\"\"\"\nRESULT: Accept\nTIME: 40ms BEAT: 59.83% O(n) = \nMEMORY: 14.7MB BEAT: 84.27% O(n) = \nUSED TIME: 01:09\nLAST EDIT TIME: 2021-04-22 09:24:18\nDescription: \n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def game(self, guess: List[int], answer: List[int]) -> int:\n        cnt = 0\n        for i in range(len(guess)):\n            if guess[i] == answer[i]: \n                cnt += 1\n        return cnt","repo_name":"sunzhaoc/LeetCode","sub_path":"LCP 01. 猜数字.py","file_name":"LCP 01. 猜数字.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6957866928","text":"from matplotlib.markers import MarkerStyle\nimport numpy as np\nfrom numpy import full\nfrom sklearn import mixture\nfrom scipy.stats import multivariate_normal as mvn\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation, axes\nimport random\nimport colorsys\nfrom matplotlib.patches import Ellipse\n\ndef get_mean_std(arr):\n    mean = np.mean(arr)\n    std = np.std(arr)\n    return mean, std\n\n\nclass Evaluator:\n    def __init__(self, set_GT_length):\n        self.set_GT_length = set_GT_length\n\n    def draw_ellipse(self, position, covariance, color='r', ax=None, **kwargs):\n        \"\"\"Draw an ellipse with a given position and covariance\"\"\"\n        ax = ax or plt.gca()\n\n        # Convert covariance to principal axes\n        if covariance.shape == (2, 2):\n            U, s, Vt = np.linalg.svd(covariance)\n            angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n            width, height = 2 * np.sqrt(s)\n        else:\n            angle = 0\n            width, height = 2 * np.sqrt(covariance)\n\n        # Draw the Ellipse\n        for nsig in range(1, 4):\n            ellipse = Ellipse(position, nsig * width, nsig * height,\n                              angle, color=color, **kwargs)\n            ax.add_patch(ellipse)\n\n    def plot_gt(self,ax,scenario, people_colors):\n        num_people = np.size(scenario.gt, axis=0)\n        for i in range(num_people): # i is number of ped\n            ax.plot(scenario.trajectories[i][:, 0], scenario.trajectories[i][:, 1], '.',linewidth=1,markersize=1,color=people_colors[i][0],label='Observations')\n            ax.plot(scenario.gt[i][:, 0], scenario.gt[i][:, 1], 'D', linewidth=1,markersize=1,color=people_colors[i][1],label='Ground truth')\n\n    def plot_predictions(self,ax,full_predictions, people_colors):\n        num_people = full_predictions.trajectories[0].shape[1]\n        num_pred_steps = full_predictions.trajectories[0].shape[0]\n        for prediction_sample in full_predictions.trajectories:\n            for i in range(num_people): # i is number of ped\n                pred_traj = prediction_sample[:num_pred_steps, i, :]\n                
ax.plot(pred_traj[:, 0], pred_traj[:, 1], 'o',linewidth=1,markersize=2,color=people_colors[i][2],label='Predictions')\n \n # Prepares an array of distinct random colors for the N people of this scenario\n # Output structure, one row per person:\n # [ [color_obs,color_gt,color_pred],\n # [color_obs,color_gt,color_pred],\n # ...\n # [color_obs,color_gt,color_pred] ]\n def prepare_scenario_colorscheme(self, scenario):\n num_people = np.size(scenario.gt, axis=0)\n\n HSV_tuples_obs = [(x*1.0/num_people, 0.5, 0.4) for x in range(num_people)]\n RGB_tuples_obs = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples_obs))\n HSV_tuples_gt = [(x*1.0/num_people, 0.97, 0.7) for x in range(num_people)]\n RGB_tuples_gt = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples_gt))\n HSV_tuples_pred = [(x*1.0/num_people, 0.5, 0.95) for x in range(num_people)]\n RGB_tuples_pred = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples_pred))\n\n colors = [[c1,c2,c3] for c1,c2,c3 in zip(RGB_tuples_obs,RGB_tuples_gt,RGB_tuples_pred)]\n random.shuffle(colors)\n return colors\n\n def plot_scenario(self, dataset, scenario, full_predictions=None):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n people_colors = self.prepare_scenario_colorscheme(scenario)\n \n dataset.plot_map()\n if full_predictions:\n self.plot_predictions(ax,full_predictions, people_colors)\n self.plot_gt(ax, scenario, people_colors)\n\n if full_predictions and len(full_predictions.trajectories) > 1:\n _, gauss_scene = self.cal_nll(scenario,full_predictions)\n gauss_scene = np.array(gauss_scene)\n for pos, covar, color in zip(gauss_scene[:, 1], gauss_scene[:, 0],people_colors):\n self.draw_ellipse(pos, covar, color[2], alpha=0.3)\n \n handles, labels = self.filter_legend(ax)\n plt.legend(handles, labels,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.ylabel('y [m]')\n plt.xlabel('x [m]')\n plt.axis('equal')\n plt.show()\n return\n\n def filter_legend(self,ax):\n handles, labels = ax.get_legend_handles_labels()\n i = 1\n while i < len(labels):\n if labels[i] in labels[:i]:\n del (labels[i])\n del (handles[i])\n else:\n i += 1\n return handles, labels\n\n def draw_animation(self,dataset,scenario,prediction):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n num_samples = len(prediction.trajectories)\n num_people = prediction.trajectories[0].shape[1]\n people_colors = self.prepare_scenario_colorscheme(scenario)\n\n dataset.plot_map()\n self.plot_gt(ax,scenario,people_colors)\n \n dots = []\n for i in range(num_people):\n for s in range(num_samples):\n #ax.plot(prediction[s][:,i,0], prediction[s][:,i,1], color=people_colors[i][2]) # Plot the trajectories of each person\n dot, = ax.plot([], [], '.',linewidth=1,markersize=5, color=people_colors[i][2], label='Predictions')\n dots.append(dot) # Prepare the dots to be animated for each person\n\n handles, labels = self.filter_legend(ax)\n ax.legend(handles, labels,bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)\n ax.axis('equal')\n #plt.title('helbing')\n\n def init():\n start = []\n for i in range(num_people):\n for s in range(num_samples):\n s = ax.plot(prediction.trajectories[s][0,i,0], prediction.trajectories[s][0,i,1], color=people_colors[i][2])\n start.append(s)\n return start\n\n def update_dot(j):\n counter = 0\n for i in range(num_people):\n for s in range(num_samples):\n dots[counter].set_data(prediction.trajectories[s][j,i,0], prediction.trajectories[s][j,i,1])\n counter = counter+1\n return dots\n\n ani = animation.FuncAnimation(fig, update_dot, frames=range(0, 
np.size(prediction.trajectories[0], axis=0)), interval=500, init_func=init)\n return ani\n\n # This function calculates ADE for single trajectory\n def cal_ade(self, predict, true): # traj should be (pre_step,2)\n squared_dist = (predict - true) ** 2\n temp = squared_dist.sum(axis=1)\n ade = np.sqrt(temp).sum()\n ade = ade / len(predict)\n return ade\n\n # This function calculates FDE for single trajectory\n def cal_fde(self, predict, true):\n squared_dist = (predict - true) ** 2\n fde = np.sqrt(squared_dist[-1, :].sum())\n return fde\n\n # This function calculates the ADE and FDE for:\n # - certain predictions with one particle\n # - uncertain predictions with multiple samples\n def evaluate_scenario_ade_fde(self, scenario, predictions):\n ade = []\n fde = []\n for prediction in predictions.trajectories:\n ade_single, fde_single = self.evaluate_single_particle_ade_fde(scenario, prediction)\n ade.append(ade_single)\n fde.append(fde_single)\n if len(ade) != 0:\n ade = np.mean(ade)\n fde = np.mean(fde)\n else:\n ade = np.nan\n fde = np.nan\n return ade, fde\n\n # Evaluate the predictions for a scenario\n def evaluate_single_particle_ade_fde(self, scenario, predictions):\n ade = []\n fde = []\n for i in range(np.size(scenario.gt, axis=0)): # i is number of ped\n if len(scenario.gt[i]) >= self.set_GT_length:\n true_length = len(scenario.gt[i])\n ade_single = self.cal_ade(predictions[:true_length, i, :], scenario.gt[i])\n fde_single = self.cal_fde(predictions[:true_length, i, :], scenario.gt[i])\n ade.append(ade_single)\n fde.append(fde_single)\n if len(ade) != 0:\n ade = np.mean(ade)\n fde = np.mean(fde)\n else:\n ade = np.nan\n fde = np.nan\n # if ade=[], return nan; else, return float\n return ade, fde\n\n # Calculating the kADE and kFDE for a single pedestrian\n # tra_gt is a single ground truth trajectory\n # predict contains multiple noisy trajectories\n def k_ade_fde_pointwise(self, predict, tra_gt):\n k_ade = []\n k_fde = -1\n len_gt = len(tra_gt)\n if len_gt >= self.set_GT_length:\n for i in range(len_gt):\n timestep_pred = predict[:,i,:]\n min_dist = min(np.sqrt(((timestep_pred-tra_gt[i,:])**2).sum(axis=1)))\n k_ade.append(min_dist)\n k_fde = min_dist\n return np.mean(k_ade), k_fde\n\n # Evaluate the predictions for a scenario\n def evaluate_scenario_kade_kfde(self, scenario, predictions):\n kADE = []\n kFDE = []\n for i in range(len(scenario.gt)): # for each pedestrian i\n single_ped_prediction = np.array([sample[:,i,:] for sample in predictions.trajectories])\n kade, kfde = self.k_ade_fde_pointwise(single_ped_prediction, scenario.gt[i])\n if not isinstance(kade, list):\n kADE.append(kade)\n kFDE.append(kfde)\n return np.mean(kADE), np.mean(kFDE)\n\n # Input is a prediction for a single pedestian\n # Model is fit to the last points\n def calculate_gmm(self, samples, gt_position):\n model = mixture.GaussianMixture()\n fitted_values = model.fit(samples)\n predicted_values = model.predict(samples)\n # compute centers as point of highest density of distribution\n density = mvn(cov=model.covariances_[0], mean=model.means_[0]).logpdf(samples)\n centers = samples[np.argmax(density)]\n\n nll = - mvn(cov=model.covariances_[0], mean=model.means_[0]).logpdf(gt_position)\n return model.covariances_[0], model.means_[0], centers, nll\n\n # input is un_sof(scene)\n def cal_nll(self, scenario, prediction):\n if len(prediction.trajectories) < 2:\n print(\"NLL calculation is not possible for a prediction with one sample.\")\n return -1\n num_ped = prediction.trajectories[0].shape[1]\n gauss_scene = 
[]\n        nll_scene = []\n        for ped_id in range(num_ped):\n            if (len(scenario.gt[ped_id]) >= self.set_GT_length):\n                # Selecting the single person trajectory, [sample,time,position]\n                chosen_ped = np.array([sample[:,ped_id,:] for sample in prediction.trajectories])\n                len_gt = len(scenario.gt[ped_id])\n                gt_position = scenario.gt[ped_id][-1]\n                # Selecting the single person trajectory at the last gt point in time, [sample,position]\n                samples = chosen_ped[:, len_gt - 1, :]\n                sigma, mu, centers, nll = self.calculate_gmm(samples, gt_position)\n                gauss_scene.append([sigma, mu, centers, nll])\n                nll_scene.append(nll)\n        nll_scene_mean = np.mean(nll_scene)\n        return nll_scene_mean, gauss_scene\n","repo_name":"boschresearch/the-atlas-benchmark","sub_path":"src/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":11305,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"62"} +{"seq_id":"72645404998","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom .models import Board, Comment\nfrom django.utils import timezone\nfrom .forms import BoardForm, CommentForm, SearchForm\n\ndef main(request):\n    boards = Board.objects\n    return render(request, 'Userboard_main.html', {'boards':boards})\n\ndef warning(request):\n    return render(request, 'Userboard_warning.html')\n\ndef detail(request, board_id):\n    board_detail = get_object_or_404(Board, pk=board_id)\n    comments = Comment.objects.filter(board_id=board_id)\n    if request.method == 'POST':\n        comment_form=CommentForm(request.POST)\n        if comment_form.is_valid():\n            post = comment_form.save(commit=False)\n            post.author = request.user.username\n            post.board_id = board_id\n            post.save()\n            return redirect('/userboard/detail/' + str(board_id))\n    else:\n        comment_form=CommentForm()\n    context = {\n        'board_detail' : board_detail,\n        'comments' : comments,\n        'comment_form' : comment_form\n    }\n    return render(request, 'Userboard_detail.html', context)\n\n@login_required\ndef new(request):\n    if request.method == 'POST':\n        form = BoardForm(request.POST, request.FILES)\n        if form.is_valid():\n            post = form.save(commit=False)\n            \n            if request.user.is_authenticated:\n                post.author = request.user.username\n            else:\n                post.author = \"unknown\"\n\n            post.save()\n            return redirect('/userboard/detail/' + str(post.id))\n    else:\n        form = BoardForm()\n    return render(request, 'Userboard_new.html', {'form':form})\n\n\n@login_required\ndef update(request, board_id):\n    board=get_object_or_404(Board, pk=board_id)\n    if request.method == 'POST':\n        form = BoardForm(request.POST, request.FILES)\n        if form.is_valid():\n            board.title = form.cleaned_data['title']\n            board.text = form.cleaned_data['text']\n            board.image = form.cleaned_data['image']\n            if board.author == request.user.username:\n                board.save()\n                return redirect('/userboard/detail/' + str(board_id))\n            else:\n                return redirect('/userboard/warning')\n    else:\n        form = BoardForm(instance=board)\n    return render(request, 'Userboard_update.html', {'form':form}) \n\n@login_required\ndef delete(request, board_id):\n    if request.method == 'POST':\n        board = Board.objects.get(pk=board_id)\n        if board.author == request.user.username:\n            board.delete()\n            return render(request, 'Userboard_delete.html')\n        else:\n            return redirect('/userboard/warning')\n    elif request.method == 'GET':\n        return HttpResponse('Invalid access.')\n\ndef searchtitle(request):\n    qs = Board.objects.all()\n    q = request.GET.get('q', '')\n    if q:\n        result = 
qs.filter(title__icontains=q)\n        return render(request, 'Userboard_main.html', {'boards':result, 'q':q})\n    else:\n        return redirect('userboard_main')\n\ndef searchtext(request):\n    qs = Board.objects.all()\n    q = request.GET.get('q', '')\n    if q:\n        result = qs.filter(text__icontains=q)\n        return render(request, 'Userboard_main.html', {'boards':result, 'q':q})\n    else:\n        return redirect('userboard_main')\n\n","repo_name":"Leenamkyu/TMI","sub_path":"TMI/AppUserboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29417704346","text":"import pygame \nfrom pygame.locals import * \nimport util\nimport colors\n\nclass Display:\n\n\tdef __init__(self):\n\n\t\tpygame.font.init()\n\t\tself.myfont = pygame.font.SysFont('helvetica', 30)\n\t\t\n\t\tself.background_blits = []\n\t\tself.foreground_blits = ['ball']\n\n\t\tself.scaled_blits = util.load_scaled_blits()\n\t\tself.blit_locations = util.initial_blit_locations()\n\t\tself.screen = pygame.display.set_mode((util.screen_width, util.screen_height))\n\n\t\tself.explosion_counters = {} # Store the number of frames we've seen each explosion\n\t\n\t\t# Since many xwing ids may map to the same sprite, add the mapping here\n\t\t# As a bonus, if we want to change the orientation, we can do so by modifying this map\n\t\tself.xwing_sprite_map = {}\n\n\tdef exit_credits(self, state):\n\t\tclock = pygame.time.Clock()\t\t\n\t\tcredits_text = 'Congratulations!\\n\\nYou hit %d Rebel ships,\\n\\nearning a score of %d!\\n\\n\\n\\nCreated by Will Lauer,\\n\\nwith special thanks to Thomas Lauer\\n\\n\\nfor bailing his brother out when it came to working with Gimp.\\n\\n\\nThank you for playing!' % (state['score'], state['score'] // 10)\n\t\tcredits = self.myfont.render(credits_text, False, colors.white)\n\t\tfor i in range(util.screen_height - 1, 0 - util.screen_height, -1):\n\t\t\tself.screen.blit(credits, (10, i))\n\t\t\tpygame.display.update()\n\t\t\tclock.tick(60)\n\t\t\n\t\t\t\t\n\tdef update_ball_position(self, state):\n\t\t'''\n\t\tPygame wants the column coordinate to come before the row coordinate\n\t\t'''\n\t\tself.blit_locations['ball'] = util.ball_corner(state['y'], state['x'])\t\n\n\tdef update_lasers(self, state):\n\t\t'''\n\t\tIterate through all lasers in the game state, deleting ones that are now out of bounds,\n\t\tand updating the positions of all the others\n\t\t'''\n\t\tlasers = state['lasers']\n\t\tnew_lasers = lasers.copy()\n\t\tfor k in lasers:\n\t\t\tif lasers[k][0] < 0 or lasers[k][0] > util.screen_height \\\n\t\t\tor lasers[k][1] < 0 or lasers[k][1] > util.screen_width:\n\t\t\t\t# The laser is out of bounds and should be deleted\n\t\t\t\tnew_lasers.pop(k, None) # both from the lasers dict and the background blits\n\t\t\t\tself.background_blits.remove(k)\n\t\t\telse:\n\t\t\t\t# The laser is still in bounds and should be included\n\t\t\t\tx, y, vx, vy = lasers[k]\n\t\t\t\tnew_x, new_y = x + vx * util.time_step, y + vy * util.time_step\n\t\t\t\tnew_lasers[k] = new_x, new_y, vx, vy\n\t\t\t\tself.blit_locations[k] = new_y, new_x\n\n\t\tstate['lasers'] = new_lasers\n\t\t\n\t'''\n\tFor each xwing, check its velocity and orientation and if necessary,\n\tchange the orientation sprite so that the two conform\n\t'''\n\tdef update_orientation(self, xwing, orientation):\n\t\tself.xwing_sprite_map[xwing] = self.scaled_blits['xwing_' + str(orientation)]\n\n\tdef update_xwings(self, state):\n\t\t\n\t\t# Update the position of each of the xwings\n\t\txwings = 
state['xwings']\n\t\tnew_xwings = xwings.copy()\n\t\tfor xw in xwings:\n\t\t\tif xwings[xw][1] < 0 or xwings[xw][1] > util.screen_height \\\n\t\t\tor xwings[xw][2] < 0 or xwings[xw][2] > util.screen_width:\n\t\t\t\t# The xwing is out of bounds and should be deleted\n\t\t\t\tnew_xwings.pop(xw, None)\n\t\t\t\tself.foreground_blits.remove(xw)\n\t\t\telse:\n\t\t\t\t# The xwing is still in bounds and should be included\n\t\t\t\to, x, y, vx, vy = xwings[xw]\n\t\t\t\tnew_x, new_y = x + vx * util.time_step, y + vy * util.time_step\n\t\t\t\tself.blit_locations[xw] = new_y, new_x\n\n\t\tstate['xwings'] = new_xwings\n\n\t# We had a collision, so run the explosion animation and remove the xwing from the blits\t\n\tdef boom(self, xwing_id):\n\t\tself.foreground_blits.remove(xwing_id)\n\t\tself.foreground_blits.append('e' + str(xwing_id[1:])) # Explosions enumerated e0, e1, e...\n\t\tself.explosion_counters['e' + str(xwing_id[1:])] = 0\n\n\n\tdef remove_laser(self, laser_id):\n\t\tself.background_blits.remove(laser_id)\n\n\t# Just add the corresponding id to the blit dicts\n\tdef add_xwing(self, i, orientation):\n\t\tself.foreground_blits.append(i)\n\t\tself.xwing_sprite_map[i] = self.scaled_blits['xwing_' + str(orientation)]\n\t\t\t\n\tdef add_laser(self, i):\n\t\tself.background_blits.append(i)\n\n\tdef update_health(self, state):\n\t\thealth_per_hit = util.screen_width // 100\n\t\thealth_bar_width = health_per_hit * state['health']\n\t\tpygame.draw.rect(self.screen, colors.blue, [util.health_x, util.health_y, health_bar_width, 50], 0)\n\n\tdef update_score(self, state):\n\t\ttext = self.myfont.render('Score: ' + str(state['score']), False, colors.white)\n\t\tself.screen.blit(text, util.score_coords)\n\n\tdef update(self, state):\n\t\t'''\n\t\tTake in the state object stored by Game, and update blits\n\t\tStart off by just updating the ball location\n\t\t'''\n\n\t\tself.update_ball_position(state)\n\t\tself.update_lasers(state)\n\t\tself.update_xwings(state)\n\n\t\tself.screen.fill((colors.black)) # Fill the background\n\t\t# self.screen.blit(self.scaled_blits['background'], (-100, -50))\n\t\tself.update_health(state) # Gotta come after screen.fill, otherwise its covered up\n\t\tself.update_score(state)\n\n\t\t# Blit everything with updated locations\n\t\tfor name in self.background_blits:\n\t\t\tif name[0] == 'l':\n\t\t\t\tself.screen.blit(self.scaled_blits['laser'], self.blit_locations[name])\n\t\t\telse:\n\t\t\t\tself.screen.blit(self.scaled_blits[name], self.blit_locations[name])\n\t\tfor name in self.foreground_blits:\n\t\t\tif 'e' in name:\n\t\t\t\tif 'x' + str(name[1:]) not in self.blit_locations:\n\t\t\t\t\tcontinue\n\t\t\t\tself.screen.blit(self.scaled_blits['explosion'], self.blit_locations['x' + str(name[1:])])\n\t\t\t\tself.explosion_counters[name] += 1\n\t\t\telif 'x' in name:\n\t\t\t\t# We are adding an xwing, so need to find the proper sprite\n\t\t\t\tsprite = self.xwing_sprite_map[name]\n\t\t\t\tblit_loc = self.blit_locations[name]\n\t\t\t\tself.screen.blit(self.xwing_sprite_map[name], self.blit_locations[name])\n\t\t\telse:\n\t\t\t\tself.screen.blit(self.scaled_blits[name], self.blit_locations[name])\n\n\t\t# Make it visible\n\t\tpygame.display.flip()\n\n\t\t# Remove explosions after their duration is up\n\t\tnew_explosion_counters = {}\n\t\tfor e in self.explosion_counters:\n\t\t\tif self.explosion_counters[e] == util.explosion_duration:\n\t\t\t\tself.foreground_blits.remove(e)\n\t\t\telse:\n\t\t\t\tnew_explosion_counters[e] = self.explosion_counters[e]\n\t\tself.explosion_counters = 
new_explosion_counters\n\n\t\t\t\n","repo_name":"willlauer/StarWarsGame","sub_path":"src/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"17258331372","text":"# move.py\n# 1. Import the gdb module to access the Python API provided by gdb\nimport gdb\nimport re\n\nmap_re = re.compile('\\\[([0-9]*)\\\] = (0x[0-9a-f]*)')\n\n# 2. User-defined commands need to inherit from the gdb.Command class\nclass Move(gdb.Command):\n\n    # 3. Does the text in this docstring look familiar? gdb extracts the class's __doc__ attribute as the documentation for the corresponding command\n    \"\"\"Move breakpoint\n    Usage: jack old_breakpoint_num new_breakpoint\n    Example:\n        (gdb) jack 1 binary_search -- move breakpoint 1 to `b binary_search`\n    \"\"\"\n\n    def __init__(self):\n        # 4. Register the command's name in the constructor\n        super(self.__class__, self).__init__(\"jack\", gdb.COMMAND_USER)\n\n    # 5. Implement the concrete behavior of the custom command in the invoke method\n    # args holds the arguments that follow the command; string_to_argv converts them into a list\n    def invoke(self, args, from_tty):\n        all_monster = gdb.parse_and_eval('monster_manager_all_monsters_id')\n        a = str(all_monster)\n        b = map_re.findall(a)\n        for ite in b:\n            key = ite[0]\n            value = ite[1]\n            scene_id = gdb.parse_and_eval('((monster_struct *)' + value + ')->data->scene_id')\n            if scene_id != 10000:\n                continue\n            print(\"key = %s, value = %s\" % (key, value))\n            gdb.execute('p ((monster_struct *)' + value + ')->data->player_id')\n        \n#        print(\"monster len = %d\" % len(a))\n#        print(a)\n#        print(type(a))\n#        argv = gdb.string_to_argv(args)\n#        if len(argv) <= 0:\n#            raise gdb.GdbError('Wrong number of arguments; see help jack for usage')\n        # 6. Use gdb.execute to run the concrete commands\n#        for ite in argv:\n#            gdb.execute('p ((monster_struct *)' + ite + ')->data->move_path')\n#            path = gdb.parse_and_eval('((monster_struct *)' + ite + ')->data->move_path')\n#            print(path['pos']['pos_x'])\n#        gdb.write(a.string())\n#        yield(path['pos'])\n        \n        \n#        gdb.execute('delete ' + argv[0])\n#        gdb.execute('break ' + argv[1])\n\n# 7. 
Register this custom command with the gdb session\nMove()\n","repo_name":"a1406/setting","sub_path":"gdbpy/mv.py","file_name":"mv.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"27675386056","text":"import random\nimport string\n\nfrom django.utils.text import slugify\nfrom django.contrib.auth import get_user_model\n\n'''\nrandom_string_generator is located here:\nhttp://joincfe.com/blog/random-string-generator-in-python/\n'''\n\nDONT_USE = ['create']\n\n\ndef random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):\n    return ''.join(random.choice(chars) for _ in range(size))\n\n\n# def random_unique_integer_generator(instance, new_user_id=None):\n#     if new_user_id is not None:\n#         user_id = new_user_id\n#     else:\n#         user_id = random.randint(10000, 99999)\n#     Klass = instance.__class__\n#     qs_exists = Klass.objects.filter(user_id=user_id).exists()\n#     if qs_exists:\n#         new_user_id = random.randint(10000, 99999)\n#         return unique_integer_generator(instance, new_user_id=new_user_id)\n#     return user_id\n\n\ndef unique_integer_generator(instance, new_user_id=None):\n    if new_user_id is not None:\n        user_id = new_user_id\n    else:\n        user_id = get_user_model().objects.last().user_id + 1\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(user_id=user_id).exists()\n    if qs_exists:\n        new_user_id = user_id + 1\n        return unique_integer_generator(instance, new_user_id=new_user_id)\n    return user_id\n\n\ndef unique_slug_generator(instance, new_slug=None):\n    \"\"\"\n    This is for a Django project and it assumes your instance \n    has a model with a slug field and a title character (char) field.\n    \"\"\"\n    if new_slug is not None:\n        slug = new_slug\n    else:\n        slug = slugify(instance.name)\n\n    if slug in DONT_USE:\n        new_slug = \"{randstr}\".format(\n            randstr=random_string_generator(size=5)\n        )\n        return unique_slug_generator(instance, new_slug=new_slug)\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(slug=slug).exists()\n    if qs_exists:\n        new_slug = \"{randstr}\".format(\n            randstr=random_string_generator(size=5)\n        )\n        return unique_slug_generator(instance, new_slug=new_slug)\n    return slug\n\n\ndef unique_slug(size):\n    return \"{randstr}\".format(\n        randstr=random_string_generator(size=size)\n    )\n","repo_name":"mahmoud-batman/quizz-app","sub_path":"backend/core/utils/unique_slug.py","file_name":"unique_slug.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5756690469","text":"import json\nimport fhirclient.models.bundle\nimport fhirclient.models.patient as pa\nfrom pprint import pprint\n\n\ndef get_patient_resource(bundle):\n    \"\"\" 40 points, 38 for correctness 2 for proficiency\n    Parses a FHIR bundle for a patient resource\n\n    Arguments:\n        bundle {dict} -- Python dictionary representing a JSON FHIR bundle\n\n    Returns:\n        [dict] -- Python dictionary representing the JSON Patient FHIR resource\n    \"\"\"\n    entry = bundle['entry']\n    for e in entry:\n        if e['resource']['resourceType'] == 'Patient':\n            return e['resource']\n\n\ndef get_patient_ssn(patient_bundle):\n    \"\"\" 20 points, 18 for correctness 2 for proficiency\n    Parses the patient bundle and returns the social security number as a string\n    \n    Arguments:\n        patient_bundle {dict} -- Python dictionary representing the JSON Patient FHIR resource\n    \n    Returns:\n        string -- the patient's social security number\n    \"\"\"\n    id = patient_bundle['identifier']\n    
for map in id:\n if 'type' in map.keys():\n if map['system'] == 'http://hl7.org/fhir/sid/us-ssn':\n return map['value']\n\n\ndef get_patient_name(patient_bundle):\n \"\"\" 20 points, 18 for correctness 2 for proficiency\n Parses the patient bundle and returns a string formatted as\n e.g. John James Doe\n \n Arguments:\n patient_bundle {dict} -- Python dictionary representing the JSON Patient FHIR resource\n \n Returns:\n string -- the formatted patient name\n \"\"\"\n names = patient_bundle['name']\n for name in names:\n if (name['use'] == 'official'):\n str = ''\n for g in name['given']:\n str += g + ' '\n str += name['family']\n # 'Rita460 Schowalter414'??\n return str\n\n\ndef get_patient_race(patient_resource):\n \"\"\" 10 points, 8 for correctness 2 for proficiency\n Parses a Patient resource and returns race details\n \n Arguments:\n patient_resource {dict} -- Python dictionary representing the JSON Patient FHIR resource\n \n Returns:\n tuple -- Tuple containing the system, code, and display\n \"\"\"\n for ext in patient_resource['extension']:\n if (ext['url'] == 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-race'):\n for map in ext['extension']:\n if 'valueCoding' in map:\n vc = map['valueCoding']\n my_tuple = (vc['system'], vc['code'], vc['display'])\n return my_tuple\n\n\ndef get_patient_birth_place(patient_resource):\n \"\"\" 10 points, 8 for correctness 2 for proficiency\n Parses a Patient resource and returns birth place\n \n Arguments:\n patient_resource {dict} -- Python dictionary representing the JSON Patient FHIR resource\n \n Returns:\n tuple -- Tuple containing the city, state, and country\n \"\"\"\n #'C:/Users/91593/Desktop/Python/synthea/output/fhir/Albina13_Stehr398_a086ee39c8d24cd48fe1f5367e80a370.json'\n for map in patient_resource['extension']:\n if ('valueAddress' in map):\n birth_place = map['valueAddress']\n my_tuple = (birth_place['city'], birth_place['state'], birth_place['country'])\n return my_tuple\n\n\ndef load_bundle(fhir_bundle_path):\n with open(fhir_bundle_path) as f:\n bundle = json.loads(f.read())\n return bundle\n\n\nif __name__ == \"__main__\":\n bundle = load_bundle('data/fhir/Albina13_Stehr398_a086ee39-c8d2-4cd4-8fe1-f5367e80a370.json')\n\n patient = get_patient_resource(bundle)\n print(\"***** Patient\\n\", patient, \"\\n\")\n\n ssn = get_patient_ssn(patient)\n print(\"***** Patient SSN\\n\", ssn, \"\\n\")\n\n name = get_patient_name(patient)\n print(\"***** Patient Name\\n\", name, \"\\n\")\n\n system, code, display = get_patient_race(patient)\n print(\"*** Patient Race\\n\", system, code, display, \"\\n\")\n\n birth_place_city, birth_place_state, birth_place_country = get_patient_birth_place(patient)\n print(\"*** Patient Birth Place\\n\", birth_place_city, birth_place_state, birth_place_country, \"\\n\")","repo_name":"shitterlmj2016/95888_Data_Focused_Python","sub_path":"A1-2/assignment02_student.py","file_name":"assignment02_student.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36143726641","text":"import requests\nfrom requests.exceptions import HTTPError\nimport regex\nimport sys\nimport os\nimport zipfile36 as zipfile\nfrom glowingwaffle.data import ReadData\nimport pandas as pd\nfrom itertools import chain\nimport numpy as np\n\npd.set_option('mode.chained_assignment', None)\n# TODO: Extend this also to the AER, maybe??\n\nclass ScrapeOGC:\n\n def __init__(self, folder=None, urls=None):\n self.urls = list()\n 
self.file_names = list()\n self.output_folder = folder\n self.wa_num = list()\n self.urls = urls\n self.dataframes_dict = {}\n self.feature_list = None\n self.removal_list = list()\n self.removal_wells = list()\n self.multiple_wells = list()\n self.multiple_names = list()\n self.multiple_list = ['compl_ev.csv', 'dst.csv', 'pst_dtl.csv', 'dst_rate.csv', 'perf_net_interval.csv', 'zone_prd_2016_to_present.csv', 'zone_prd_2007_to_2015.csv', 'BC Total Production.csv']\n\n if folder is not None and not os.path.exists(folder):\n # create a folder if the folder does not currently exist\n try:\n os.mkdir(folder)\n except OSError as err:\n sys.exit(f'Error Occurred creating directory: {err}')\n\n def download_data_url(self, file_names=None, force_download=False):\n \"\"\"\n Download the CSV data from the URLS given in the list of URLS, while we are currently using this for\n the data from the Oil and Gas Council of BC, this can be extended to any file that has a URL, including\n none CSV data. If no folder supplied, the file will be downloaded to the Current Working Directory\n\n Parameters\n ----------\n self\n\n Returns\n -------\n None\n \"\"\"\n\n # Check if the necessary files have already been downloaded\n dont_download_flag = True\n\n # Have the ability to force the program to download in order to update the files that may already exist in\n # the folder\n if force_download:\n dont_download_flag = False\n\n for key in file_names:\n if not os.path.exists(os.path.join(self.output_folder, key)):\n dont_download_flag = False\n\n if dont_download_flag:\n print(f\"All files already exist in the folder: {self.output_folder}, continuing to read in the data to \"\n f\"data frames \\n\")\n return\n\n # loop over the requested urls to download the files to the computer\n for idx, dlurls in enumerate(self.urls):\n\n try:\n # Perform a GET request for the files listed in the URLS in the urls list\n print(f\"Downloading file #{idx + 1} of {len(self.urls)} with url: {dlurls}\")\n response = requests.get(dlurls, allow_redirects=True)\n\n # If the response was successful, no Exception will be raised\n response.raise_for_status()\n except HTTPError as http_err:\n sys.exit(f'HTTP error occurred while downloading file: {http_err}')\n except Exception as err:\n sys.exit(f'Other error occurred while downloading file: {err}')\n else:\n # open the file and save it to a location\n # this is specific to the OGC header data, so be careful when extending\n if 'content-disposition' in response.headers.keys():\n output_filename = regex.search(r\"filename=\\\"([^']*)\\\";\",\n response.headers['Content-Disposition']).group(1)\n else:\n output_filename = dlurls.split('/')[-1]\n\n if self.output_folder is not None:\n # gives the full path of the file for writing and saving\n output_filename = os.path.join(self.output_folder, output_filename)\n\n try:\n f = open(output_filename, 'wb')\n # check if we were able to open the file\n except OSError:\n sys.exit(f\"Could not open the file: {output_filename}\")\n with f:\n # write the content of the get request to the file that was opened\n f.write(response.content)\n\n # save the filename to the list of the OGCData option\n self.file_names.append(output_filename)\n\n print(\"Finished Downloading the files from OGC\")\n\n print(\"Unzipping any Zipped downloads\")\n self.unzip_folders()\n\n def unzip_folders(self):\n \"\"\"\n Extract zip files if they were downloaded during the scraping from the OGC Website\n\n Parameters\n ----------\n self\n\n Returns\n -------\n None\n \"\"\"\n # 
Loop through all of the downloaded files from OGC\n for idx, files in enumerate(self.file_names):\n # Check if the file is in fact a zip file\n if zipfile.is_zipfile(files):\n print(f\"Unzipping {files}\")\n\n zf = zipfile.ZipFile(files, 'r')\n\n # Extract the zip file into the specified folder\n zf.extractall(self.output_folder)\n zf.close()\n\n # Delete the zip files now that we have extracted them\n os.remove(files)\n\n def find_in_data_frames_dict(self, file_name=None, list_of_values=None, column=\"\"):\n \"\"\"\n\n Parameters\n ----------\n file_name string\n list_of_values list\n column string\n\n Returns\n -------\n df: pandas dataframe\n \"\"\"\n\n # types of well authorization number header\n\n if column == \"\":\n WA_HEADER_NAMES = ['Wa Num', 'WA Num', 'Wa_num', 'WA_num', 'WA_NUM', 'WA_Num', 'Wa_Num', 'WA NUM',\n 'WA Number', 'Well Authorization Number']\n\n for header_names in self.dataframes_dict[file_name].columns:\n for wa_names in WA_HEADER_NAMES:\n if header_names == wa_names:\n column = header_names\n break\n\n if not column == \"\":\n break\n\n if column == \"\":\n sys.exit(f'Error Occurred, could not find a well authorization header in: {file_name}, please check the file')\n\n df = self.dataframes_dict[file_name].loc[self.dataframes_dict[file_name][column].isin(list_of_values)]\n\n return df, column\n\n def find_well_names(self, area_code=None, formation_code=None):\n \"\"\"\n Find all of the well names and UWI identifiers in the areas or formations that are defined\n :param area_code: list, required (default = None)\n The list of OGC area codes that are areas of interest to grab wells from\n :param formation_code: list, required (default = None)\n The list of formation codes that are of interest for the model to grab wells from\n :return:\n \"\"\"\n\n # Since this is the first step, use it to read in the data to the OGC data object\n training_data = ReadData()\n\n training_data.read_csv_folder(self.output_folder)\n\n self.dataframes_dict = training_data.pd_dict\n\n # Check the 'Fracture Fluid Data.csv' data for\n\n file_list = ['zone_prd_2016_to_present.csv', 'zone_prd_2007_to_2015.csv']\n tmp_prod_df = list()\n\n print(\"finding well names....\")\n for idx, file in enumerate(file_list):\n df1, _ = self.find_in_data_frames_dict(file_name=file, list_of_values=area_code, column='Area_code')\n\n df2, _ = self.find_in_data_frames_dict(file_name=file, list_of_values=formation_code, column='Formtn_code')\n\n tmp_prod_df.append(pd.concat([df1, df2]))\n\n self.wa_num.append(pd.concat(tmp_prod_df)['Wa_num'].to_list())\n\n total_prod_file = 'BC Total Production.csv'\n\n df1, _ = self.find_in_data_frames_dict(file_name=total_prod_file, list_of_values=area_code, column='Area Code')\n\n df2, _ = self.find_in_data_frames_dict(file_name=total_prod_file, list_of_values=formation_code,\n column='Formtn Code')\n\n df3 = pd.concat([df1, df2])\n\n self.wa_num.append(df3['Well Authorization Number'].to_list())\n\n print(\"found well names....\")\n\n self.wa_num = list(chain.from_iterable(self.wa_num))\n # Remove duplicates from the list\n self.wa_num = list(set(self.wa_num))\n\n self.feature_list = pd.DataFrame(self.wa_num, columns=['Well Authorization Number'])\n\n \n \n def read_well_data(self, file_name=None):\n \"\"\"\n\n Parameters\n ----------\n self\n\n file_name: dictionary that has the file and headers needed for the data object\n\n Returns\n -------\n\n \"\"\"\n # grab the dictionary entry for the file and filter it for the well authorization number list\n\n for key in 
file_name:\n filtered_df, wa_col = self.find_in_data_frames_dict(file_name=key, list_of_values=self.wa_num)\n\n # remove the columns from the header list in the dictionary\n file_name[key].append(wa_col)\n\n filtered_df = filtered_df.loc[:, file_name[key]]\n\n # rename WA Num to Well Authorization Number to match the other data frame with all of the wells\n filtered_df = filtered_df.rename(columns={wa_col: \"Well Authorization Number\"})\n\n if key in self.multiple_list:\n self.multiple_wells.append(filtered_df)\n self.multiple_names.append(key)\n else:\n if key == 'hydraulic_fracture.csv':\n filtered_df['FRAC STAGE NUM'].replace(\"DFIT\", 0, inplace =True)\n filtered_df['FRAC STAGE NUM'].replace(\"Dfit\", 0, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"9b\", 9, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"9B\", 9, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"9A\", 9, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"9a\", 9, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"8A\", 8, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"7A\", 7, inplace=True)\n filtered_df['FRAC STAGE NUM'].replace(\"1b\", 1, inplace=True)\n\n\n\n self.feature_list = pd.merge(self.feature_list, filtered_df, how=\"left\",\n on=['Well Authorization Number'])\n \n def calc_well_design(self, multiple_loc=-1):\n\n \"\"\"\n Function to calculate well design features from feature_list\n Features are then added to feature_list\n\n \"\"\"\n #create dataframe on cluster level from fields of interest in feature list\n\n # dfCluster = self.feature_list['Well Authorization Number',\n # 'PERF STAGE NUM',\n # 'INTERVAL TOP DEPTH (m)',\n # 'INTERVAL BASE DEPTH (m)',\n # 'Formtn_code',\n # 'Tvd_formtn_top_depth ',\n # 'Compltn_top_depth',\n # 'Compltn_base_depth',]\n\n #create dataframes on stage and well level\n dfStage = pd.DataFrame()\n dfWell = pd.DataFrame()\n dfCluster = pd.DataFrame()\n\n #Agg Cluster values to calculate number of clusters in a stage\n dfStage = self.multiple_wells[multiple_loc].groupby(['Well Authorization Number','PERF STAGE NUM']).agg({'INTERVAL TOP DEPTH (m)':'count'})\n dfStage.rename(columns = {'INTERVAL TOP DEPTH (m)':'Cluster Count'}, inplace =True)\n #Agg cluste values to determine max and min depths and total stages\n dfWell = self.multiple_wells[multiple_loc].groupby('Well Authorization Number').agg({'INTERVAL BASE DEPTH (m)':np.max,\n 'INTERVAL TOP DEPTH (m)':np.min,\n 'PERF STAGE NUM':'nunique'})\n dfWell.rename(columns = {'INTERVAL BASE DEPTH (m)':'Well Length',\n 'INTERVAL TOP DEPTH (m)': 'Heel Perf Depth',\n 'PERF STAGE NUM': 'Number of Stages'}, inplace = True)\n #Calculate completed length from the max and min clusters\n dfWell['Completed Length'] = dfWell['Well Length']-dfWell['Heel Perf Depth']\n\n\n dfCluster['Stage Length'] = self.feature_list['Compltn_base_depth'] - self.feature_list['Compltn_top_depth']\n\n #Merge in stage calc to cluster dataframe\n dfCluster = pd.merge(dfCluster, dfStage,\n on = ['Well Authorization Number','PERF STAGE NUM'],\n how = 'left')\n #Calc cluster spacing based on stage length and number of clusters\n dfCluster['Cluster Spacing'] = dfCluster['Stage Length']/(dfCluster['Cluster Count']-1)\n\n #Agg clusters to well level and take in median to avoid outliers and merge to well value\n dfClusterPivot = dfCluster.pivot_table(index = ['Well Authorization Number'],\n values = ['Stage Length','Cluster Spacing','Cluster Count'],\n aggfunc = np.median)\n dfWell = pd.merge(dfWell, dfClusterPivot, on = 'Well 
Authorization Number')\n\n        #Find wells as single point entry and adjust cluster spacing to be stage spacing\n        dfWell.loc[(dfWell['Cluster Count'] < 2), 'Cluster Spacing'] = dfWell['Completed Length']/dfWell['Number of Stages']\n\n        #Drop heel perf depth\n        dfWell = dfWell.drop(['Heel Perf Depth'], axis=1)\n        #Merge well dataframe to feature list\n        self.feature_list = pd.merge(self.feature_list, dfWell,\n                                     on = 'Well Authorization Number',\n                                     how = 'left')\n\n    def determine_frac_type(self):\n        \"\"\"\n        Use the feature list to determine the frac type and add them to the feature_list, then remove values needed\n        for this determination from the feature_list\n\n        Returns\n        -------\n\n        \"\"\"\n\n        print(\"Determining FRAC TYPE\")\n\n        self.feature_list.loc[\n            self.feature_list['CHARGE TYPE'].isnull() & self.feature_list['CHARGE SIZE (g)'].isnull() &\n            self.feature_list['SHOTS PER METER'].isnull() &\n            self.feature_list['DEGREE OF PHASING'].isnull(), \"FRAC TYPE\"] = 'Frac Sleeve'\n\n        self.feature_list.loc[self.feature_list['PERF COMMENTS'] == \"AbrasiveJet\", \"FRAC TYPE\"] = 'Frac Sleeve'\n        self.feature_list.loc[self.feature_list['PERF COMMENTS'] == \"Burst Disc\", \"FRAC TYPE\"] = 'Frac Sleeve'\n        self.feature_list.loc[self.feature_list['PERF COMMENTS'] == \"Toe Port\", \"FRAC TYPE\"] = 'Frac Sleeve'\n        self.feature_list.loc[self.feature_list['PERF COMMENTS'] == \"Cemented Sleeve\", \"FRAC TYPE\"] = 'Frac Sleeve'\n\n        self.feature_list.loc[self.feature_list['FRAC TYPE'].isnull(), \"FRAC TYPE\"] = 'Plug and Perf'\n\n        self.removal_list.append('PERF COMMENTS')\n        self.removal_list.append('TOTAL CO2 PUMPED (m3)')\n        self.removal_list.append('TOTAL N2 PUMPED (scm)')\n        self.removal_list.append('TOTAL CH4 PUMPED (e3m3)')\n        self.removal_list.append('Proppant Total Sum')\n        self.removal_list.append('PERF STAGE NUM')\n        self.removal_list.append('TOTAL FLUID PUMPED (m3)')\n        self.removal_list.append('PROPPANT TYPE1 PLACED (t)')\n        self.removal_list.append('PROPPANT TYPE2 PLACED (t)')\n        self.removal_list.append('PROPPANT TYPE3 PLACED (t)')\n        self.removal_list.append('PROPPANT TYPE4 PLACED (t)')\n\n\n    def calc_frac_props(self):\n        \"\"\"\n\n        Returns\n        -------\n\n        \"\"\"\n        print(\"Determining Proppant Total\")\n        self.feature_list['Proppant Total'] = self.feature_list['PROPPANT TYPE1 PLACED (t)'] + \\\n                                              self.feature_list['PROPPANT TYPE2 PLACED (t)'] + \\\n                                              self.feature_list['PROPPANT TYPE3 PLACED (t)'] + \\\n                                              self.feature_list['PROPPANT TYPE4 PLACED (t)']\n\n        group = self.feature_list.groupby('Well Authorization Number')\n\n        df2 = pd.DataFrame()\n\n        print(\"Determining Proppant Total Sum\")\n        df2['Proppant Total Sum'] = group.apply(lambda x: sum(x['Proppant Total']))\n\n        print(\"Determining Lateral Length\")\n        df2['Lateral Length'] = group.apply(\n            lambda x: max(x['COMPLTN BASE DEPTH (m)']) - min(x['COMPLTN TOP DEPTH (m)']))\n\n        print(\"Determining Average Treating Pressure\")\n        df2['Average Treating Pressure'] = group.apply(lambda x: x['AVG TREATING PRESSURE (MPa)'].mean())\n\n        print(\"Determining Average Injection Rate\")\n        df2['Average Injection Rate'] = group.apply(lambda x: x['AVG RATE (m3/min)'].mean())\n\n        print(\"Determining Frac Gradient (kPa/m)\")\n        df2['FRAC GRADIENT (KPa/m)'] = group.apply(lambda x: x['FRAC GRADIENT (KPa/m)'].mean())\n\n        print(\"Determining Tonnage Per Metre\")\n        df2['Tonnage per m'] = df2['Proppant Total Sum'] / df2['Lateral Length']\n        df2['Tonnage per m'] = df2['Tonnage per m'].round(2)\n\n        print(\"Determining Energizer\")\n\n        # TODO: Check what the Energizer should be with multiple Energizer types for 
different stages, if there is a stage w/o None, use that and move on\n        df2['Energizer'] = group.apply(lambda x: x['ENERGIZER'].value_counts().index.tolist()[0] if len(x['ENERGIZER'].value_counts().index.tolist()) > 0 else \"None\")\n        df2['Energizer Type'] = group.apply(lambda x: x['ENERGIZER TYPE'].value_counts().index.tolist()[0] if len(x['ENERGIZER TYPE'].value_counts().index.tolist()) > 0 else \"None\")\n\n        print(\"Determining Fluid Pumped (m3)\")\n        df2['Total Fluid Pumped (m3)'] = group.apply(lambda x: sum(x['TOTAL FLUID PUMPED (m3)']))\n\n        print(\"Determining Fluid Per Metre\")\n        df2['Fluid per m'] = df2['Total Fluid Pumped (m3)'] / df2['Lateral Length']\n        df2['Fluid per m'] = df2['Fluid per m'].round(2)\n\n        print(\"Determining Tonnage Per fluid m3\")\n        df2['Tonnage per m3'] = df2['Proppant Total Sum']/df2['Total Fluid Pumped (m3)']\n        df2['Tonnage per m3'] = df2['Tonnage per m3'].round(2)\n\n        print(\"Determining CO2 Pumped (m3)\")\n        df2['Total CO2 Pumped (m3)'] = group.apply(lambda x: sum(x['TOTAL CO2 PUMPED (m3)']))\n\n        print(\"Determining N2 Pumped (scm)\")\n        df2['Total N2 Pumped (scm)'] = group.apply(lambda x: sum(x['TOTAL N2 PUMPED (scm)']))\n\n        print(\"Determining Total CH4 Pumped (e3m3)\")\n        df2['Total CH4 Pumped (e3m3)'] = group.apply(lambda x: sum(x['TOTAL CH4 PUMPED (e3m3)']))\n        df2 = df2.reset_index()\n\n        df2 = df2.drop(['Lateral Length'], axis=1)\n\n        self.feature_list = pd.merge(self.feature_list, df2, how=\"left\", on=['Well Authorization Number'])\n\n        # TODO: create a list of things to remove from the feature_list\n        #self.removal_list.append('PERF COMMENTS')\n\n    def fill_feature_list_nan_with_val(self, columns=list(), val=0):\n        \"\"\"\n        fill the columns in the list with the value given instead of nan\n\n        Parameters\n        ----------\n        columns\n        val\n\n        Returns\n        -------\n\n        \"\"\"\n\n        for column in columns:\n            self.feature_list[column].replace(np.nan, val, inplace=True)\n\n    def convert_string_inputs_to_none(self, string_list):\n\n        for column in string_list:\n\n            self.feature_list[column].replace(np.nan, 'NONE', inplace=True)\n            list_of_strings = self.feature_list[column].to_list()\n            cleaned_list = list(set(list_of_strings))\n\n            for idx, newvals in enumerate(cleaned_list):\n                self.feature_list[column].replace(newvals, idx, inplace=True)\n\n\n    def remove_columns(self):\n        \"\"\"\n        Remove extra columns that were added to the feature list for the final stage before training\n\n        Returns\n        -------\n\n        \"\"\"\n\n        for column in self.removal_list:\n            self.feature_list = self.feature_list.drop([column], axis=1)\n\n        non_null_columns = [col for col in self.feature_list.columns if self.feature_list.loc[:, col].notna().any()]\n        self.feature_list = self.feature_list[non_null_columns]\n\n    def remove_wells(self):\n        self.feature_list = self.feature_list[~self.feature_list['Well Authorization Number'].isin(self.removal_wells)]\n\n    def print_feature_list_to_csv(self):\n        self.feature_list.to_csv(\"feature_list.csv\")\n\n    def aggregate_multiple_wells(self):\n\n        for idx, df in enumerate(self.multiple_list):\n            # aggregate the values in the well list, this has to be different for each of the files though for\n            # the averaging technique\n\n            if self.multiple_names[idx] == \"perf_net_interval.csv\":\n                self.calc_well_design(idx)\n            else:\n                pass\n\n    def calc_ip90_ip180(self):\n\n        # loop over the three well production files to determine the ip90 and ip180 for each well\n\n        df2 = pd.DataFrame(columns=['Well Authorization Number', 'IP90', 'IP180'])\n        for idx, df in 
enumerate(self.multiple_list):\n            if self.multiple_names[idx] == 'zone_prd_2007_to_2015.csv':\n                combined_df = self.multiple_wells[idx]\n            elif self.multiple_names[idx] == 'zone_prd_2016_to_present.csv':\n                combined_df = combined_df.append(self.multiple_wells[idx])\n            elif self.multiple_names[idx] == 'BC Total Production.csv':\n                # rename the total production columns\n                tmp_rename = self.multiple_wells[idx].rename(columns={\"Zone Prod Period\": \"Prod_period\", \"Oil Production (m3)\": \"Oil_prod_vol (m3)\", \"Gas Production (e3m3)\": \"Gas_prod_vol (e3m3)\", \"Condensate Production (m3)\": \"Cond_prod_vol (m3)\"})\n                combined_df = combined_df.append(tmp_rename)\n\n        # convert and sum condensate, oil and gas prod\n        combined_df['boe'] = combined_df['Oil_prod_vol (m3)'] + combined_df['Gas_prod_vol (e3m3)'] + combined_df['Cond_prod_vol (m3)']\n\n        for well_num in self.wa_num:\n            well_df = combined_df.loc[combined_df['Well Authorization Number'] == well_num]\n\n            well_df = well_df.sort_values(by=[\"Prod_period\"])\n            data = []\n            data.append([well_num, 0.0, 0.0])\n            df_ip = pd.DataFrame(data, columns=['Well Authorization Number', 'IP90', 'IP180'])\n\n            # assume that each period is one month\n            if (len(well_df) < 6):\n                # not enough production data to use for ip90/ip180\n                self.removal_wells.append(well_num)\n            else:\n                iptotal = well_df['boe'].sum()\n\n                if (iptotal < 0.0001):\n                    # this is not an oil well\n                    self.removal_wells.append(well_num)\n                else:\n\n                    ip90 = well_df.head(3)['boe'].sum()\n                    startindex = 0\n\n                    if (ip90 < 0.0001):\n                        # this well probably started on a different date, keep looking\n                        for index, row in well_df.iterrows():\n                            if (row['boe'] > 0.0001):\n                                startindex = index\n                                break\n\n                    df_ip['IP90'] = well_df.head(3 + startindex)['boe'].sum()\n                    df_ip['IP180'] = well_df.head(6 + startindex)['boe'].sum()\n\n                    df2 = df2.append(df_ip)\n\n        # merge into the main feature list\n        self.feature_list = pd.merge(self.feature_list, df2, how=\"left\", on=['Well Authorization Number'])\n\n    def create_cleaned_feature_list(self):\n        first_heads = ['Well Authorization Number',\n                       'Surf Nad83 Lat',\n                       'Surf Nad83 Long',\n                       'CHARGE TYPE',\n                       'VISCOSITY GEL TYPE',\n                       'ENERGIZER',\n                       'ENERGIZER TYPE',\n                       'PROPPANT TYPE1',\n                       'PROPPANT TYPE2',\n                       'PROPPANT TYPE3',\n                       'PROPPANT TYPE4',\n                       'FRAC TYPE',\n                       'Energizer',\n                       'Energizer Type']\n\n        min_heads = ['COMPLTN TOP DEPTH (m)']\n\n        max_heads = [\n            'COMPLTN BASE DEPTH (m)',\n            'FRAC STAGE NUM',\n            'IP90',\n            'IP180',\n            'Total Fluid Pumped (m3)']\n\n        average_heads = ['CHARGE SIZE (g)',\n                         'SHOTS PER METER',\n                         'DEGREE OF PHASING',\n                         'AVG RATE (m3/min)',\n                         'AVG TREATING PRESSURE (MPa)',\n                         'FRAC GRADIENT (KPa/m)_x',\n                         'Oil porsty',\n                         'Gas porsty',\n                         'Oil water satrtn',\n                         'Gas water satrtn',\n                         'Tvd oil net pay size',\n                         'Tvd gas net pay size',\n                         'Average Treating Pressure',\n                         'Average Injection Rate',\n                         'FRAC GRADIENT (KPa/m)_y',\n                         'Fluid per m',\n                         'Tonnage per m3']\n\n\n\n        all_headers = first_heads + min_heads + max_heads + average_heads\n        df = pd.DataFrame(columns=all_headers)\n\n        for well_num in self.wa_num:\n\n            if well_num not in self.removal_wells:\n                df = df.append({'Well Authorization Number':well_num}, ignore_index=True)\n                df_idx = len(df) - 1\n                tmp_well_df = self.feature_list[self.feature_list['Well Authorization Number'] == well_num]\n                tmp_well_df.is_copy = None\n                for headername in all_headers:\n                    tmp_well_df_first_idx = int(tmp_well_df.index[0])\n\n                    if headername in first_heads:\n                        df[headername][df_idx] = tmp_well_df[headername][tmp_well_df_first_idx]\n                    elif headername in min_heads:\n                        tmp_well_df[headername] = 
pd.to_numeric(tmp_well_df[headername], errors='coerce')\n tmp_well_df[headername].replace(np.nan, 0.0, inplace=True)\n df[headername][df_idx] = tmp_well_df.mode(numeric_only=True)[headername].min()\n elif headername in max_heads:\n tmp_well_df[headername] = pd.to_numeric(tmp_well_df[headername], errors='coerce')\n tmp_well_df[headername].replace(np.nan, 0.0, inplace=True)\n df[headername][df_idx] = tmp_well_df.mode(numeric_only=True)[headername].max()\n elif headername in average_heads:\n tmp_well_df[headername] = pd.to_numeric(tmp_well_df[headername], errors='coerce')\n tmp_well_df[headername].replace(np.nan, 0.0, inplace=True)\n df[headername][df_idx] = tmp_well_df.mode(numeric_only=True)[headername].mean()\n\n self.feature_list = df\n","repo_name":"chews0n/glowing-waffle","sub_path":"glowingwaffle/data/ScrapeOGC.py","file_name":"ScrapeOGC.py","file_ext":"py","file_size_in_byte":27241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"3193257584","text":"# coding=utf-8\nfrom .import cql_string\nfrom py2neo import Graph, RelationshipMatcher, NodeMatcher\nfrom .neo4j_mining_Edge import Neo4jMiningEdge\n\n\nBATCH_SIZE = 10000\n\n\ndef get_neo4j_connect(ip, username, password):\n return Graph(ip, username=username, password=password)\n\n\ndef get_relation_matcher(graph):\n relation_matcher = RelationshipMatcher(graph)\n node_selector = NodeMatcher(graph)\n return relation_matcher, node_selector\n\n\ndef run_cql(graph, cql):\n cypher = graph.run\n return cypher(cql)\n\n\ndef get_all_lables(graph):\n label_name = run_cql(graph, cql_string.get_all_lable)\n # return [list(_.values())[0] for _ in res.data()]\n return [_[\"label\"] for _ in label_name.data()]\n\n\ndef get_all_relations(graph):\n relation_name = run_cql(graph, cql_string.get_all_relations)\n # return [list(_.values())[0] for _ in res.data()]\n return [_[\"relation\"] for _ in relation_name.data()]\n\n\ndef get_lable_num(graph):\n lables = get_all_lables(graph)\n lables_sql = [cql_string.get_lable_count % (_, \"num\") for _ in lables]\n print(lables_sql)\n nums = [run_cql(graph, _).data()[0][\"num\"] for _ in lables_sql]\n return dict(zip(lables, nums))\n\n\ndef get_relation_num(graph):\n relations = get_all_relations(graph)\n relations_sql = [cql_string.get_all_relation_count % (_, \"num\") for _ in relations]\n print(relations_sql)\n nums = [run_cql(graph, _).data()[0][\"num\"] for _ in relations_sql]\n return dict(zip(relations, nums))\n\n\ndef get_all_1_degree_shape(graph):\n shape = run_cql(graph, cql_string.get_all_1_hrt_shape)\n # print(shape.data())\n # for _ in shape.data():\n # print(_[\"h_label\"][0], _[\"r_label\"], _[\"t_label\"][0])\n return [(_[\"h_label\"][0], _[\"r_label\"], _[\"t_label\"][0]) for _ in shape.data()]\n\n\ndef get_1_degree_shape_count(graph):\n h_r_t = get_all_1_degree_shape(graph)\n print(h_r_t)\n h_r_t_count_sql = [cql_string.get_all_1_hrt_shape_count % (_[0], _[1], _[2], \"num\") for _ in h_r_t]\n print(h_r_t_count_sql)\n nums = [run_cql(graph, _).data()[0][\"num\"] for _ in h_r_t_count_sql]\n return dict(zip(h_r_t, nums))\n\n\ndef match_by_hrt_lable(graph, hrt_lable, num):\n return match_by_hrt_lable_(graph, hrt_lable[0], hrt_lable[1], hrt_lable[2], num)\n\n\ndef match_by_hrt_lable_(graph, h_lable, r_lable, t_lable, num):\n batchsize = BATCH_SIZE\n batch = 0\n res_list = []\n while batch < num:\n batchcql = cql_string.match_data_by_hrt_lable % (h_lable, r_lable, t_lable, batch, batchsize)\n batch = batch + batchsize\n print(batchcql)\n 
relation = run_cql(graph, batchcql)\n res_list = res_list + relation.data()\n return res_list\n\n\ndef hrt_lable_generator(graph, h_lable, r_lable, t_lable, num):\n batchsize = 1\n batch = 0\n while batch < num:\n batchcql = cql_string.match_data_by_hrt_lable % (h_lable, r_lable, t_lable, batch, batchsize)\n batch = batch + batchsize\n print(batchcql)\n relation = run_cql(graph, batchcql).data()[0]\n yield relation[\"h\"], relation[\"r\"], relation[\"t\"]\n \"\"\"\n batchcql = cql_string.match_data_by_hrt_lable_no_batch % (h_lable, r_lable, t_lable)\n relation = run_cql(graph, batchcql)\n print(type(relation))\n while True:\n try:\n s = next(relation)\n print(s[\"h\"], s[\"r\"], s[\"t\"])\n except StopIteration:\n print(\"StopIteration....\")\n return\n \"\"\"\n\n\ndef get_labels_by_id(graph, node_id):\n label_cql = cql_string.get_nodelable_by_id % node_id\n label = run_cql(graph, label_cql).data()[0][\"label\"][0]\n return label\n\n\ndef get_relation_by_hid_tid(graph, h_node_id, t_node_id):\n relation_cql = cql_string.get_relation_by_hid_tid % (h_node_id, t_node_id)\n print(relation_cql)\n relation = run_cql(graph, relation_cql)\n relation = relation.data()\n if len(relation) != 0:\n relation = relation[0][\"relation\"]\n return relation\n else:\n return None\n\n\ndef get_all_node_id(graph):\n id_cql = cql_string.get_all_node_id\n id_s = run_cql(graph, id_cql)\n id_s = id_s.data()\n if len(id_s) != 0:\n id_s = [_[\"node_id\"] for _ in id_s]\n return id_s\n else:\n return None\n\n\ndef node_id_generator(graph):\n node_count = get_all_node_count(graph)\n batchsize = BATCH_SIZE\n batch = 0\n while batch < node_count:\n batch_nodeid_cql = cql_string.get_all_node_id_batch % (batch, batchsize)\n batch = batch + batchsize\n print(batch_nodeid_cql)\n node_ids = run_cql(graph, batch_nodeid_cql)\n node_ids = [_[\"node_id\"] for _ in node_ids]\n yield node_ids\n print(\"all node Done......\", node_count, node_count)\n\n\ndef get_all_node_count(graph):\n count_cql = cql_string.get_all_node_count\n count = run_cql(graph, count_cql)\n count = count.data()\n print(count)\n return count[0][\"node_count\"]\n\n\ndef node_generator(graph):\n count = len(graph.nodes)\n node_index = 0\n all_graph_nodes = 0\n # MATCH (n) WHERE id(n)=215 RETURN n;\n while all_graph_nodes < count:\n temp_node = graph.nodes.get(node_index)\n if temp_node is not None:\n yield temp_node\n all_graph_nodes += 1\n node_index += 1\n print(\"all node Done......\", count, all_graph_nodes)\n\n\ndef get_1D_node_by_startnode_id(graph, node_id):\n id_labels_cql = cql_string.get_1D_node_by_startnode_id_and_relationname % node_id\n id_labels = run_cql(graph, id_labels_cql)\n id_labels = id_labels.data()\n id_labels = [(_[\"node_id\"], _[\"node_label\"][0], _[\"relation_name\"]) for _ in id_labels]\n print(id_labels)\n return id_labels\n\n\ndef get_1D_node_by_startnode_id_2_Neo4jMiningEdge(graph, node_id):\n start_lable = get_labels_by_id(graph, node_id)\n # [(16213, 'person', 'former_colleague')......]\n id_labels_cql = cql_string.get_1D_node_by_startnode_id_and_relationname % node_id\n id_labels = run_cql(graph, id_labels_cql)\n id_labels = id_labels.data()\n # Neo4jMiningEdge(frm, to, elb, frmlb, tolb)\n edges = [Neo4jMiningEdge(node_id, _[\"node_id\"], _[\"relation_name\"], start_lable, _[\"node_label\"][0])\n for _ in id_labels]\n return edges\n\n\ndef get_data_by_hrt_lable(graph, h_label, r_label, t_label):\n hrt_cql = cql_string.get_data_by_hrt_lable % (h_label, r_label, t_label)\n hrt = run_cql(graph, hrt_cql)\n hrt = hrt.data()\n hrt = 
[(_[\"h\"], _[\"r\"], _[\"t\"]) for _ in hrt]\n # print(hrt)\n return hrt\n","repo_name":"fangjiegao/ssm_neo4j","sub_path":"ssm_neo4j/neo4j_mining_tool.py","file_name":"neo4j_mining_tool.py","file_ext":"py","file_size_in_byte":6434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35688858399","text":"# -*- coding: utf-8 -*-\n\nfrom models import User, Item, ItemBrand, ItemCategory, Device, Order, Road\nfrom models import Redeem, Supplyer, AddressType, RedeemActivity, VoiceActivity\nfrom models import VoiceWord, SupplyList, DayDeviceStat\nfrom datetime import datetime, timedelta\nfrom const import (RoadStatusMsg, FaultMsg, OrderStatusMsg, RedeemStatusMsg,\n SupplyStatusMsg)\n\n\"\"\"\n筛选器\n\"\"\"\n\nMODEL_NAMES = {\n \"user\": User,\n \"item\": Item,\n \"itembrand\": ItemBrand,\n \"brand\": ItemBrand,\n \"itemcategory\": ItemCategory,\n \"category\": ItemCategory,\n \"device\": Device,\n \"order\": Order,\n \"road\": Road,\n \"redeem\": Redeem,\n \"voiceword\": VoiceWord,\n \"address_type\": AddressType,\n \"supplyer\": Supplyer,\n \"supplylist\": SupplyList,\n (\"redeem\", \"activity\"): RedeemActivity,\n (\"voiceword\", \"activity\"): VoiceActivity,\n \"daydevicestat\": DayDeviceStat\n}\n\n\nclass AttributeSelector(object):\n OPS = []\n\n def __init__(self, mcls, attr_name, operator, value):\n if operator not in self.OPS:\n raise Exception(\"operator error\")\n self.mcls = mcls\n self.attr_name = attr_name\n self.operator = operator\n self.value = value\n\n def parse(self):\n raise NotImplemented\n\n def __str__(self):\n return \"{}({}{}{})\".format(self.__class__.__name__,\n self.attr_name,\n self.operator,\n self.value)\n\n @classmethod\n def get_options(self, attr, proxy_name=\"\"):\n return []\n\n\nclass NumberSelector(AttributeSelector):\n\n OPS = [\n \">\",\n \"<\",\n \"=\",\n \">=\",\n \"<=\",\n \"≠\",\n ]\n TYPE_NAME = \"number\"\n\n def parse(self):\n attr = getattr(self.mcls, self.attr_name)\n op = self.operator\n if op == \">\":\n return attr > self.value\n elif op == \"≠\":\n return attr != self.value\n elif op == \"=\":\n return attr == self.value\n elif op == \"<\":\n return attr < self.value\n elif op == \">=\":\n return attr >= self.value\n elif op == \"<=\":\n return attr <= self.value\n\n\nclass IDSelector(AttributeSelector):\n\n OPS = [\n \"=\",\n \"≠\",\n ]\n TYPE_NAME = \"id\"\n\n def parse(self):\n attr = getattr(self.mcls, self.attr_name)\n op = self.operator\n if op == \"≠\":\n return attr != self.value\n elif op == \"=\":\n return attr == self.value\n\n @classmethod\n def get_options(self, attr, proxy_name=\"\"):\n if proxy_name == RoadSelectorProxy.name:\n if attr == \"status\":\n return [{\"key\": k, \"name\": v} for k, v in RoadStatusMsg.items()]\n elif attr == \"fault\":\n return [{\"key\": k, \"name\": v} for k, v in FaultMsg.items()]\n elif proxy_name == OrderSelectorProxy.name:\n if attr == \"status\":\n return [{\"key\": k, \"name\": v} for k, v in OrderStatusMsg.items()]\n elif proxy_name == RedeemSelectorProxy.name:\n if attr == \"status\":\n return [{\"key\": k, \"name\": v} for k, v in RedeemStatusMsg.items()]\n elif proxy_name == VoiceWordSelectorProxy.name:\n if attr == \"status\":\n return [{\"key\": k, \"name\": v} for k, v in RedeemStatusMsg.items()]\n elif proxy_name == SupplyListSelectorProxy.name:\n if attr == \"status\":\n return [{\"key\": k, \"name\": v} for k, v in SupplyStatusMsg.items()]\n\n tmp = attr.split(\"__\")\n if len(tmp) > 1:\n model_name = tmp[-1]\n else:\n model_name 
= tmp[0]\n\n        if (proxy_name, model_name) in MODEL_NAMES:\n            model_cls = MODEL_NAMES[(proxy_name, model_name)]\n        else:\n            model_cls = MODEL_NAMES[model_name]\n\n        qs = model_cls.select()\n        return [{\"key\": obj.key, \"name\": obj.name} for obj in qs]\n\n\nclass StringSelector(AttributeSelector):\n\n    OPS = [\n        \"是\",\n        \"包含\",\n        \"不是\",\n        \"不包含\",\n        \"开头是\",\n        \"结尾是\",\n        \"开头不是\",\n        \"结尾不是\"\n    ]\n    TYPE_NAME = \"string\"\n\n    def parse(self):\n        attr = getattr(self.mcls, self.attr_name)\n        op = self.operator\n        if op == \"是\":\n            return attr == self.value\n        elif op == \"不是\":\n            return attr != self.value\n        elif op == \"包含\":\n            return attr.contains(self.value)\n        elif op == \"不包含\":\n            return ~attr.contains(self.value)\n        elif op == \"开头是\":\n            return attr.startswith(self.value)\n        elif op == \"结尾是\":\n            return attr.endswith(self.value)\n        elif op == \"开头不是\":\n            return ~attr.startswith(self.value)\n        elif op == \"结尾不是\":\n            return ~attr.endswith(self.value)\n\n\nclass DateSelector(AttributeSelector):\n\n    OPS = [\n        \"最近\",\n        \"固定时段\"\n    ]\n    TYPE_NAME = \"date\"\n\n    def parse(self):\n        attr = getattr(self.mcls, self.attr_name)\n        op = self.operator\n\n        if op == \"最近\":\n            start_date = datetime.now() - timedelta(days=int(self.value))\n            return attr >= start_date\n        elif op == \"固定时段\":\n            start_date = datetime.strptime(self.value[0], \"%Y-%m-%d\")\n            end_date = datetime.strptime(self.value[1], \"%Y-%m-%d\")\n            end_date = end_date + timedelta(days=1)\n            return (attr >= start_date) & (attr < end_date)\n\n\nclass BooleanSelector(AttributeSelector):\n\n    OPS = [\n        \"是\",\n        \"不是\"\n    ]\n    TYPE_NAME = \"bool\"\n\n    def parse(self):\n        assert self.value in [True, False]\n        attr = getattr(self.mcls, self.attr_name)\n        return attr == self.value\n\n    @classmethod\n    def get_options(cls, attr, proxy_name=\"\"):\n        return {\n            True: \"是\",\n            False: \"否\",\n        }\n\n\nclass EventSelector(object):\n\n    pass\n\n\nclass SelectorProxy(object):\n    name = \"\"\n    attribute_selectors = {}\n    sub_proxies = {}\n    ignore_displays = tuple()  # selectors that are hidden from the front end\n\n    def __init__(self, conditions):\n        self.conditions = conditions\n        self.to_join_models = set()\n        self.parse()\n\n    @classmethod\n    def get_display_info(cls, name):\n\n        if not SelectorProxy.sub_proxies:\n            for subcls in SelectorProxy.__subclasses__():\n                SelectorProxy.sub_proxies[subcls.name] = subcls\n\n        selector_cls = SelectorProxy.sub_proxies[name]\n\n        attribute_selectors = []\n        ops = {}\n        for attribute, (display, selector) in selector_cls.attribute_selectors.items():\n            if attribute in selector_cls.ignore_displays:\n                continue\n\n            attribute_selectors.append({\n                \"attribute\": attribute,\n                \"displayName\": display,\n                \"dataType\": selector.TYPE_NAME,\n                \"options\": selector.get_options(attribute, proxy_name=name),\n            })\n            if selector.TYPE_NAME not in ops:\n                ops[selector.TYPE_NAME] = selector.OPS\n\n        return {\n            \"attributeSelectors\": attribute_selectors,\n            \"eventSelectors\": [],\n            \"operators\": ops,\n        }\n\n    def _parse_single(self, condition):\n        if \"attribute\" in condition:\n            attr_name = condition[\"attribute\"]\n            if attr_name not in self.attribute_selectors:\n                return\n\n            if \"value\" not in condition or \"operator\" not in condition:\n                return\n\n            trans = self.trans_condition(attr_name, condition[\"value\"])\n            if trans:\n                return trans\n\n            _, selector_cls = self.attribute_selectors[attr_name]\n\n            attr_split = attr_name.split(\"__\")\n            if len(attr_split) == 2:\n                model_name, attr_name = tuple(attr_split)\n                mcls = MODEL_NAMES[model_name]\n                self.to_join_models.add(mcls)\n            else:\n                mcls = MODEL_NAMES[self.name]\n\n            
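# Each condition maps to an AttributeSelector subclass; __init__ validates the operator against OPS and parse() builds the ORM comparison, so a malformed condition is silently skipped by the guard below instead of aborting the whole query.\n            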
try:\n                selector = selector_cls(mcls,\n                                        attr_name,\n                                        condition[\"operator\"],\n                                        condition[\"value\"])\n            except Exception:\n                return\n            return selector.parse()\n\n        elif \"event\" in condition:  # event selector (not implemented yet)\n            pass\n\n    def trans_condition(self, attr, val):\n        \"Condition conversion hook; subclasses may override.\"\n        return None\n\n    def parse(self):\n        and_lst = []\n        for or_conds in self.conditions:\n            or_lst = []\n            for cond in or_conds:\n                tmp = self._parse_single(cond)\n                if not tmp:\n                    continue\n                or_lst.append(tmp)\n\n            if or_lst:\n                and_lst.append(reduce(lambda x, y: x | y, or_lst))\n\n        if and_lst:\n            where = reduce(lambda x, y: x & y, and_lst)\n        else:\n            where = None\n        self.where_phrase = where\n\n    def select(self):\n        qs = MODEL_NAMES[self.name].select()\n        for m in self.to_join_models:\n            qs = qs.join(m)\n\n        mcls = MODEL_NAMES[self.name]\n        if self.where_phrase is not None:\n            return qs.where(self.where_phrase).order_by(-getattr(mcls, \"id\"))\n        else:\n            return qs.order_by(-getattr(mcls, \"id\"))\n\n\nclass UserSelectorProxy(SelectorProxy):\n\n    name = \"user\"\n    attribute_selectors = {\n        \"username\": (\"用户名\", StringSelector),\n        \"mobile\": (\"手机号\", StringSelector),\n        \"created_at\": (\"注册时间\", DateSelector)\n    }\n\n\nclass AdminSelectorProxy(SelectorProxy):\n\n    name = \"admin\"\n    attribute_selectors = {\n        \"username\": (\"用户名\", StringSelector),\n        \"mobile\": (\"手机号\", StringSelector),\n        \"created_at\": (\"注册时间\", DateSelector)\n    }\n\n\nclass ItemSelectorProxy(SelectorProxy):\n\n    name = \"item\"\n    attribute_selectors = {\n        \"name\": (\"商品名称\", StringSelector),\n        \"brand\": (\"商品品牌\", IDSelector),\n        \"category\": (\"商品分类\", IDSelector)\n    }\n\n\nclass ItemCategorySelectorProxy(SelectorProxy):\n\n    name = \"itemcategory\"\n    attribute_selectors = {\n        \"name\": (\"分类名称\", StringSelector),\n    }\n\n\nclass ItemBrandSelectorProxy(SelectorProxy):\n\n    mcls = ItemBrand\n    name = \"itembrand\"\n    attribute_selectors = {\n        \"name\": (\"品牌名称\", StringSelector),\n    }\n\n\nclass DeviceSelectorProxy(SelectorProxy):\n\n    name = \"device\"\n    attribute_selectors = {\n        \"involved\": (\"是否已接入\", BooleanSelector),\n        \"name\": (\"设备名称\", StringSelector),\n        \"no\": (\"设备sn\", StringSelector),\n        \"online\": (\"是否在线\", BooleanSelector),\n        \"address_type\": (\"投放类型\", IDSelector),\n        \"supplyer\": (\"配货员\", IDSelector),\n        \"province\": (\"省份\", StringSelector),\n        \"is_stockout\": (\"是否缺货\", BooleanSelector)\n    }\n\n    ignore_displays = (\"involved\", )\n\n    def trans_condition(self, attr, val):\n        if attr == \"online\":\n            if val is True:\n                return Device.heartbeat_at > (datetime.now() -\n                                              timedelta(seconds=Device.ONLINE_SECONDS))\n            else:\n                return Device.heartbeat_at <= (datetime.now() -\n                                               timedelta(seconds=Device.ONLINE_SECONDS))\n        return None\n\n\nclass OrderSelectorProxy(SelectorProxy):\n\n    name = \"order\"\n    attribute_selectors = {\n        \"item__name\": (\"商品名称\", StringSelector),\n        \"no\": (\"订单号\", StringSelector),\n        \"status\": (\"订单状态\", IDSelector),\n        \"device__province\": (\"省份\", StringSelector),\n        \"created_at\": (\"订单时间\", DateSelector),\n        \"device__address_type\": (\"场地\", IDSelector),\n        \"item\": (\"商品编号\", IDSelector),\n        \"device\": (\"设备\", IDSelector)\n    }\n\n\nclass RoadSelectorProxy(SelectorProxy):\n\n    name = \"road\"\n    attribute_selectors = {\n        \"item\": (\"商品\", IDSelector),\n        \"device\": (\"设备\", IDSelector),\n        \"device__address_type\": (\"场地\", IDSelector),\n        \"device__supplyer\": (\"补货员\", IDSelector)\n        # \"device__name\": (\"设备名\", StringSelector),\n        # \"device__online\": (\"是否在线\", BooleanSelector),\n        # \"status\": (\"配货状态\", IDSelector),\n        # \"fault\": (\"故障状态\", IDSelector),\n    
}\n\n\nclass RedeemSelectorProxy(SelectorProxy):\n\n name = \"redeem\"\n attribute_selectors = {\n \"user__username\": (\"用户名\", StringSelector),\n \"activity\": (\"活动ID\", IDSelector),\n \"status\": (\"兑换状态\", IDSelector),\n \"device\": (\"设备ID\", IDSelector),\n \"use_at\": (\"兑换日期\", DateSelector),\n }\n\n\nclass VoiceWordSelectorProxy(SelectorProxy):\n\n name = \"voiceword\"\n attribute_selectors = {\n \"user__username\": (\"用户名\", StringSelector),\n \"activity\": (\"活动ID\", IDSelector),\n \"activity__item\": (\"商品ID\", IDSelector),\n \"status\": (\"兑换状态\", IDSelector),\n \"device\": (\"设备ID\", IDSelector),\n \"use_at\": (\"兑换日期\", DateSelector),\n }\n\n\nclass SupplyListSelectorProxy(SelectorProxy):\n\n name = \"supplylist\"\n attribute_selectors = {\n \"status\": (\"配货状态\", IDSelector),\n }\n\n\nclass DayDeviceStatProxy(SelectorProxy):\n\n name = \"daydevicestat\"\n attribute_selectors = {\n \"device\": (\"设备编号\", IDSelector),\n \"device__address_type\": (\"场地\", IDSelector),\n \"created_at\": (\"时间\", DateSelector),\n }","repo_name":"tonyfromsz/S-InvBox","sub_path":"selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":13711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7748035433","text":"########################################\n__author__ = \"Abdelrahman Eldesokey\"\n__license__ = \"GNU GPLv3\"\n__version__ = \"0.1\"\n__maintainer__ = \"Abdelrahman Eldesokey\"\n__email__ = \"abdo.eldesokey@gmail.com\"\n########################################\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.conv import _ConvNd\nimport numpy as np\nfrom scipy.stats import poisson\nfrom scipy import signal\n\n# The proposed Normalized Convolution Layer\nclass NConv2d(_ConvNd):\n def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus', init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True):\n \n # Call _ConvNd constructor\n super(NConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, (padding,padding), dilation, False, 0, groups, bias,'zeros')\n \n self.eps = 1e-20\n self.pos_fn = pos_fn\n self.init_method = init_method\n \n # Initialize weights and bias\n self.init_parameters()\n \n if self.pos_fn is not None :\n EnforcePos.apply(self, 'weight', pos_fn)\n\n \n \n def forward(self, data, conf):\n \n # Normalized Convolution\n denom = F.conv2d(conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nomin = F.conv2d(data*conf, self.weight, None, self.stride,\n self.padding, self.dilation, self.groups) \n nconv = nomin / (denom+self.eps)\n \n \n # Add bias\n b = self.bias\n sz = b.size(0)\n b = b.view(1,sz,1,1)\n b = b.expand_as(nconv)\n nconv += b\n \n # Propagate confidence\n cout = denom\n sz = cout.size()\n cout = cout.view(sz[0], sz[1], -1)\n \n k = self.weight\n k_sz = k.size()\n k = k.view(k_sz[0], -1)\n s = torch.sum(k, dim=-1, keepdim=True) \n\n cout = cout / s\n cout = cout.view(sz)\n \n return nconv, cout\n \n \n def init_parameters(self):\n # Init weights\n if self.init_method == 'x': # Xavier \n torch.nn.init.xavier_uniform_(self.weight)\n elif self.init_method == 'k': # Kaiming\n torch.nn.init.kaiming_uniform_(self.weight)\n elif self.init_method == 'p': # Poisson\n mu=self.kernel_size[0]/2 \n dist = poisson(mu)\n x = np.arange(0, self.kernel_size[0])\n y = np.expand_dims(dist.pmf(x),1)\n w = signal.convolve2d(y, y.transpose(), 'full')\n w = 
torch.Tensor(w).type_as(self.weight)\n w = torch.unsqueeze(w,0)\n w = torch.unsqueeze(w,1)\n w = w.repeat(self.out_channels, 1, 1, 1)\n w = w.repeat(1, self.in_channels, 1, 1)\n self.weight.data = w + torch.rand(w.shape)\n \n # Init bias\n self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)\n \n \n# Non-negativity enforcement class \nclass EnforcePos(object):\n def __init__(self, pos_fn, name):\n self.name = name\n self.pos_fn = pos_fn\n\n\n @staticmethod\n def apply(module, name, pos_fn):\n fn = EnforcePos(pos_fn, name)\n \n module.register_forward_pre_hook(fn) \n\n return fn\n\n def __call__(self, module, inputs):\n if module.training:\n weight = getattr(module, self.name)\n weight.data = self._pos(weight).data\n else:\n pass\n\n def _pos(self, p):\n pos_fn = self.pos_fn.lower()\n if pos_fn == 'softmax':\n p_sz = p.size()\n p = p.view(p_sz[0],p_sz[1], -1)\n p = F.softmax(p, -1)\n return p.view(p_sz)\n elif pos_fn == 'exp':\n return torch.exp(p)\n elif pos_fn == 'softplus':\n return F.softplus(p, beta=10)\n elif pos_fn == 'sigmoid':\n return F.sigmoid(p)\n else:\n print('Undefined positive function!')\n return \n \n","repo_name":"GDAOSU/vis2mesh","sub_path":"model/model_parts/nconv.py","file_name":"nconv.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"62"} +{"seq_id":"24787544536","text":"import os\nimport keras \nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.style.use('dark_background')\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder \n\nencoder = OneHotEncoder()\nencoder.fit([[0], [1]]) \n\n# 0 - Tumor\n# 1 - Normal\n\n# This cell updates result list for images with tumor\ndata = []\npaths = []\nresult = []\n\nfor r, d, f in os.walk(r'./brain_tumor_dataset/yes'):\n for file in f:\n if '.jpg' in file:\n paths.append(os.path.join(r, file))\n\nfor path in paths:\n img = Image.open(path)\n img = img.resize((128,128))\n img = np.array(img)\n if(img.shape == (128,128,3)):\n data.append(np.array(img))\n result.append(encoder.transform([[0]]).toarray())\n\n# This cell updates result list for images without tumor\npaths = []\nfor r, d, f in os.walk(r\"./brain_tumor_dataset/no\"):\n for file in f:\n if '.jpg' in file:\n paths.append(os.path.join(r, file))\n\nfor path in paths:\n img = Image.open(path)\n img = img.resize((128,128))\n img = np.array(img)\n if(img.shape == (128,128,3)):\n data.append(np.array(img))\n result.append(encoder.transform([[1]]).toarray())\n\ndata = np.array(data)\ndata.shape\n\nresult = np.array(result)\nresult = result.reshape(139,2)\n\n# Splitting the Data into Training & Testing\nx_train,x_test,y_train,y_test = train_test_split(data, result, test_size=0.2, shuffle=True, random_state=0)\n\n# Model building\nmodel = Sequential()\n\nmodel.add(Conv2D(32, kernel_size=(2, 2), input_shape=(128, 128, 3), padding = 'Same'))\nmodel.add(Conv2D(32, kernel_size=(2, 2), activation ='relu', padding = 'Same'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, kernel_size = (2,2), activation ='relu', padding = 'Same'))\nmodel.add(Conv2D(64, kernel_size = (2,2), activation ='relu', padding = 
'Same'))\n\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(2, activation='softmax'))\n\nmodel.compile(loss = \"categorical_crossentropy\", optimizer='Adamax')\nprint(model.summary())\n\nprint(y_train.shape)\n\nhistory = model.fit(x_train, y_train, epochs = 20, batch_size = 40, verbose = 1,validation_data = (x_test, y_test))\n\n# Plotting Losses\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model Loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Validation'], loc='upper right')\nplt.show()\n\n# Checking the Model\ndef names(number):\n    if number==0:\n        return \"It's a Tumor\"\n    else:\n        return \"No, it's not a tumor\"\n\n# Test No Tumor Image - Negative case\nfrom matplotlib.pyplot import imshow\nimg = Image.open(r\"./brain_tumor_dataset/no/N17.jpg\")\nx = np.array(img.resize((128,128)))\nx = x.reshape(1,128,128,3)\nres = model.predict_on_batch(x)\nclassification = np.where(res == np.amax(res))[1][0]\nimshow(img)\nprint(str(res[0][classification]*100) + '% Confidence This Is ' + names(classification))\n\n# Test Tumor Image - Positive case\nfrom matplotlib.pyplot import imshow\nimg = Image.open(r\"./brain_tumor_dataset/yes/Y3.jpg\")\nx = np.array(img.resize((128,128)))\nx = x.reshape(1,128,128,3)\nres = model.predict_on_batch(x)\nclassification = np.where(res == np.amax(res))[1][0]\nimshow(img)\nprint(str(res[0][classification]*100) + '% Confidence This Is A ' + names(classification))\n","repo_name":"preetika22/CC-JAN-Data_Science","sub_path":"Task2_Tumor_detection/BrainTumorClassification/tumor-classification.py","file_name":"tumor-classification.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23649483636","text":"\"\"\"delete columns\n\nRevision ID: 4d0d897e8da6\nRevises: 387c94576d21\nCreate Date: 2020-05-01 20:34:14.178434\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4d0d897e8da6'\ndown_revision = '387c94576d21'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('assignment', sa.Column('due', sa.DateTime(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('assignment', 'due')\n    # ### end Alembic commands ###\n","repo_name":"anfelixsoto/cst205_calendar","sub_path":"migrations/versions/4d0d897e8da6_delete_columns.py","file_name":"4d0d897e8da6_delete_columns.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71935148038","text":"# How to define a function\n# def function_name(param):\n#     code\n\n\n# Function declaration\ndef first_function(w):\n    print('Hello ' + w)\n\nfirst_function('student')\n\n\ndef second_function(w):\n    result = \"Hello, \" + str(w)\n    return result\n\nprint(second_function('student'))\n\n\ndef mul(x):\n    y1 = x*10\n    y2 = x*20\n    y3 = x*30\n    return y1, y2, y3  # Multiple values can be returned at once.\n\nx, y, z = mul(10)  # The results must then be unpacked into the same number of variables.\n\nprint(x, y, z)\n\n\ndef mul2(x):\n    y1 = x*10\n    y2 = x*20\n    y3 = x*30\n    return (y1, y2, y3)  # Values can also be returned as a tuple.\n\nt = mul2(10)\n\nprint(t)\n\n\ndef mul3(x):\n    y1 = x*10\n    y2 = x*20\n    y3 = x*30\n    return [y1, y2, y3]  # Values can also be returned as a list.\n\nl = mul3(10)\n\nprint(l)\n\n\ndef mul4(x):\n    y1 = x*10\n    y2 = x*20\n    y3 = x*30\n    return {'0': y1, '1': y2, '2': y3}  # Values can also be returned as a dictionary.\n\nd = mul4(10)\n\nprint(d.get('0'))\nprint()\n\n\n\n# Important\n# *args, **kwargs\n\n# *args (unpacking)\n# Used to accept any sequence type as parameters, e.g. list, tuple, set, etc.\n# The enumerate() function gives access to both the index and the element of a sequence at the same time.\n# Reference: https://www.daleseo.com/python-enumerate/\ndef args_function(*args):  # The parameter name is arbitrary.\n    for i, v in enumerate(args):\n        print('Result: {}'.format(i), v)\n    print('----------------')\n\nargs_function(*[12,13,14,15])  # Pass a list as the argument; the * prefix is required. \nargs_function(12,13,14,15)  # Same as above; several plain values are collected into a tuple. \nargs_function(*('Kim', 'Lee', 'Park'))  # A tuple can also be passed directly. \nargs_function(*{'zero', 'one', 'two'})  # A set has no order, so the elements come out in arbitrary order. \nprint()\n\n\n# **kwargs (unpacking)\n# kwargs is short for keyword arguments. \n# Used to accept key-value style data as parameters. \ndef kwargs_function(**kwargs):\n    for v in kwargs.keys():\n        print('{}'.format(v), kwargs[v])\n    print('----------------')\n\nkwargs_function(**{'name1': 'Kim', 'name2': 'Lee', 'name3': 'Park'})  # Pass a dictionary; the ** prefix is required. \nkwargs_function(name1='Kim', name2='Lee', name3='Park')  # Same as above, only the syntax differs.\nprint()\nprint()\n\n\n# Mixing all of them\ndef ex1(arg1, arg2, *args, **kwargs):\n    print(arg1, arg2, args, kwargs)\n\nex1(10, 20, 'Kim', 'Lee', 'Park', age1=20, age2=30, age3=40)  # Running this shows two plain variables, one tuple and one dictionary.\nprint()\n\n\n# Nested functions\ndef parent_function(num):\n    print('The parent function was called.')\n    print('The value received as a parameter is {}.'.format(num))\n    print('Calling the child function.')\n    def child_function(num):\n        print('The child function was called.')\n        print('Adding 100 to the parameter from the parent gives {}.'.format(num+100))\n    print('Ending the child function call.')\n    child_function(num)\n    print('Ending the parent function call.')\n\nparent_function(100)\nprint()\nprint()\n\n\n\n# Lambda expressions\n# Save memory, improve readability, keep code concise\n# Defining a function object -> memory is allocated\n# A lambda is used and discarded immediately (heap freed) -> memory is reclaimed\n# Overusing them hurts readability...\n\na = lambda x,y: x*y  # Can be assigned to a variable, like an anonymous function in 
JS.\nprint(a(5,6)) \n\ndef func(func):\n    print(func(100,100))\nfunc(lambda x,y: x*y)  # A lambda is convenient when calling a function that takes a function as a parameter.\n\n","repo_name":"Hanjunhee-1/Python-study","sub_path":"04_파이썬함수및입력/01_함수.py","file_name":"01_함수.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35868289466","text":"\nimport tensorflow as tf\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n    tf.set_random_seed(777)\n    learning_rate = 0.1\n\n    x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)\n    y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)\n\n    input_x = tf.placeholder(dtype=tf.float32, shape=[None, 2])\n    target_y = tf.placeholder(dtype=tf.float32, shape=[None, 1])\n\n    # hidden_layer_1\n    w1 = tf.Variable(tf.random_normal([2, 10]), name=\"w1\")\n    b1 = tf.Variable(tf.random_normal([10]), name=\"b1\")\n\n    layer1 = tf.sigmoid(tf.matmul(input_x, w1) + b1)\n\n    # hidden_layer_2\n    w2 = tf.Variable(tf.random_normal([10, 4]), name=\"w2\")\n    b2 = tf.Variable(tf.random_normal([4]), name=\"b2\")\n\n    layer2 = tf.sigmoid(tf.matmul(layer1, w2) + b2)\n\n    # hidden_layer_3\n    w3 = tf.Variable(tf.random_normal([4, 1]), name=\"w3\")\n    b3 = tf.Variable(tf.random_normal([1]), name=\"b3\")\n\n    # Fully_Connected_layer\n    fc_layer = tf.sigmoid(tf.matmul(layer2, w3) + b3)\n\n    # logL(w) == l(w) --> binary cross entropy\n    cost = -tf.reduce_mean(target_y * tf.log(fc_layer) + (1 - target_y) * tf.log(1 - fc_layer))\n    train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\n\n    prediction = tf.cast(fc_layer > 0.5, dtype=tf.float32)\n    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, target_y), dtype=tf.float32))\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n        for step in range(10000):\n            _, _cost = sess.run([train, cost], feed_dict={input_x: x_data, target_y: y_data})\n            if step % 100 == 0:\n                print(\"cost: {}\".format(_cost))\n\n        _h, _p, _a = sess.run([fc_layer, prediction, accuracy], feed_dict={input_x: x_data, target_y: y_data})\n        print(\"h: {}\\n, p: {}\\n, a: {}\".format(_h, _p, _a))","repo_name":"hojeong3709/reinforcement-learning","sub_path":"tensorflow-keras-lab/1. tensorflow/ex02/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9855739374","text":"#! 
/usr/bin/env python3\n\nimport gsm7bit\nimport nexmo\n\nNEXMO_APIKEY = \"xxx\"\nNEXMO_SECRET = \"xxx\"\ndestination = 'xxx'\n\nc = gsm7bit.Converter()\nclient = nexmo.Client(key=NEXMO_APIKEY, secret=NEXMO_SECRET)\n\nwhile True:\n\tmsg = input(\"Message:\")\n\tdata = c.encode(msg)\n\tclient.send_message({'from': 'Test', 'to': destination, 'type': 'binary', 'udh': '050003CC0101', 'body' : data, 'protocol-id' : '65'})\n\t","repo_name":"nexmo-community/python-gsm7bit","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"39046941422","text":"import copy\nfrom ovs.dal.helpers import Descriptor\nfrom ovs.dal.relations import RelationMapper\nfrom ovs_extensions.generic.toolbox import ExtensionsToolbox\nfrom rest_framework import serializers\n\n\nclass UnsupportContentException(ValueError):\n \"\"\"\n Exception raised when an unsupported content string has been given\n \"\"\"\n pass\n\n\nclass ContentOptions(object):\n \"\"\"\n Content options to give to the serializer\n \"\"\"\n OPTION_TYPES = {'_relations_depth': (int, None, False),\n '_relations_content': (str, None, False)}\n OPTION_STARTS = {'_relation_contents_': (str, None, False)}\n\n def __init__(self, contents=None):\n \"\"\"\n Initializes a ContentOptions object based on a string representing the contents\n :param contents: Comma separated string or list of contents to serialize\n When contents is given, all non-dynamic properties would be serialized\n Further options are:\n - _dynamics: Include all dynamic properties\n - _relations: Include foreign keys and lists of primary keys of linked objects\n - _relations_contents: Apply the contents to the relations. The relation contents can be a bool or a new contents item\n - If the relations_contents=re-use: the current contents are also applied to the relation object\n - If the relations_contents=contents list: That item is subjected to the same rules as other contents\n - _relation_contents_RELATION_NAME: Apply the contents the the given relation. Same rules as _relation_contents apply here\n _ _relations_depth: Depth of relational serialization. Defaults to 0.\n Specifying a form of _relations_contents change the depth to 1 (if depth was 0) as the relation is to be serialized\n Specifying it 2 with _relations_contents given will serialize the relations of the fetched relation. 
This causes a chain of serializations\n - dynamic_property_1,dynamic_property_2 (results in static properties plus 2 dynamic properties)\n Properties can also be excluded by prefixing the field with '-':\n - contents=_dynamic,-dynamic_property_2,_relations (static properties, all dynamic properties except for dynamic_property_2 plus all relations)\n Relation serialization can be done by asking for it:\n - contents=_relations,_relations_contents=re-use\n :type contents: list or str\n :raises UnsupportedContentException: If a content string is passed which is not valid\n \"\"\"\n super(ContentOptions, self).__init__()\n\n verify_params = copy.deepcopy(self.OPTION_TYPES)\n self.content_options = {}\n self.has_content = False\n if contents is not None:\n if isinstance(contents, basestring):\n contents_list = contents.split(',')\n elif isinstance(contents, list):\n contents_list = contents\n else:\n raise UnsupportContentException('Contents should be a comma-separated list instead of \\'{0}\\''.format(contents))\n else:\n return\n self.has_content = True\n errors = []\n for option in contents_list:\n if not isinstance(option, basestring):\n errors.append('Provided option \\'{0}\\' is not a string but \\'{1}\\''.format(option, type(option)))\n continue\n split_options = option.split('=')\n if len(split_options) > 2: # Unsupported format\n errors.append('Found \\'=\\' multiple times for entry {0}'.format(split_options[0]))\n continue\n starts = [v for k, v in self.OPTION_STARTS.iteritems() if option.startswith(k)]\n if len(starts) == 1:\n verify_params[option] = starts[0]\n # Convert to some work-able types\n value = split_options[1] if len(split_options) == 2 else None\n if isinstance(value, str) and value.isdigit():\n value = int(value)\n self.content_options[split_options[0]] = value\n errors.extend(ExtensionsToolbox.verify_required_params(verify_params, self.content_options, return_errors=True))\n if len(errors) > 0:\n raise UnsupportContentException('Contents is using an unsupported format: \\n - {0}'.format('\\n - '.join(errors)))\n\n def __contains__(self, item): # In operator\n return self.has_option(item)\n\n def has_option(self, option):\n \"\"\"\n Returns True if the contentOption has the given option\n :param option: Option to search for\n :type option: str\n :return: bool\n \"\"\"\n return option in self.content_options\n\n def get_option(self, option, default=None):\n \"\"\"\n Returns the value of the given option\n :param option: Option to retrieve the value for\n :type option: str\n :param default: Default value when the key does not exist\n :type default: any\n :return: None if the value is not found else the value specified\n :rtype: NoneType or any\n \"\"\"\n return self.content_options.get(option, default)\n\n def set_option(self, option, value, must_exist=True):\n \"\"\"\n Sets an options value\n :param option: Option to set the value for\n :type option: str\n :param value: Value of the option\n :type value: any\n :param must_exist: The option must already exist before setting the option\n :type must_exist: bool\n :return: The given value (None if the key does not exist)\n :rtype: NoneType or any\n \"\"\"\n if must_exist is True and self.has_option(option) is False:\n return None\n self.content_options[option] = value\n return value\n\n def increment_option(self, option):\n \"\"\"\n Increments the value for the given option. 
If the option is not present or no value passed, this won't do anything\n :param option: Option to increment the value for\n :type option: str\n :return: The new value or None if they key is not found or not an integer\n :rtype: int or NoneType\n \"\"\"\n value = self.get_option(option)\n if isinstance(value, int):\n return self.set_option(option, value + 1, must_exist=True)\n return None # For readability\n\n def decrement_options(self, option):\n \"\"\"\n Decrements the value for the given option. If the option is not present or no value passed, this won't do anything\n :param option: Option to increment the value for\n :type option: str\n :return: The new value or None if they key is not found or not an integer\n :rtype: int or NoneType\n \"\"\"\n value = self.get_option(option)\n if isinstance(value, int):\n return self.set_option(option, value - 1, must_exist=True)\n return None # For readability\n\n\n# noinspection PyProtectedMember\nclass FullSerializer(serializers.Serializer):\n \"\"\"\n Serializes the persistent and dynamic stack of a hybrid object\n \"\"\"\n guid = serializers.Field() # Always include the GUID\n\n class Meta(object):\n \"\"\"\n Meta class. Holds some information about the serializer\n - fields: Fields which included by default (can be edited by using the 'fields' attr in the serializer\n - read_only_fields: Indicates which fields are read only (can be edited by using the 'read_only_fields' attr in the serializer\n \"\"\"\n fields = ('guid',)\n read_only_fields = ('guid',)\n\n def __init__(self, hybrid, contents=None, depth=None, *args, **kwargs):\n \"\"\"\n Initializes the serializer, mapping field types\n :param hybrid: Hybrid object to serialize\n :type hybrid: any (ovs.dal.hybrids.X.X)\n :param contents: Contents to serialize. Without contents, only the GUID is serialized\n When contents is given, all non-dynamic properties are serialized\n Further options are:\n - _dynamics: Include all dynamic properties\n - _relations: Include foreign keys and lists of primary keys of linked objects\n - _relations_contents: Apply the contents to the relations. The relation contents can be a bool or a new contents item\n - If the relations_contents=re-use: the current contents are also applied to the relation object\n - If the relations_contents=contents list: That item is subjected to the same rules as other contents\n - _relation_contents_RELATION_NAME: Apply the contents the the given relation. Same rules as _relation_contents apply here\n _ _relations_depth: Depth of relational serialization. Defaults to 1 when relation_contents were specified.\n Specifying a form of _relations_contents change the depth to 1 (if depth was 0) as the relation is to be serialized\n Specifying it 2 with _relations_contents given will serialize the relations of the fetched relation. This causes a chain of serializations\n - dynamic_property_1,dynamic_property_2 (results in static properties plus 2 dynamic properties)\n Properties can also be excluded by prefixing the field with '-':\n - contents=_dynamic,-dynamic_property_2,_relations (static properties, all dynamic properties except for dynamic_property_2 plus all relations)\n Relation serialization can be done by asking for it:\n - contents=_relations,_relations_contents=re-use\n All relational serialization can only be used to get data. 
This data will be not be set-able when deserializing\n :type contents: list or none\n :param depth: Current depth of serializing, used to serialize relations\n :type depth: int\n Kwarg parameters:\n :param allow_passwords: Allow the attr 'password' to be serialized\n :type allow_passwords: bool\n Parent parameters:\n :param instance: Instance of the object to use for updating\n :type instance: an\n :param data: Initialization data (Will be applied to the instance if an instance is given)\n :type data: list[dict] or dict\n :param many: Indicate that the given instance is to be iterated for serialization\n :type many: bool\n \"\"\"\n if not isinstance(contents, ContentOptions):\n contents = ContentOptions(contents)\n allow_passwords = kwargs.pop('allow_passwords', False)\n super(FullSerializer, self).__init__(*args, **kwargs)\n self.hybrid = hybrid\n for prop in self.hybrid._properties:\n if 'password' not in prop.name or allow_passwords:\n self.fields[prop.name] = FullSerializer._map_type_to_field(prop.property_type)\n for dynamic in self.hybrid._dynamics:\n if contents.has_content is False or (('_dynamics' in contents or dynamic.name in contents) and '-{0}'.format(dynamic.name) not in contents):\n self.fields[dynamic.name] = serializers.Field()\n for relation in self.hybrid._relations:\n if contents.has_content is False or (('_relations' in contents or relation.name in contents) and '-{0}'.format(relation.name) not in contents):\n self.fields['{0}_guid'.format(relation.name)] = serializers.CharField(required=False)\n foreign_relations = RelationMapper.load_foreign_relations(hybrid) # To many side of things, items pointing towards this object\n if foreign_relations is not None:\n for key, info in foreign_relations.iteritems():\n if contents.has_content is False or (('_relations' in contents or key in contents) and '-{0}'.format(key) not in contents):\n if info['list'] is True:\n self.fields['%s_guids' % key] = serializers.Field()\n else:\n self.fields['%s_guid' % key] = serializers.Field()\n\n # Check is a relation needs to be serialized\n foreign_relations = RelationMapper.load_foreign_relations(hybrid) # To many side of things, items pointing towards this object\n if contents.has_content is False or (foreign_relations is None and len(hybrid._relations) == 0) or depth == 0:\n return\n # Foreign relations is a dict, relations is a relation object, need to differentiate\n relation_contents = contents.get_option('_relations_contents')\n relation_contents_options = copy.deepcopy(contents) if relation_contents == 're-use' else ContentOptions(relation_contents)\n relations_data = {'foreign': foreign_relations or {}, 'own': hybrid._relations}\n for relation_type, relations in relations_data.iteritems():\n for relation in relations:\n relation_key = relation.name if relation_type == 'own' else relation\n relation_hybrid = relation.foreign_type if relation_type == 'own' else Descriptor().load(relations[relation]['class']).get_object()\n # Possible extra content supplied for a relation\n relation_content = contents.get_option('_relation_contents_{0}'.format(relation_key))\n if relation_content is None and relation_contents == 're-use':\n relation_content_options = relation_contents_options\n else:\n relation_content_options = ContentOptions(relation_content)\n # Use the depth given by the contents when it's the first item to serialize\n relation_depth = contents.get_option('_relations_depth', 1 if relation_content_options.has_content else 0) if depth is None else depth\n if relation_depth is None: # 
Can be None when no value is give to _relations_depth\n relation_depth = 0\n if relation_depth == 0:\n continue\n # @Todo prevent the same one-to-one relations from being serialized multiple times? Not sure if helpful though\n self.fields[relation_key] = FullSerializer(relation_hybrid, contents=relation_content_options, depth=relation_depth - 1)\n\n def get_identity(self, data):\n \"\"\"\n This hook makes sure the guid is returned as primary key\n By default the serializer class will use the id key on the incoming data to determine the canonical identity of an object\n \"\"\"\n return data.get('guid', None)\n\n def restore_object(self, attrs, instance=None):\n \"\"\"\n Provides deserializing functionality for persistent properties\n Required if we want our serializer to support deserialization into fully fledged object instances.\n If we don't define this method, then deserializing data will simply return a dictionary of items.\n \"\"\"\n if instance is not None:\n for prop in self.hybrid._properties:\n setattr(instance, prop.name, attrs.get(prop.name, getattr(instance, prop.name)))\n for relation in self.hybrid._relations:\n guid_key = '{0}_guid'.format(relation.name)\n if guid_key in attrs and attrs[guid_key] != getattr(instance, guid_key):\n setattr(instance, relation.name, None if attrs[guid_key] is None else relation.foreign_type(attrs[guid_key]))\n return instance\n return self.hybrid(data=attrs)\n\n @staticmethod\n def _map_type_to_field(field_type):\n \"\"\"\n Maps the given field type to a serializer field\n \"\"\"\n if isinstance(field_type, list):\n field_type = type(field_type[0])\n if field_type is str:\n return serializers.CharField(required=False)\n if field_type is int:\n return serializers.IntegerField(required=False)\n if field_type is bool:\n return serializers.BooleanField(required=False)\n if field_type is dict:\n return serializers.WritableField(required=False)\n return serializers.Field()\n\n def deserialize(self):\n _ = self.errors # Trigger deserialization\n return self.object\n","repo_name":"openvstorage/framework","sub_path":"webapps/api/backend/serializers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":15973,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"62"} +{"seq_id":"34549944749","text":"import PyPDF3\r\nimport os\r\nimport shutil\r\n\r\nroot=r'C:\\Users\\Benutzer_1\\OneDrive\\Dokumentumok\\TUG\\Prozessmanagement\\VO_Unterlagen_20200512'\r\nos.chdir(root)\r\n\r\n\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\n\r\n\r\n\r\n# onlyfiles = [f for f in listdir(root) if isfile(join(root, f))]\r\n\r\n# for i in onlyfiles:\r\n# if \" \" in i:\r\n# os.rename(i,i[11:].replace(\" \",\"_\"))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass Search:\r\n \r\n def __init__(self,word,rootdir,overwrite=False):\r\n self.word=word\r\n self.rootdir=rootdir\r\n self.overwrite=overwrite\r\n \r\n def find(self): \r\n \r\n if self.overwrite == True:\r\n \r\n onlyfiles = [f for f in listdir(self.rootdir) if isfile(join(self.rootdir, f))]\r\n \r\n for i in onlyfiles:\r\n if \" \" in i:\r\n os.rename(i,i.replace(\" \",\"_\"))\r\n \r\n \r\n newdir=os.path.join(root, self.word)\r\n \r\n if os.path.exists(newdir):\r\n shutil.rmtree(newdir)\r\n os.makedirs(newdir)\r\n fil=[]\r\n nums=[]\r\n for subdir, dirs, files in os.walk(self.rootdir):\r\n \r\n for file in files:\r\n \r\n try:\r\n pdf=(os.path.join(subdir, file))\r\n pdfFileObj=open(pdf,'rb')\r\n 
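# PyPDF3 exposes the PyPDF2-style reader API; decrypt('') below only opens documents that were encrypted with an empty password.\r\n                    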
pdfReader=PyPDF3.PdfFileReader(pdfFileObj)\r\n if pdfReader.isEncrypted:\r\n pdfReader.decrypt('')\r\n \r\n nums.append(pdfReader.getNumPages()) \r\n fil.append(pdf)\r\n \r\n for i in range(pdfReader.getNumPages()):\r\n pageObj=pdfReader.getPage(i)\r\n text=pageObj.extractText()\r\n if text.find(self.word) != -1:\r\n st='copy '+str(pdf)+' '+str(newdir+\"\\\\\"+file)\r\n os.popen(st)\r\n break\r\n \r\n except:\r\n print(pdf+\"not decryptable\")\r\n \r\n \r\n return fil\r\n\r\ne=Search(\"processflow\",root,overwrite=True)\r\na=e.find()\r\n","repo_name":"mate1116/nojoke_m","sub_path":"PdfSearch.py","file_name":"PdfSearch.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41711216950","text":"import torch\nimport numpy as np\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.distributed import destroy_process_group\nimport time, os\nfrom model import RLHF\nfrom trainers.trainer import Trainer\n\n# TODO: this works but is currently crude and incomplete, critic implementation plus PPO are obvious next steps\nclass PolicyGradientTrainer(Trainer):\n def __init__(self, config):\n super().__init__(config)\n import tiktoken\n self.enc = tiktoken.get_encoding(\"gpt2\")\n self.mode = 'RL'\n \n def train(self):\n\n self.setup_ddp()\n\n ctx, meta_vocab_size = self.setup()\n\n # model init\n model = self.init_model()\n\n model = RLHF(model, self.mode, discrete_reward=self.config['discrete_reward'])\n\n if self.config['init_multihead_from'] == 'scratch':\n print(\"initializing multihead from scratch\")\n else:\n if self.config['init_multihead_from'] == 'resume':\n print(f\"Resuming training from {self.config['out_dir_multihead']}\")\n # resume training from a checkpoint.\n ckpt_path = os.path.join(self.config['out_dir_multihead'], 'ckpt.pt')\n checkpoint = torch.load(ckpt_path, map_location=self.device) \n state_dict = checkpoint['model']\n # fix the keys of the state dictionary :(\n # honestly no idea how checkpoints sometimes get this prefix, have to debug more\n unwanted_prefix = '_orig_mod.'\n for k,v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n model.load_state_dict(state_dict)\n\n \n if self.config['hard_code_reward']:\n reward_model = None\n print('Using hard-coded reward')\n else:\n print('Using learned reward model')\n if self.config['separate_reward_model']:\n import copy\n reward_model = copy.deepcopy(model)\n print('Reward model instantiated separately')\n else:\n reward_model = model\n print('Reward model and actor model share backbone')\n reward_model.to(self.device)\n \n model.to(self.device)\n \n # actor_optimizer = torch.optim.AdamW(model.model.policy_head.parameters(), lr=1e-2)\n actor_optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)\n\n last_time = time.time()\n rews_all = []\n max_iters = 100000\n X, Y = self.get_batch('train') # fetch the very first batch\n t0 = time.time()\n for iter in range(max_iters):\n \n states, log_probs, log_probs_reference, rewards, advantages = model.generate(\n X, self.block_size, self.device, self.block_size, reward_model=reward_model, hard_code_reward=self.config['hard_code_reward'])\n\n # minus KL divergence\n rets = advantages * log_probs.squeeze() #- 1*(log_probs-log_probs_reference) #- 0.05*log_probs\n actor_loss = -rets.sum()\n actor_optimizer.zero_grad(set_to_none=True)\n actor_loss.backward()\n actor_optimizer.step()\n\n 
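# the batch-mean reward on the next line is computed but never stored; rews_all below calls rewards.mean() itself for logging\n            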
torch.mean(rewards)\n\n rews_all.append(rewards.mean().detach().cpu().numpy())\n\n if iter % 1000 == 0:\n t1 = time.time()\n print(f'iter: {iter}, time: {t1-t0}')\n # print(actor_loss, critic_loss)\n print(f'Actor loss: {actor_loss}, iter: {iter}')\n print(f'rets: {np.mean(rews_all[-1000:])}')\n current_time = time.time()\n # print(current_time - last_time)\n last_time = current_time\n text = model.generate(X, self.block_size, self.device, self.block_size, reward_model=reward_model)[0]\n for i in range(1):\n text_i = text[i,:]\n # print(reward(text_i))\n try:\n print(self.enc.decode(text_i.tolist()))\n except:\n continue \n\n\nclass GumbelTrainer(Trainer):\n def __init__(self, config):\n super().__init__(config)\n import tiktoken\n self.enc = tiktoken.get_encoding(\"gpt2\")\n self.mode = 'RL'\n \n def train(self):\n\n self.setup_ddp()\n\n ctx, meta_vocab_size = self.setup()\n\n # model init\n model = self.init_model()\n\n rl_model = RLHF(model, self.mode, discrete_reward=self.config['discrete_reward'])\n\n\n # The current approach is to use a separate reward model because otherwise optimisation of the reward model changes upstream parameters impacting performance of the multihead\n # I therefore load the language model from 'out_dir' and the reward model from 'out_dir_multihead'\n\n if self.config['init_multihead_from'] == 'scratch':\n print(\"initializing multihead from scratch\")\n else:\n if self.config['init_multihead_from'] == 'resume':\n print(f\"Resuming training from {self.config['out_dir']}\")\n # resume training from a checkpoint.\n ckpt_path = os.path.join(self.config['out_dir'], 'ckpt.pt')\n checkpoint = torch.load(ckpt_path, map_location=self.device) \n state_dict = checkpoint['model']\n # fix the keys of the state dictionary :(\n # honestly no idea how checkpoints sometimes get this prefix, have to debug more\n unwanted_prefix = '_orig_mod.'\n for k,v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n model.load_state_dict(state_dict)\n\n separate_reward_model = True \n if separate_reward_model:\n print('Reward model instantiated as copy')\n import copy\n reward_model = copy.deepcopy(model)\n\n print(f\"Resuming reward model from {self.config['out_dir_multihead']}\")\n\n reward_model = RLHF(reward_model, self.mode, discrete_reward=self.config['discrete_reward'])\n # resume training from a checkpoint.\n ckpt_path = os.path.join(self.config['out_dir_multihead'], 'ckpt.pt')\n checkpoint = torch.load(ckpt_path, map_location=self.device) \n state_dict = checkpoint['model']\n # fix the keys of the state dictionary :(\n # honestly no idea how checkpoints sometimes get this prefix, have to debug more\n unwanted_prefix = '_orig_mod.'\n for k,v in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)\n reward_model.load_state_dict(state_dict)\n else:\n reward_model = rl_model\n rl_model.to(self.device)\n reward_model.to(self.device)\n\n gumbel_optimizer = torch.optim.AdamW(rl_model.parameters(), lr=1e-3)\n\n # initialize a GradScaler. 
If enabled=False scaler is a no-op\n        scaler = torch.cuda.amp.GradScaler(enabled=(self.dtype == 'float16'))\n\n        last_time = time.time()\n        rews_all = []\n        max_iters = 100000 \n        \n        X, Y = self.get_batch('train') # fetch the very first batch\n\n        X = torch.zeros((X.shape[0], 1), dtype=torch.long).to(self.device) # for now there is no prompt\n\n        t0 = time.time()\n        for iter in range(max_iters):\n            \n            for micro_step in range(self.gradient_accumulation_steps):\n                if self.ddp:\n                    # in DDP training we only need to sync gradients at the last micro step.\n                    # the official way to do this is with model.no_sync() context manager, but\n                    # I really dislike that this bloats the code and forces us to repeat code\n                    # looking at the source of that context manager, it just toggles this variable\n                    rl_model.require_backward_grad_sync = (micro_step == self.gradient_accumulation_steps - 1)\n                with ctx:\n                    states, rewards = rl_model.generate_gumbel(X, self.config['episode_length'], self.device, self.block_size, reward_model=reward_model)\n                    mean_reward = rewards.mean()\n                    loss = -mean_reward\n                # # immediately async prefetch next batch while model is doing the forward pass on the GPU\n                # X, Y = self.get_batch('train')\n                # backward pass, with gradient scaling if training in fp16\n                scaler.scale(loss).backward()\n\n            # clip the gradient\n            if self.grad_clip != 0.0:\n                scaler.unscale_(gumbel_optimizer)\n                torch.nn.utils.clip_grad_norm_(rl_model.parameters(), self.grad_clip)\n            # step the optimizer and scaler if training in fp16\n            scaler.step(gumbel_optimizer)\n            scaler.update()\n            # flush the gradients as soon as we can, no need for this memory anymore\n            gumbel_optimizer.zero_grad(set_to_none=True)\n\n            rews_all.append(mean_reward.detach().cpu().numpy())\n            eval_interval = self.config['eval_interval']\n            if iter % eval_interval == 0:\n                t1 = time.time()\n                print(f'iter: {iter}, time: {t1-t0}')\n                print(f'rets: {np.mean(rews_all[-eval_interval:])}')\n                current_time = time.time()\n                # print(current_time - last_time)\n                last_time = current_time\n                text = rl_model.generate(X, self.config['episode_length'], self.device, self.block_size, reward_model=reward_model)[0]\n                for i in range(1):\n                    text_i = text[i,:]\n                    # print(reward(text_i))\n                    try:\n                        print(self.enc.decode(text_i.tolist()))\n                    except:\n                        continue ","repo_name":"savnani5/miniChatGPT","sub_path":"trainers/rl_trainer.py","file_name":"rl_trainer.py","file_ext":"py","file_size_in_byte":10172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"36930360556","text":"from email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nimport stripe\nimport pdfkit\nfrom datetime import datetime as d\nfrom ftplib import FTP\nimport smtplib\n\nRECEIVER = \"osmanaj.noel0@gmail.com\"\nSENDER = \"m122osmanaj@gmail.com\"\nPASSWORD = \"M122TestPW\"\n\nhost = \"ftp.byethost7.com\"\nusername = \"b7_31642774\"\npassword = \"FTPM122\"\n\nProductId = \"\"\nPriceId = \"\"\n\namount = 5\n\nCustomerId = \"\"\ncity = \"\"\nzip = \"\"\nstreet = \"\"\ncountry = \"\"\n\ndef createProduct():\n    stripe.Product.create(name=\"Nintendo Switch\")\n\ndef createPDF(data):\n\n    html = \"\"\"\n    <html>\n    <head>\n    <title>PDF</title>\n    </head>\n    <body>\n    <h2>Payment Receipt</h2>\n    <h3>Invoice To</h3>\n    <table>\n    <tr><td>Name</td><td>\"\"\" + stripe.Customer.retrieve(CustomerId)[\"name\"] + \"\"\"</td></tr>\n    <tr><td>Address</td><td>\"\"\" + street + \"<br>\" + city + \" \" + zip + \"<br>\" + country + \"\"\"</td></tr>\n    <tr><td>Date</td><td>\"\"\" + d.now().strftime(\"%H:%M:%S %Y-%m-%d\") + \"\"\"</td></tr>\n    </table>\n    <h3>Products</h3>\n    <table>\n    <tr><th>Product</th><th>Amount</th><th>Price per Unit</th><th>Price</th></tr>\n    \"\"\"\n\n    for product in stripe.Product.list():\n\n        price = stripe.Price.search(\n            query=\"product:'\" + product[\"id\"] + \"'\",\n        )\n\n        for p in price:\n            PriceId = p[\"id\"]\n\n        html = html + \"\"\"\n        <tr>\n        <td>\"\"\" + product[\"name\"] + \"\"\"</td>\n        <td>\"\"\" + str(amount) + \"\"\"</td>\n        <td>\"\"\" + str(format(stripe.Price.retrieve(PriceId)[\"unit_amount\"] / 100, '.02f')) + \"\"\"</td>\n        <td>\"\"\" + str(format((stripe.Price.retrieve(PriceId)[\"unit_amount\"] * amount) / 100, '.02f')) + \"\"\"</td>\n        </tr>\n        \"\"\"\n\n    html = html + \"\"\"\n    </table>\n    </body>\n    </html>\n    \"\"\"\n\n    path_wkhtmltopdf = r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'\n    config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)\n    pdfkit.from_string(html, r'..\documentation\Payment.pdf', configuration=config)\n\n\ndef send_email(subject, body):\n\n    filename = r'..\documentation\Payment.pdf'\n    msg = MIMEMultipart(\"alternative\")\n    part = MIMEText(body, \"html\")\n    msg.attach(part)\n\n    with open(filename, \"rb\") as attachment:\n        part = MIMEBase(\"application\", \"octet-stream\")\n        part.set_payload(attachment.read())\n\n    encoders.encode_base64(part)\n\n    part.add_header(\n        \"Content-Disposition\",\n        \"attachment\", filename='Payment_' + stripe.Customer.retrieve(CustomerId)[\"name\"] + '_' + d.now().strftime(\"%H:%M:%S %Y-%m-%d\") + '.pdf'\n    )\n    msg.attach(part)\n\n    msg[\"Subject\"] = subject\n    msg[\"From\"] = SENDER\n    msg[\"To\"] = RECEIVER\n    server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\n    server.login(SENDER, PASSWORD)\n    server.send_message(msg)\n    server.quit()\n\nif __name__ == '__main__':\n    stripe.api_key = \"sk_test_51KiDANElM3dtUv2KoZ7ttJQNgVlkOYDQnlVnl4etIKaEo0PEZFzIgT8znwHgrmAzj3VSJIDa64Uu1WbobPOcZFAb00Yzh1InKZ\"\n\n    payment = stripe.PaymentIntent.list()\n\n    Customer = stripe.Customer.create(\n        balance=100,\n        description=\"Test Customer\",\n        email=\"osmanaj.noel0@gmail.com\",\n        name=\"Noel Osmanaj\",\n        address={\n            \"city\": \"Bruettisellen\",\n            \"country\": \"CH\",\n            \"line1\": \"Im Talacher 13\",\n            \"postal_code\": \"8306\",\n        },\n    )\n    CustomerId = Customer[\"id\"]\n\n    product = stripe.Product.search(\n        query=\"name:'Nintendo Switch'\",\n    )\n\n    for p in product:\n        ProductId = p[\"id\"]\n\n    city = str(stripe.Customer.retrieve(CustomerId)[\"address\"][\"city\"])\n    zip = str(stripe.Customer.retrieve(CustomerId)[\"address\"][\"postal_code\"])\n    street = str(stripe.Customer.retrieve(CustomerId)[\"address\"][\"line1\"])\n    country = str(stripe.Customer.retrieve(CustomerId)[\"address\"][\"country\"])\n\n    createPDF(stripe.PaymentIntent.retrieve(\"pi_3KnIjZElM3dtUv2K1qsvQn2j\"))\n\n    with FTP(host) as ftp:\n        ftp.login(user=username, passwd=password)\n\n        with open(r'C:\Users\User\Desktop\Aufgaben\TBZ\Module\Modul 122\LB-2\LB2-M122\documentation\Payment.pdf', 'rb') as f:\n            ftp.storbinary('STOR ' + r'Orders/Payment_' + stripe.Customer.retrieve(CustomerId)[\"name\"] + '_' + d.now().strftime(\"%H:%M:%S %Y-%m-%d\") + '.pdf', f)\n\n    email_html = \"\"\"\\\n    <html>\n    <body>\n    <p>Sehr geehrte/r \"\"\" + stripe.Customer.retrieve(CustomerId)[\"name\"] + \"\"\"</p>\n    <p>Vielen Dank für Ihre Bestellung in unserem E-Shop.<br>\n    Im Anhang erhalten Sie mit dieser Mail, eine Rechnung.<br>\n    Ihre Bestellung ist zurzeit in bearbeitung, Sie können diese jederzeit unter folgendem Link verfolgen.</p>\n    <p>Mit freundlichen Grüssen</p>\n    <p>Ihr E-Shop, Sheemo</p>\n    </body>\n    </html>
    \n \n \n \"\"\"\n\n send_email(subject=\"Vielen Dank für Ihre Bestellung!\", body=email_html)\n\n stripe.Customer.delete(Customer[\"id\"])\n\n t = stripe.PaymentIntent.retrieve(\"pi_3KnIjZElM3dtUv2K1qsvQn2j\")\n\n","repo_name":"Noel-Os/LB2-M122","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18205351346","text":"from PySide.QtCore import *\nfrom PySide.QtGui import *\nimport os\nimport glob\n\nimport profileDialog_Ui\n\n\nclass ProfileDialog(QDialog, profileDialog_Ui.Ui_profileDialog):\n def __init__(self, mainDialog, type, parent=None):\n super(ProfileDialog, self).__init__(parent)\n self.setupUi(self)\n self.mainDialog = mainDialog\n self.batchFile = None\n self.type = type\n if type == \"add\":\n # copy the items from the main dialog\n for i in range(0, mainDialog.comboBox_Profile.count()):\n self.comboBox.addItem(mainDialog.comboBox_Profile.itemText(i))\n # select the current profile\n profile = mainDialog.comboBox_Profile.currentText()\n self.comboBox.setCurrentIndex(self.comboBox.findText(profile))\n self.pushButton_LoadBatch.clicked.connect(self.loadBatchFile)\n\n elif type == \"rename\":\n self.setWindowTitle(\"Rename the profile\")\n self.label.hide()\n self.comboBox.hide()\n self.pushButton_LoadBatch.hide()\n self.lineEdit.setText(mainDialog.comboBox_Profile.currentText())\n self.lineEdit.selectAll()\n\n self.lineEdit.setFocus()\n\n def loadBatchFile(self):\n fileObj = QFileDialog.getOpenFileName(self, \"Please choose a batch file...\", dir=self.mainDialog.other_dir,\n filter=\"Batch File (*.bat)\")\n batchFile = fileObj[0]\n #validate\n if len(batchFile) > 0:\n fileName = os.path.basename(batchFile)\n # create a new profile\n self.comboBox.insertItem(0, fileName)\n self.comboBox.setCurrentIndex(0)\n self.lineEdit.setText(os.path.splitext(fileName)[0])\n self.batchFile = batchFile\n\n def accept(self):\n # validate the new profile name\n if self.mainDialog.validateName(self.lineEdit):\n dst = self.lineEdit.text()\n # find the original file if there is one\n try:\n if self.type == \"add\":\n if self.batchFile:\n # load from batch file\n self.mainDialog.comboBox_Profile.insertItem(0, self.lineEdit.text())\n self.mainDialog.comboBox_Profile.setCurrentIndex(0)\n command = self.mainDialog.parseBatchFile(self.batchFile, {})[1]\n self.mainDialog.setArguments(command)\n else:\n # copy from an existing profile\n src = os.path.abspath(glob.glob(\"Profiles/\" + self.comboBox.currentText() + \".ini\")[0])\n profile = open(src, \"r\")\n content = profile.read()\n profile.close()\n open(\"Profiles/\" + dst + \".ini\", \"w\").write(content)\n elif self.type == \"rename\":\n src = os.path.abspath(\n glob.glob(\"Profiles/\" + self.mainDialog.comboBox_Profile.currentText() + \".ini\")[0])\n os.rename(src, os.path.dirname(src) + \"/\" + dst + \".ini\")\n except IndexError:\n self.mainDialog.writeProfile(dst)\n self.mainDialog.updateProfiles()\n # select the new profile in the main dialog\n index = self.mainDialog.comboBox_Profile.findText(dst)\n self.mainDialog.comboBox_Profile.setCurrentIndex(index)\n\n if not self.batchFile:\n self.mainDialog.updateProfiles()\n # select the new profile in the main dialog\n index = self.mainDialog.comboBox_Profile.findText(dst)\n self.mainDialog.comboBox_Profile.setCurrentIndex(index)\n\n 
QDialog.accept(self)\n","repo_name":"kevinmore/fltConverterGUI","sub_path":"profileDialog.py","file_name":"profileDialog.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"5503730942","text":"import numpy as np\nimport scipy as sp\n\nNUMBER = (int, float, complex)\n\n#Functions to handle preprocessing for bloch simulator arguments.\n\ndef process_gradient_argument(gr, points):\n \"\"\"\n Takes in a gradient argument and returns directional gradients.\n If gradients don't exist, returns array of zeros.\n If only one number is passed, it's assigned to the entire xgrad and the others are zeros.\n \"\"\"\n if isinstance(gr, NUMBER):\n return gr * np.ones(points), np.zeros(points), np.zeros(points)\n elif 1 == len(gr.shape):\n return gr, np.zeros(points), np.zeros(points)\n\n gradient_dimensions = gr.shape[0]\n\n if 3 == gradient_dimensions:\n return gr[0,:], gr[1,:], gr[2,:]\n elif 2 == gradient_dimensions:\n return gr[0,:], gr[1,:], np.zeros(points)\n else:\n return gr[0,:], np.zeros(points), np.zeros(points)\n\ndef process_time_points(tp, points):\n \"\"\"\n THREE Cases:\n\t\t1) Single value given -> this is the interval length for all.\n\t\t2) List of intervals given.\n\t\t3) Monotonically INCREASING list of end times given.\n\n\tFor all cases, the goal is for tp to have the intervals.\n \"\"\"\n if isinstance(tp, NUMBER):\n return tp * np.ones(points)\n elif points != tp.size:\n raise IndexError(\"time point length is not equal to rf length\")\n else:\n ti = np.zeros(points)\n if _times_to_intervals(tp, ti, points):\n tp = ti\n return tp\n\ndef process_off_resonance_arguments(df):\n \"\"\"\n Processes off resonance arguments.\n Returns df and size. If only one numer is passed, returns number as single array.\n \"\"\"\n if isinstance(df, NUMBER):\n return (df * np.ones(1)), 1\n return df, df.size\n\ndef process_relaxations(t1, t2, num_positions):\n \"\"\"\n If only a single relaxation value is given, assume that all of the different\n positions have the same relaxation.\n \"\"\"\n if isinstance(t1, NUMBER):\n t1seq = t1*np.ones(num_positions)\n else:\n assert len(t1) == num_positions\n t1seq = t1\n if isinstance(t2, NUMBER):\n t2seq = t2*np.ones(num_positions)\n else:\n assert len(t2) == num_positions\n t2seq = t2\n return t1seq, t2seq\n\ndef process_positions(dp):\n \"\"\"\n Gets positions vectors if they exist. 
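The Bloch-simulator preprocessing record above converts a monotonically increasing list of end times into per-sample intervals with an explicit loop (_times_to_intervals); a vectorized sketch of the same conversion, assuming NumPy >= 1.16 for diff's prepend argument:

import numpy as np

def endtimes_to_intervals(endtimes):
    # np.diff with prepend=0.0 computes the same result as the record's
    # explicit subtraction loop, and the positivity check replaces its
    # allpos flag.
    endtimes = np.asarray(endtimes, dtype=float)
    intervals = np.diff(endtimes, prepend=0.0)
    if not np.all(intervals > 0):
        raise ValueError('end times must be strictly increasing')
    return intervals

# endtimes_to_intervals([1.0, 2.5, 4.0]) -> array([1. , 1.5, 1.5])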
Zeros otherwise.\n If only one number is passed, is set as xgrad and other directions are 0s.\n \"\"\"\n if isinstance(dp, NUMBER):\n return dp*np.ones(1), np.zeros(1), np.zeros(1), 1\n elif 1 == len(dp.shape):\n return dp, np.zeros(dp.size), np.zeros(dp.size), dp.size\n\n position_dimensions = dp.shape[0]\n number_of_positions = dp.shape[1]\n if 3 == position_dimensions:\n return dp[0,:], dp[1,:], dp[2,:], number_of_positions\n elif 2 == position_dimensions:\n return dp[0,:], dp[1,:], np.zeros(number_of_positions), number_of_positions\n else:\n return dp[0,:], np.zeros(number_of_positions), np.zeros(number_of_positions), number_of_positions\n\ndef process_magnetization(mx_0, my_0, mz_0, rf_length, freq_pos_count, mode):\n \"\"\"\n Returns mx, my, and mz vectors allocated based on input parameters.\n \"\"\"\n if isinstance(mx_0, np.ndarray) and isinstance(my_0, np.ndarray) and isinstance(mz_0, np.ndarray):\n mx_0 = mx_0.ravel()\n my_0 = my_0.ravel()\n mz_0 = mz_0.ravel()\n out_points = 1\n if (2 & mode):\n out_points = rf_length\n fn_out_points = out_points * freq_pos_count\n mx = np.zeros(fn_out_points)\n my = np.zeros(fn_out_points)\n mz = np.zeros(fn_out_points)\n if None is not mx_0 and type(mx_0) != type(0.0) and type(mx_0) != type(0) and freq_pos_count == mx_0.size and freq_pos_count == my_0.size and freq_pos_count == mz_0.size:\n for val in range(freq_pos_count):\n mx[val * out_points] = mx_0[val]\n my[val * out_points] = my_0[val]\n mz[val * out_points] = mz_0[val]\n else:\n for val in range(freq_pos_count):\n mx[val * out_points] = 0\n my[val * out_points] = 0\n mz[val * out_points] = 1\n return mx, my, mz\n\ndef reshape_matrices(mx, my, mz, ntime, n_pos, nf):\n \"\"\"\n Reshapes output matrices.\n \"\"\"\n if ntime > 1 and nf > 1 and n_pos > 1:\n shape = (nf, n_pos, ntime)\n mx.shape = shape\n my.shape = shape\n mz.shape = shape\n return\n else:\n if ntime > 1:\n shape = ((n_pos * nf), ntime)\n if 1 == (n_pos * nf):\n shape = (ntime, )\n else:\n shape = (nf, n_pos)\n if 1 == nf:\n shape = (n_pos,)\n mx.shape = shape\n my.shape = shape\n mz.shape = shape\n\ndef _times_to_intervals(endtimes, intervals, n):\n \"\"\"\n Helper function for processing time points.\n \"\"\"\n allpos = True\n lasttime = 0.0\n\n for val in range(n):\n intervals[val] = endtimes[val] - lasttime\n lasttime = endtimes[val]\n if intervals[val] <= 0:\n allpos = False\n return allpos\n","repo_name":"namalkanti/bloch-simulator-python","sub_path":"bloch/bloch_processing.py","file_name":"bloch_processing.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"62"} +{"seq_id":"32258322824","text":"#Module 12 Class Activity 6\r\n#Paul Carroll and Robert Bayer\r\n\r\n###High Level###\r\n#take the thing we are supposed to decypher\r\n#find the frequency of each of the letter\r\n#then correlate and match each letter to the frequency table\r\n#try different combinations of letters to get closest match\r\n\r\n###Algorithm###\r\n#Access the frquescy table an\r\n#LOOP: the frequency of each letter, the amount of letters\r\n#Find the percentage of each\r\n#Match percentages to the original table\r\n#FUNCTION: Finding frequency of each\r\n#FUNCTION: Finding percentages\r\n#FUNCTION: Finding total number of letters\r\nimport string\r\nimport operator\r\n\r\nfreqtable = {\"A\": 8.12, \"B\": 1.49, \"C\": 2.71, \"D\": 4.32, \"E\": 12.02, \"F\": 2.30, \"G\": 2.03, \"H\": 5.92, \"I\": 7.31,\r\n \"J\": 0.10, \"K\": 0.69, \"L\": 3.98, \"M\": 2.61, 
\"N\": 6.95, \"O\": 7.68, \"P\": 1.82, \"Q\": 0.11, \"R\": 6.02,\r\n \"S\": 6.28, \"T\": 9.10, \"U\": 2.88, \"V\": 1.11, \"W\": 2.09, \"X\": 0.17, \"Y\": 2.11, \"Z\": 0.07}\r\n\r\ndef freqofeach(mystring):\r\n capstring = mystring.upper()\r\n letters = {\"A\": 0, \"B\": 0, \"C\": 0, \"D\": 0, \"E\": 0, \"F\": 0, \"G\": 0, \"H\": 0, \"I\": 0, \"J\": 0, \"K\": 0, \"L\": 0, \"M\": 0,\r\n \"N\": 0, \"O\": 0, \"P\": 0, \"Q\": 0, \"R\": 0, \"S\": 0, \"T\": 0, \"U\": 0, \"V\": 0, \"W\": 0, \"X\": 0, \"Y\": 0, \"Z\": 0}\r\n for i in capstring:\r\n if i in letters:\r\n letters[i] += 1\r\n return letters\r\n\r\n\r\ndef numofletters(mystring):\r\n alpha = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\",\r\n \"v\", \"w\", \"x\", \"y\", \"z\"]\r\n lowerstring = mystring.lower()\r\n total = 0\r\n for i in lowerstring:\r\n if i in alpha:\r\n total += 1\r\n return total\r\n\r\ndef percfreq(freqs, total):\r\n percsdict = {\"A\": 0, \"B\": 0, \"C\": 0, \"D\": 0, \"E\": 0, \"F\": 0, \"G\": 0, \"H\": 0, \"I\": 0, \"J\": 0, \"K\": 0, \"L\": 0, \"M\": 0,\r\n \"N\": 0, \"O\": 0, \"P\": 0, \"Q\": 0, \"R\": 0, \"S\": 0, \"T\": 0, \"U\": 0, \"V\": 0, \"W\": 0, \"X\": 0, \"Y\": 0, \"Z\": 0}\r\n for letter in freqs:\r\n perc = (freqs[letter] / total) * 100\r\n percsdict[letter] = perc\r\n return percsdict\r\n\r\ndef main():\r\n mostfreq = [\"E\", \"T\", \"A\", \"O\", \"I\", \"N\", \"S\", \"H\", \"R\", \"D\", \"L\", \"C\", \"U\", \"M\", \"W\", \"F\", \"G\", \"Y\", \"P\", \"B\", \"V\",\r\n \"K\", \"J\", \"X\", \"Q\", \"Z\"]\r\n thestring = \"tivzgylyrdroohvmwblfgsvzwwivhhdsvivblfszevglhvmwgsvnlmvbzmwblfdrootvggsvkilwfxg/gsvzwwivhhrhhvevmgsivvlmvlmvdsvvohyziildiwblfdroohvvzpvbkzwlmgsvwllivmgvigsvhvxivgpvblmvlmvulfigdlzmwgsvwllidroolkvmblfdroonvvggsvylhhgsviv\"\r\n decode = \"\"\r\n upperstring = thestring.upper()\r\n frequencies = freqofeach(thestring)\r\n totals = numofletters(thestring)\r\n percentages = percfreq(frequencies, totals)\r\n mylist = []\r\n alphalist = []\r\n sorted_x = sorted(percentages.items(), key=operator.itemgetter(1))\r\n for item in sorted_x[::-1]:\r\n mylist.append(item)\r\n for thing in mylist:\r\n alphalist.append(thing[0])\r\n\r\n for letter in upperstring:\r\n if letter in mostfreq:\r\n index = alphalist.index(letter)\r\n decode += mostfreq[index]\r\n\r\n print(decode)\r\n #for key in percentages:\r\n\r\n\r\n\r\n\r\nmain()\r\n#print(freqofeach(\"tivzgylyrdroohvmwblfgsvzwwivhhdsvivblfszevglhvmwgsvnlmvbzmwblfdrootvggsvkilwfxg/gsvzwwivhhrhhvevmgsivvlmvlmvdsvvohyziildiwblfdroohvvzpvbkzwlmgsvwllivmgvigsvhvxivgpvblmvlmvulfigdlzmwgsvwllidroolkvmblfdroonvvggsvylhhgsviv\"))","repo_name":"robert-bayer/CPSCIClassWork","sub_path":"Files and Exceptions/Module12ClassActivity6_Paul_Robert.py","file_name":"Module12ClassActivity6_Paul_Robert.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43871434808","text":"# from cgitb import handler\nfrom cgitb import handler\nimport csv\n\n# with open(\"./Week_3_ATS/day_2/username.csv\", 'r') as f:\n# handler = csv.reader(f)\n# for row in handler:\n# print(row)\n \n# with open(\"./Week_3_ATS/day_2/new.csv\") as f:\n# handler = csv.writer(f)\n# handler.writerow([\"username\",\"school\",\"department\"])\n \n# with open(\"./Week_3_ATS/day_2/email.csv\", 'r') as f:\n# handler = csv.DictReader(f)\n \n# for row in handler:\n# print(row['Login email;Identifier;First 
name;Last name'])\n # print(row['school'])\n # print(row['department'])\n # print(row)\n \nwith open(\"./Week_3_ATS/day_2/username.csv\", 'w') as f:\n headers = [\"firstname\", \"lastname\", \"department\"]\n handler = csv.DictWriter(f, fieldnames=headers)\n handler.writeheader()\n handler.writerow({\"firstname\": \"Ahmad\", \"lastname\": \"Sharaf\", \"department\": \"CSE\"})\n handler.writerow({\"firstname\": \"Yasir\", \"lastname\": \"Alao\", \"department\": \"MEE\"})\n handler.writerow({\"firstname\": \"Ade\", \"lastname\": \"Kabiru\", \"department\": \"EEE\"})\n \n\n","repo_name":"ahmadsharafuddeen/ATS_TRAINING","sub_path":"Week_3_ATS/day_2/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"41252900204","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\ndef f(c, h_patterns, w_patterns, black_count):\r\n # マスを塗りつぶしたかどうか\r\n used = [[False for _ in range(len(w_patterns))]\r\n for _ in range(len(h_patterns))]\r\n\r\n for index, h_pattern in enumerate(h_patterns):\r\n if h_pattern != 1:\r\n continue\r\n\r\n for w_dash in range(len(w_patterns)):\r\n if not used[index][w_dash] and c[index][w_dash] == '#':\r\n used[index][w_dash] = True\r\n black_count -= 1\r\n\r\n for index, w_pattern in enumerate(w_patterns):\r\n if w_pattern != 1:\r\n continue\r\n\r\n for h_dash in range(len(h_patterns)):\r\n if not used[h_dash][index] and c[h_dash][index] == '#':\r\n used[h_dash][index] = True\r\n black_count -= 1\r\n\r\n return black_count\r\n\r\n\r\ndef main():\r\n from itertools import product\r\n\r\n h, w, k = map(int, input().split())\r\n c = [list(input()) for _ in range(h)]\r\n black_count = 0\r\n ans = 0\r\n\r\n # KeyInsight:\r\n # 制約からbit全探索をすると楽に実装できる\r\n # △: リストのコピーができていない=参照渡しの理解が不十分\r\n # △: 愚直に塗りつぶす行と列を決めて、その中に黒マスが含まれていたら減らす方針に\r\n for hi in range(h):\r\n for wi in range(w):\r\n if c[hi][wi] == '#':\r\n black_count += 1\r\n\r\n for h_patterns in product([1, 0], repeat=h):\r\n for w_patterns in product([1, 0], repeat=w):\r\n fi = f(c, h_patterns, w_patterns, black_count)\r\n\r\n if fi == k:\r\n ans += 1\r\n\r\n print(ans)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"KATO-Hiro/AtCoder","sub_path":"ABC/abc151-abc200/abc173/c/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"25713125211","text":"'''\n6.希尔排序:\n希尔排序,也称递减增量排序算法,实质是分组插入排序。\n\n类似合并排序和插入排序的结合体,二路合并排序将原来的数组分成左右两部分,\n希尔排序则将数组按照一定的间隔分成几部分,每部分采用插入排序来排序,有意思的是这样做了之后,\n元素很多情况下就差不多在它应该呆的位置,所以效率不一定比插入排序差。\n时间复杂度 [O(nlogn),O(n^2)]\n空间复杂度 O(1)\n\n最好复杂度 O(n^1.3)\n\n不稳定\n\n希尔排序的基本思想是:将数组列在一个表中并对列分别进行插入排序,重复这过程,不过每次用更长的列(步长更长了,列数更少了)来进行。\n最后整个表就只有一列了。将数组转换至表是为了更好地理解这算法,算法本身还是使用数组进行排序。\n\n例如,假设有这样一组数[ 13 14 94 33 82 25 59 94 65 23 45 27 73 25 39 10 ],如果我们以步长为5开始进行排序,我们可以通过将这列表放在有5列的表中来更好地描述算法,这样他们就应该看起来是这样:\n\n13 14 94 33 82\n25 59 94 65 23\n45 27 73 25 39\n10\n然后我们对每列进行排序:\n\n10 14 73 25 23\n13 27 94 33 39\n25 59 94 65 82\n45\n将上述四行数字,依序接在一起时我们得到:[ 10 14 73 25 23 13 27 94 33 39 25 59 94 65 82 45 ]。这时10已经移至正确位置了,然后再以3为步长进行排序:\n\n10 14 73\n25 23 13\n27 94 33\n39 25 59\n94 65 82\n45\n排序之后变为:\n\n10 14 13\n25 23 33\n27 25 59\n39 65 73\n45 94 82\n94\n最后以1步长进行排序(此时就是简单的插入排序了)。\n'''\n\n\ndef shell_sort(a_list):\n #how many sublists, also how many elements in a sublist\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n 
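The abc173 record above brute-forces every row/column paint pattern with itertools.product, which the constraints make feasible; a compact sketch of the same count with a small hand-checkable case:

from itertools import product

def count_paintings(h, w, grid, k):
    # grid is a list of strings; rows[i] == 1 / cols[j] == 1 means that row
    # or column is painted over. A '#' cell stays black only if neither its
    # row nor its column is painted.
    total = 0
    for rows in product([0, 1], repeat=h):
        for cols in product([0, 1], repeat=w):
            remaining = sum(
                1
                for i in range(h)
                for j in range(w)
                if grid[i][j] == '#' and not rows[i] and not cols[j]
            )
            if remaining == k:
                total += 1
    return total

# Hand-checkable case: both blacks survive only when nothing is painted.
assert count_paintings(2, 2, ['#.', '.#'], 2) == 1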
gap_insertion_sort(a_list, start_position, sublist_count)\n print(\"After increments of size\", sublist_count, \"The list is\", a_list)\n sublist_count = sublist_count // 2\n\n# 取步长\ndef gap_insertion_sort(a_list, start, gap):\n #start+gap is the second element in this sublist\n for i in range(start + gap, len(a_list), gap):\n current_value = a_list[i]\n position = i\n while position >= gap and a_list[position - gap] > current_value:\n a_list[position] = a_list[position - gap] #move backward\n position = position - gap\n a_list[position] = current_value\n\n\na_list = [54, 26, 93, 17, 77, 31, 44, 55, 20, 88]\nshell_sort(a_list)\nprint(a_list)\n\n# 源码的步长的选择是从 n/2 开始,每次再减半,直至为0。步长的选择直接决定了希尔排序的复杂度。在维基百科上有对于步长串行的详细介绍。\ndef shell_sort2(ary):\n n = len(ary)\n gap = round(n/2) #初始步长 , 用round四舍五入取整\n while gap > 0 :\n for i in range(gap,n): #每一列进行插入排序 , 从gap 到 n-1\n temp = ary[i]\n j = i\n while ( j >= gap and ary[j-gap] > temp ): #插入排序\n ary[j] = ary[j-gap]\n j = j - gap\n ary[j] = temp\n gap = round(gap/2) #重新设置步长\n return ary\n\n\n\ndef shell_sort3(alist):\n \"\"\"希尔排序\"\"\"\n # n=9\n n = len(alist)\n # gap =4\n gap = n // 2\n # i = gap\n # for i in range(gap, n):\n # # i = [gap, gap+1, gap+2, gap+3... n-1]\n # while:\n # if alist[i] < alist[i-gap]:\n # alist[i], alist[i-gap] = alist[i-gap], alist[i]\n\n # gap变化到0之前,插入算法执行的次数\n while gap > 0:\n # 插入算法,与普通的插入算法的区别就是gap步长\n for j in range(gap, n):\n # j = [gap, gap+1, gap+2, gap+3, ..., n-1]\n i = j\n while i > 0:\n if alist[i] < alist[i-gap]:\n alist[i], alist[i-gap] = alist[i-gap], alist[i]\n i -= gap\n else:\n break\n # 缩短gap步长\n gap //= 2","repo_name":"DoranLiu/LiuFan_Python_Learning","sub_path":"Algorithm/Sort/Comparison_Sorting/Shell_Sort.py","file_name":"Shell_Sort.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28492422352","text":"# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗\n# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗█���║░░░░░██╔══██╗██╔══██╗██║░██╔╝\n# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░\n# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░\n# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗\n# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝\n#\n# Developed by Yakov V. 
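A quick trace of the gap halving described in the shell-sort record above — for ten elements the passes use gaps 5, 2, 1, so the final gap-1 pass is ordinary insertion sort over almost-sorted data:

data = [54, 26, 93, 17, 77, 31, 44, 55, 20, 88]
n = len(data)
gap = n // 2
while gap > 0:
    # One gapped insertion-sort pass per gap value.
    for i in range(gap, n):
        current, j = data[i], i
        while j >= gap and data[j - gap] > current:
            data[j] = data[j - gap]
            j -= gap
        data[j] = current
    print(gap, data)  # shows the list after each gap pass (5, 2, 1)
    gap //= 2
assert data == sorted([54, 26, 93, 17, 77, 31, 44, 55, 20, 88])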
Panov (C) Ling • Black 2020\n# @site http://ling.black\nfrom fastapi import HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom database import DatabaseUtils\nfrom database.actions import UserActions\nfrom database.models import UserMetaModel\n\n\nclass UserMetaActions:\n \"\"\"\n The user meta actions utility\n \"\"\"\n\n @staticmethod\n def set(db: Session, user_id: int, field: str, value: str):\n UserActions.check(db, user_id)\n \"\"\"\n Sets the meta value\n :param db:\n :param user_id:\n :param field:\n :param value:\n :return:\n \"\"\"\n db_meta = DatabaseUtils.core_query(\n db.query(UserMetaModel).filter(UserMetaModel.user_id == user_id)\n .filter(UserMetaModel.field == field)\n )\n if db_meta.count() > 0:\n db_meta.update({\"value\": value})\n db.commit()\n return db_meta.first()\n return DatabaseUtils.insert(\n db,\n db_item=UserMetaModel(\n user_id=user_id,\n field=field,\n value=value\n )\n )\n\n @staticmethod\n def list(db: Session, user_id: int, offset: int = 0, limit: int = 100):\n query = db.query(UserMetaModel).filter(UserMetaModel.user_id == user_id)\n return DatabaseUtils.limited_results_query(\n query,\n offset=offset,\n limit=limit\n )\n\n @staticmethod\n def get(db: Session, user_id: int, field: str, show_removed=False):\n \"\"\"\n Returns the user meta value\n :param show_removed:\n :param db:\n :param user_id:\n :param field:\n :return:\n \"\"\"\n return DatabaseUtils.core_query(\n db.query(UserMetaModel).filter(UserMetaModel.user_id == user_id).filter(UserMetaModel.field == field),\n show_removed\n ).first()\n\n @staticmethod\n def remove(db: Session, user_id: int, field: str):\n \"\"\"\n Removes the meta value\n :param db:\n :param user_id:\n :param field:\n :return:\n \"\"\"\n try:\n return DatabaseUtils.remove_query(\n db,\n db.query(UserMetaModel).filter(UserMetaModel.user_id == user_id, UserMetaModel.field == field)\n )\n except Exception:\n raise HTTPException(detail=f\"Meta field [{field}] is undefined (user_id: {user_id})!\", status_code=404)\n\n @staticmethod\n def recover(db: Session, user_id: int, field: str):\n \"\"\"\n Removes the meta value\n :param db:\n :param user_id:\n :param field:\n :return:\n \"\"\"\n try:\n return DatabaseUtils.recover_query(\n db,\n db.query(UserMetaModel).filter(UserMetaModel.user_id == user_id, UserMetaModel.field == field)\n )\n except Exception:\n raise HTTPException(detail=f\"Meta field [{field}] is undefined (user_id: {user_id})!\", status_code=404)\n","repo_name":"DiegoLing33/ling-simple-api","sub_path":"database/actions/user_meta_actions.py","file_name":"user_meta_actions.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"36478389609","text":"\"\"\"\nporc1 = 0.16\nbase1 = 700\nporc2 = 0.14\nbase2 = 650\nporc3 = 0.14\nbase3 = 600\nporc4 = 0.14\nbase4 = 550\nporc5 = 0.14\nbase5 = 500\nporc6 = 0.14\nbase6 = 400\n\nwhile True:\n print('=' * 50, 'CALCULE A COMISSÃO', '=' * 50)\n venda = float(input('Informe o valor da venda:'))\n if venda >= 100000:\n comissao = ( venda * porc1) + base1\n print(f'A comissão será de {comissao:.2f}')\n elif venda < 100000 and venda >= 80000:\n comissao = ( venda * porc2) + base2\n print(f'A comissão será de {comissao:.2f}')\n elif venda < 80000 and venda >= 60000:\n comissao = ( venda * porc3) + base3\n print(f'A comissão será de {comissao:.2f}')\n elif venda < 60000 and venda >= 40000:\n comissao = ( venda * porc4) + base4\n print(f'A comissão será de {comissao:.2f}')\n elif venda < 
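UserMetaActions.set in the record above implements update-or-insert by counting matching rows first; an equivalent sketch using one_or_none(), assuming the record's UserMetaModel is importable from its database.models module:

from sqlalchemy.orm import Session
# UserMetaModel is the model used by the record above
# (from database.models import UserMetaModel).

def set_meta(db: Session, user_id: int, field: str, value: str):
    row = (
        db.query(UserMetaModel)
        .filter(UserMetaModel.user_id == user_id, UserMetaModel.field == field)
        .one_or_none()
    )
    if row is None:
        row = UserMetaModel(user_id=user_id, field=field, value=value)
        db.add(row)
    else:
        row.value = value
    db.commit()
    return row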
40000 and venda >= 20000:\n comissao = ( venda * porc5) + base5\n print(f'A comissão será de {comissao:.2f}')\n else:\n comissao = ( venda * porc6) + base6\n print(f'A comissão será de {comissao:.2f}')\n\"\"\"\nvalor_fixo1 = 400\nporcentagem1= 0.14\nvalor_fixo2 = 500\nporcentagem2 = 0.14\nvalor_fixo3 = 550\nporcentagem3 = 0.14\nvalor_fixo4 = 600\nporcentagem4 = 0.14\nvalor_fixo5 = 650\nporcentagem5 = 0.14\nvalor_fixo6 = 700\nporcentagem6 = 0.16\nwhile True:\n print('%' * 150)\n venda = float(input('Qual é o valor da venda:'))\n if venda < 20_000:\n comissao1 = valor_fixo1 + (venda * porcentagem1)\n print(f'O valor da comissão será de {comissao1:.2f}')\n elif venda >= 20_000 and venda < 40_000:\n comissao2 = valor_fixo2 + (venda * porcentagem2)\n print(f'O valor da comissão será de {comissao2:.2f}')\n elif venda >= 40_000 and venda < 60_000:\n comissao3 = valor_fixo3 + (venda * porcentagem3)\n print(f'O valor da comissão será de {comissao3:.2f}')\n elif venda >= 60_000 and venda < 80_000:\n comissao4 = valor_fixo4 + (venda * porcentagem4)\n print(f'O valor da comissão será de {comissao4:.2f}')\n elif venda >= 80_000 and venda < 100_000:\n comissao5 = valor_fixo5 + (venda * porcentagem5)\n print(f'O valor da comissão será de {comissao5:.2f}')\n else:\n comissao6 = valor_fixo6 + (venda * porcentagem6)\n print(f'O valor da comissão será de {comissao6:.2f}')\n\n\n","repo_name":"Tiago1Figueira/Curso-Python","sub_path":"secao5/exercicios5/Ex36.py","file_name":"Ex36.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8106938090","text":"import json\n\nfrom django import template\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.html import _json_script_escapes, format_html\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\n@register.filter\ndef startswith(value, arg):\n \"\"\"Usage, {% if value|starts_with:\"arg\" %}\"\"\"\n return value.startswith(arg)\n\n\n@register.filter(\"columns\")\ndef columns(items, columns=4):\n \"\"\"\n :param items: typically an iterable of forms\n \"\"\"\n if items:\n return [items[i : i + columns] for i in range(0, len(items), columns)]\n return None\n\n\n@register.filter(\"rows\")\ndef rows(items, rows=4):\n \"\"\"\n Transform the list so that it's evenly spread across n rows.\n \"\"\"\n if items:\n res = [[] for i in range(rows)]\n for i, item in enumerate(items):\n res[i % rows].append(item)\n return res\n return None\n\n\n@register.inclusion_tag(\"general/includes/rating.html\")\ndef review_rating(rating_pct, num_stars=5, max_rating=100):\n if not rating_pct:\n return {\"full\": [], \"half\": False, \"open\": range(num_stars)}\n full = int(rating_pct / max_rating * num_stars)\n empty = int((max_rating - rating_pct) / max_rating * num_stars)\n\n return {\n \"full\": range(full),\n \"half\": (full + empty) != num_stars,\n \"open\": range(empty),\n }\n\n\n@register.filter(is_safe=True)\ndef json_ld_script(value):\n json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)\n return format_html(\n '',\n mark_safe(json_str),\n )\n","repo_name":"modelbrouwers/modelbrouwers","sub_path":"src/brouwers/general/templatetags/brouwers.py","file_name":"brouwers.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"73499941956","text":"from solver.base_solver import BaseSolver\nfrom 
Models.HairSpatNet import HairSpatNet\nfrom Loss.loss import lovasz_hinge,uniform_sample_loss,probability_sample_loss,binary_cross_entropy,compute_gradient_penalty\nimport os\nimport torch\nimport torch.nn\nimport torch.nn.functional as F\nfrom Tools.utils import *\nfrom Models.Discriminator import Spat_Discriminator\nimport torch.autograd\nclass HairSpatNetSolver(BaseSolver):\n\n @staticmethod\n def modify_options(parser):\n parser.set_defaults(save_root='checkpoints/HairSpatNet')\n parser.set_defaults(data_mode='image')\n parser=HairSpatNet.modify_options(parser)\n parser.add_argument('--close_gt',default=False)\n parser.add_argument('--use_gan',action='store_true')\n parser.add_argument('--use_gt_Ori',action='store_true')\n return parser\n\n def initialize(self, opt):\n super().initialize(opt)\n self.opt = opt\n self.Spat_min_cha=opt.Spat_min_cha\n self.Spat_max_cha=opt.Spat_max_cha\n\n self.initialize_networks(opt)\n\n if self.opt.isTrain:\n self.classification_weight=1.0\n self.classification_sparse_weight=0.1\n self.optimizer,self.D_optimizer=self.create_optimizers()\n self.criteria=torch.nn.CrossEntropyLoss()\n self.L1loss=torch.nn.L1Loss()\n self.L1loss_cont=torch.nn.L1Loss()\n\n\n def initialize_networks(self,opt):\n self.net=HairSpatNet(opt,in_cha=opt.input_nc,min_cha=self.Spat_min_cha,max_cha=self.Spat_max_cha)\n self.net.print_network()\n if opt.continue_train or opt.isTrain is False:\n path = os.path.join(opt.current_path, opt.save_root, opt.check_name, 'checkpoint')\n if os.path.exists(path):\n self.net = self.load_network(self.net, 'HairSpatNet', opt.which_iter, opt)\n else:\n print(\" Training from Scratch! \")\n self.net.init_weights(opt.init_type, opt.init_variance)\n\n\n if opt.isTrain:\n self.Discriminator = Spat_Discriminator()\n self.Discriminator.print_network()\n if len(opt.gpu_ids) > 0:\n assert (torch.cuda.is_available())\n self.model=self.net\n self.model=self.model.cuda()\n if opt.isTrain:\n self.Discriminator=self.Discriminator.cuda()\n\n\n\n # self.GrowingNet.cuda()\n # self.model = torch.nn.DataParallel(self.net, self.opt.gpu_ids)\n # self.model_on_one_gpu = self.model.module.cuda()\n # # self.GrowingNet=self.GrowingNet.module\n # else:\n # self.model = self.net\n\n def create_optimizers(self):\n params = []\n params += list(self.model.parameters())\n optimizer = torch.optim.Adam(params, lr=self.learning_rate, betas=(0.9, 0.999))\n d_params=self.Discriminator.parameters()\n D_optimizer=torch.optim.Adam(d_params,lr=self.learning_rate*3,betas=(0.9, 0.999))\n\n return optimizer,D_optimizer\n\n\n def preprocess_input(self,datas):\n image = datas['image'].type(torch.float)\n gt_orientation = datas['gt_ori'].type(torch.float)\n gt_occ=datas['gt_occ']\n Ori2D = datas['Ori2D'].type(torch.float)\n depth=datas['depth'].type(torch.float)\n if self.use_gpu():\n image = image.cuda()\n gt_orientation = gt_orientation.cuda()\n gt_occ=gt_occ.cuda()\n Ori2D = Ori2D.cuda()\n depth=depth.cuda()\n # save_image(torch.cat([image,torch.zeros(1,1,256,256).cuda()],dim=1),'test_image.png')\n # save_image(depth,'test_depth.png')\n return image,gt_orientation,gt_occ,Ori2D,depth\n\n\n\n def use_gpu(self):\n return len(self.opt.gpu_ids) > 0\n\n\n def train(self,iter_counter,dataloader,visualizer):\n for epoch in iter_counter.training_epochs():\n if epoch>60:\n self.opt.use_gt_Ori=False\n iter_counter.record_epoch_start(epoch)\n for i, datas in enumerate(dataloader):\n self.init_losses()\n iter_counter.record_one_iteration()\n image,gt_orientation,gt_occ,Ori2D,depth= 
self.preprocess_input(datas)\n if self.opt.close_gt:\n close_gt=close_voxel(gt_occ,5)\n else:\n close_gt=gt_occ\n\n if torch.sum(depth)==0:\n depth=None\n depth=None\n out_ori, out_occ,self.G_loss['ori_loss'],self.G_loss['occ_loss'] = self.model(image,gt_occ,gt_orientation,depth_map=depth,no_use_depth=self.opt.no_use_depth)\n # out_ori, _,self.G_loss['ori_loss'],_ = self.model(image,gt_occ,gt_orientation,depth_map=depth,no_use_depth=self.opt.no_use_depth)\n\n if self.opt.use_gan:\n # alpha=torch.rand(size=[self.batch_size,1,1,1,1]).cuda()\n\n feature_fake=self.Discriminator(out_ori*gt_occ)\n feature_real=self.Discriminator(gt_orientation)\n self.D_loss['gradient_penalty'] = compute_gradient_penalty(self.Discriminator, gt_orientation.data,\n (out_ori * gt_occ).data)\n\n self.G_loss['content'] = self.L1loss_cont(feature_fake[2], feature_real[2]) * 0.01\n scores_for_fake=torch.mean(feature_fake[-1])\n scores_for_real=torch.mean(feature_real[-1])\n\n # self.G_loss['G_loss']=-scores_for_fake\n\n self.D_loss['D_loss']=scores_for_fake-scores_for_real\n self.D_loss['D_score_loss']=torch.mean(feature_real[-1]**2)*1e-3\n\n # grandient=torch.autograd.grad(outputs=feature_delta[-1],inputs=delta_orientation,grad_outputs=torch.ones(feature_delta[-1].size()).cuda(),create_graph=False,retain_graph=False)[0]\n\n\n if self.opt.use_gan:\n self.loss_backward(self.D_loss, self.D_optimizer,True)\n self.loss_backward(self.G_loss, self.optimizer,False)\n\n\n\n if iter_counter.needs_printing():\n\n losses = self.get_latest_losses()\n visualizer.print_current_errors(epoch, iter_counter.epoch_iter, losses, iter_counter.time_per_iter)\n if iter_counter.needs_displaying():\n # positive=torch.sigmoid(out_occ)>0.65\n out_occ[out_occ>=0.2]=1\n out_occ[out_occ<0.2]=0\n # out_occ=torch.where(positive,torch.ones_like(positive),torch.zeros_like(positive))\n visualizer.draw_ori(image,gt_orientation,out_ori*gt_occ,out_occ*out_ori,suffix='')\n\n if iter_counter.needs_saving():\n print('saving the latest model (epoch %d, total_steps %d)' %\n (epoch, iter_counter.total_steps_so_far))\n self.save_network(self.model, 'HairSpatNet', iter_counter.total_steps_so_far, self.opt)\n self.save_network(self.model, 'HairSpatNet', 'latest', self.opt)\n self.save_network(self.Discriminator, 'Discriminator', iter_counter.total_steps_so_far, self.opt)\n self.save_network(self.Discriminator,'Discriminator','latest',self.opt)\n\n iter_counter.record_current_iter()\n self.update_learning_rate(epoch)\n iter_counter.record_epoch_end()\n\n def test(self,dataloader):\n with torch.no_grad():\n datas = dataloader.generate_test_data()\n image, gt_orientation, gt_occ = self.preprocess_input(datas)\n out_ori, out_occ = self.model(image)\n\n pred_ori=out_ori*gt_occ\n pred_ori=pred_ori.permute(0,2,3,4,1)\n pred_ori=pred_ori.cpu().numpy()\n save_ori_as_mat(pred_ori,self.opt)\n\n\n\n\n\n\n\n def loss_backward(self, losses, optimizer,retain=False):\n optimizer.zero_grad()\n loss = sum(losses.values()).mean()\n loss.backward(retain_graph=retain)\n optimizer.step()\n\n def init_losses(self):\n self.total_loss = {}\n self.D_loss={}\n self.G_loss={}\n\n def get_latest_losses(self):\n self.total_loss={**self.D_loss,**self.G_loss}\n return self.total_loss\n\n def update_learning_rate(self, epoch):\n if epoch % 6 == 0 and epoch != 0:\n self.learning_rate = self.learning_rate // 2\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.learning_rate\n\n for param_group_d in self.D_optimizer.param_groups:\n param_group_d['lr'] = 
self.learning_rate\n","repo_name":"KeyuWu-CS/NeuralHDHair","sub_path":"Code/solver/HairSpatNetSolver.py","file_name":"HairSpatNetSolver.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"62"} +{"seq_id":"17002022801","text":"import rdtest\nimport renderdoc as rd\n\n\nclass VK_VS_Max_Desc_Set(rdtest.TestCase):\n demos_test_name = 'VK_VS_Max_Desc_Set'\n\n def check_capture(self):\n\n action = self.find_action(\"Draw\")\n\n self.check(action is not None)\n\n self.controller.SetFrameEvent(action.eventId, False)\n\n pipe: rd.PipeState = self.controller.GetPipelineState()\n\n # We only need to check the color output for the first vertex - if we got that, the test succeeded.\n # We're not testing VS out fetch in general here, just that it works when there's no spare descriptor set\n postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, 1)\n\n postvs_ref = {\n 0: {\n 'vtx': 0,\n 'idx': 0,\n 'vertOut.col': [1.0, 0.2, 0.75, 0.8],\n },\n }\n\n self.check_mesh_data(postvs_ref, postvs_data)\n","repo_name":"baldurk/renderdoc","sub_path":"util/test/tests/Vulkan/VK_VS_Max_Desc_Set.py","file_name":"VK_VS_Max_Desc_Set.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":7948,"dataset":"github-code","pt":"62"} +{"seq_id":"39718163669","text":"from distutils.core import setup, Extension\nimport platform\n\n\ndef platform_correct(plat=platform.platform()):\n if plat.lower().find('armv6l') > -1:\n print(\"Platform: Rasberry Pi\\n\")\n return True\n if plat.lower().find('raspberry_pi') > -1:\n print(\"Platform: Rasberry Pi\\n\")\n return True\n if plat.lower().find('armv7l') > -1:\n print(\"Platform: Banana Pi\\n\")\n return True\n else:\n return False\n\n\nif not platform_correct():\n print('Cannot build. 
You are required to have a Rasberry Pi or Bananna Pi platform.')\n exit(0)\n\nmodule1 = Extension('dht11_sensor',\n sources = ['_dht11_sensor.c','dht11_sensor.c']\n ,libraries=['wiringPi']\n ,extra_compile_args=['-std=gnu99'])\n\nsetup (name = 'WiringPi_DHT_Sensor',\n version = '1.0',\n author = 'Hein Puth',\n author_email = 'hein.puth@gmail.com',\n license = 'MIT',\n url = 'https://github.com/warkanum/WiringPi_DHT_Sensor_PyMod',\n description = 'Python C module to read DHT Sensor with WiringPi',\n ext_modules = [module1])","repo_name":"warkanum/WiringPi_DHT_Sensor_PyMod","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"3151204725","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 24 22:37:57 2021\n\n@author: talha\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nclass Tk_Visualize:\n \n def acc_val_graph(train_data,validation_data):\n fig, ax1 = plt.subplots()\n plt.title('Training and validation Accuracy')\n color = 'tab:red'\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Training accuracy', color=color)\n ax1.plot(train_data, 'b',color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() \n\n color = 'tab:blue'\n ax2.set_xlabel('Epochs')\n ax2.set_ylabel('Validation accuracy', color=color) \n ax2.plot(validation_data,'b' ,color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n fig.tight_layout() \n plt.show()","repo_name":"mntalha/pycodes","sub_path":"tk_visualize.py","file_name":"tk_visualize.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74276746437","text":"import uuid\nfrom datetime import datetime\n\nfrom django.core.validators import MinLengthValidator\nfrom django.db import models\nfrom django_prometheus.models import ExportModelOperationsMixin\n\n\nclass LogSolicitacoesUsuario(\n ExportModelOperationsMixin('log_solicitacoes'), models.Model\n):\n \"\"\"Eventos de dados importantes para acompanhamento.\n\n Ex.: Fulano X executou a atividade Y no objeto W no dia DDDDMMAA\n \"\"\"\n\n ( # COMUNS AOS DOIS FLUXOS (PARTINDO DA ESCOLA E DA DRE)\n INICIO_FLUXO,\n CODAE_AUTORIZOU,\n TERCEIRIZADA_TOMOU_CIENCIA,\n TERCEIRIZADA_RECUSOU,\n CODAE_NEGOU,\n # ESPECIFICA DO PARTINDO DA DRE\n CODAE_PEDIU_REVISAO,\n DRE_REVISOU,\n # ESPECIFICA DO PARTINDO DA ESCOLA\n DRE_VALIDOU,\n DRE_PEDIU_REVISAO,\n DRE_NAO_VALIDOU,\n ESCOLA_REVISOU,\n CODAE_QUESTIONOU,\n TERCEIRIZADA_RESPONDEU_QUESTIONAMENTO,\n # \"BURLADO DO FLUXO\", PODE SER CHAMADO A QUALQUER MOMENTO COM AS DEVIDAS RESTRIÇÕES\n ESCOLA_CANCELOU,\n CODAE_NEGOU_CANCELAMENTO,\n DRE_CANCELOU,\n # ESPECIFICA DIETA ESPECIAL\n INICIO_FLUXO_INATIVACAO,\n CODAE_AUTORIZOU_INATIVACAO,\n CODAE_NEGOU_INATIVACAO,\n TERCEIRIZADA_TOMOU_CIENCIA_INATIVACAO,\n TERMINADA_AUTOMATICAMENTE_SISTEMA,\n # ESPECIFICA HOMOLOGACAO DE PRODUTO\n CODAE_PENDENTE_HOMOLOGACAO,\n CODAE_HOMOLOGADO,\n CODAE_NAO_HOMOLOGADO,\n CODAE_PEDIU_ANALISE_SENSORIAL,\n CODAE_CANCELOU_ANALISE_SENSORIAL,\n TERCEIRIZADA_CANCELOU,\n CODAE_SUSPENDEU,\n ESCOLA_OU_NUTRICIONISTA_RECLAMOU,\n CODAE_PEDIU_ANALISE_RECLAMACAO,\n CODAE_AUTORIZOU_RECLAMACAO,\n INATIVA,\n TERCEIRIZADA_CANCELOU_SOLICITACAO_HOMOLOGACAO,\n # ESPECIFICA RECLAMAÇÃO DE PRODUTO\n TERCEIRIZADA_RESPONDEU_RECLAMACAO,\n TERCEIRIZADA_RESPONDEU_ANALISE_SENSORIAL,\n CODAE_QUESTIONOU_UE,\n UE_RESPONDEU_RECLAMACAO,\n 
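The setup.py record above detects supported boards by substring-matching platform.platform(); a condensed sketch of the same check, collapsing the repeated if-branches into one membership test:

import platform

def on_arm_board(plat: str = None) -> bool:
    # Case-insensitive substring check over one tuple instead of four
    # near-identical if-blocks.
    plat = (plat or platform.platform()).lower()
    return any(tag in plat for tag in ('armv6l', 'armv7l', 'raspberry_pi'))

# platform.machine() is often a sharper probe than platform.platform():
# on 32-bit ARM boards it returns strings like 'armv7l' directly.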
NUTRISUPERVISOR_RESPONDEU_RECLAMACAO,\n CODAE_QUESTIONOU_NUTRISUPERVISOR,\n CODAE_RECUSOU_RECLAMACAO,\n CODAE_QUESTIONOU_TERCEIRIZADA,\n CODAE_RESPONDEU_RECLAMACAO,\n # ESPECIFICA SOLICITAÇÂO CADASTRO PRODUTO\n TERCEIRIZADA_ATENDE_SOLICITACAO_CADASTRO,\n # ESPECIFICA SOLICITAÇÃO DE REMESSA\n INICIO_FLUXO_SOLICITACAO,\n DILOG_ENVIA_SOLICITACAO,\n DISTRIBUIDOR_CONFIRMA_SOLICITACAO,\n DISTRIBUIDOR_SOLICITA_ALTERACAO_SOLICITACAO,\n PAPA_CANCELA_SOLICITACAO,\n PAPA_AGUARDA_CONFIRMACAO_CANCELAMENTO_SOLICITACAO,\n DISTRIBUIDOR_CONFIRMA_CANCELAMENTO,\n # ESPECIFICA SOLICITAÇÃO DE ALTERACAO\n DILOG_ACEITA_ALTERACAO,\n DILOG_NEGA_ALTERACAO,\n CANCELADO_ALUNO_MUDOU_ESCOLA,\n CANCELADO_ALUNO_NAO_PERTENCE_REDE,\n # MEDICAO INICIAL\n MEDICAO_EM_ABERTO_PARA_PREENCHIMENTO_UE,\n MEDICAO_ENVIADA_PELA_UE,\n # CRONOGRAMA\n CRONOGRAMA_CRIADO,\n CRONOGRAMA_ENVIADO_AO_FORNECEDOR,\n CRONOGRAMA_ASSINADO_PELO_FORNECEDOR,\n FORNECEDOR_SOLICITA_ALTERACAO_CRONOGRAMA,\n SUSPENSO_EM_ALGUNS_EDITAIS,\n ATIVO_EM_ALGUNS_EDITAIS,\n CRONOGRAMA_ASSINADO_PELO_USUARIO_CRONOGRAMA,\n CODAE_ATUALIZOU_PROTOCOLO,\n # MEDICAO INICIAL - CONTINUACAO\n MEDICAO_CORRECAO_SOLICITADA,\n MEDICAO_CORRIGIDA_PELA_UE,\n MEDICAO_APROVADA_PELA_DRE,\n MEDICAO_APROVADA_PELA_CODAE,\n CRONOGRAMA_ASSINADO_PELA_DINUTRE,\n CRONOGRAMA_ASSINADO_PELA_CODAE,\n VINCULO_DO_EDITAL_AO_PRODUTO,\n CRONOGRAMA_CIENTE_SOLICITACAO_ALTERACAO,\n FORNECEDOR_CIENTE_SOLICITACAO_ALTERACAO,\n APROVADO_DINUTRE_SOLICITACAO_ALTERACAO,\n REPROVADO_DINUTRE_SOLICITACAO_ALTERACAO,\n APROVADO_DILOG_SOLICITACAO_ALTERACAO,\n REPROVADO_DILOG_SOLICITACAO_ALTERACAO,\n CODAE_CANCELOU_SOLICITACAO_CORRECAO,\n TERCEIRIZADA_CANCELOU_SOLICITACAO_CORRECAO,\n SOLICITACAO_ALTERACAO_CRONOGRAMA_EM_ANALISE,\n NOTIFICACAO_CRIADA,\n NOTIFICACAO_ENVIADA_FISCAL,\n MEDICAO_CORRECAO_SOLICITADA_CODAE,\n MEDICAO_CORRIGIDA_PARA_CODAE,\n NOTIFICACAO_SOLICITADA_ALTERACAO,\n NOTIFICACAO_ASSINADA_FISCAL,\n CODAE_ALTERA_CRONOGRAMA,\n ALTERACAO_CRONOGRAMA_ENVIADA_AO_FORNECEDOR,\n LAYOUT_ENVIADO_PARA_ANALISE,\n LAYOUT_SOLICITADO_CORRECAO,\n LAYOUT_APROVADO,\n LAYOUT_CORRECAO_REALIZADA,\n ) = range(92)\n\n STATUS_POSSIVEIS = (\n (INICIO_FLUXO, 'Solicitação Realizada'),\n (CODAE_AUTORIZOU, 'CODAE autorizou'),\n (TERCEIRIZADA_TOMOU_CIENCIA, 'Terceirizada tomou ciência'),\n (TERCEIRIZADA_RECUSOU, 'Terceirizada recusou'),\n (CODAE_NEGOU, 'CODAE negou'),\n (CODAE_PEDIU_REVISAO, 'CODAE pediu revisão'),\n (DRE_REVISOU, 'DRE revisou'),\n (DRE_VALIDOU, 'DRE validou'),\n (DRE_PEDIU_REVISAO, 'DRE pediu revisão'),\n (DRE_NAO_VALIDOU, 'DRE não validou'),\n (ESCOLA_REVISOU, 'Escola revisou'),\n (ESCOLA_CANCELOU, 'Escola cancelou'),\n (CODAE_NEGOU_CANCELAMENTO, 'CODAE negou cancelamento'),\n (DRE_CANCELOU, 'DRE cancelou'),\n (CODAE_QUESTIONOU, 'Questionamento pela CODAE'),\n (\n TERCEIRIZADA_RESPONDEU_QUESTIONAMENTO,\n 'Terceirizada respondeu questionamento',\n ), # noqa\n (INICIO_FLUXO_INATIVACAO, 'Escola solicitou cancelamento'),\n (CODAE_AUTORIZOU_INATIVACAO, 'CODAE autorizou cancelamento'),\n (CODAE_NEGOU_INATIVACAO, 'CODAE negou cancelamento'),\n (\n TERCEIRIZADA_TOMOU_CIENCIA_INATIVACAO,\n 'Terceirizada tomou ciência do cancelamento',\n ), # noqa\n (\n TERMINADA_AUTOMATICAMENTE_SISTEMA,\n 'Cancelada por atingir data de término',\n ), # noqa\n (CODAE_PENDENTE_HOMOLOGACAO, 'Pendente homologação da CODAE'),\n (CODAE_HOMOLOGADO, 'CODAE homologou'),\n (CODAE_NAO_HOMOLOGADO, 'CODAE não homologou'),\n (CODAE_PEDIU_ANALISE_SENSORIAL, 'CODAE pediu análise sensorial'),\n (CODAE_CANCELOU_ANALISE_SENSORIAL, 'CODAE cancelou análise 
sensorial'),\n (TERCEIRIZADA_CANCELOU, 'Terceirizada cancelou homologação'),\n (INATIVA, 'Homologação inativa'),\n (\n TERCEIRIZADA_CANCELOU_SOLICITACAO_HOMOLOGACAO,\n 'Terceirizada cancelou solicitação de homologação de produto',\n ),\n (CODAE_SUSPENDEU, 'CODAE suspendeu o produto'),\n (\n ESCOLA_OU_NUTRICIONISTA_RECLAMOU,\n 'Escola/Nutricionista reclamou do produto',\n ), # noqa\n (CODAE_PEDIU_ANALISE_RECLAMACAO, 'CODAE pediu análise da reclamação'),\n (CODAE_AUTORIZOU_RECLAMACAO, 'CODAE autorizou reclamação'),\n (CODAE_RECUSOU_RECLAMACAO, 'CODAE recusou reclamação'),\n (\n CODAE_QUESTIONOU_TERCEIRIZADA,\n 'CODAE questionou terceirizada sobre reclamação',\n ), # noqa\n (CODAE_QUESTIONOU_UE, 'CODAE questionou U.E. sobre reclamação'), # noqa\n (CODAE_RESPONDEU_RECLAMACAO, 'CODAE respondeu ao reclamante da reclamação'),\n (\n CODAE_QUESTIONOU_NUTRISUPERVISOR,\n 'CODAE questionou nutrisupervisor sobre reclamação',\n ),\n (TERCEIRIZADA_RESPONDEU_RECLAMACAO, 'Terceirizada respondeu a reclamação'),\n (UE_RESPONDEU_RECLAMACAO, 'U.E. respondeu a reclamação'),\n (\n NUTRISUPERVISOR_RESPONDEU_RECLAMACAO,\n 'Nutrisupervisor respondeu a reclamação',\n ),\n (\n TERCEIRIZADA_RESPONDEU_ANALISE_SENSORIAL,\n 'Terceirizada respondeu a análise',\n ), # noqa\n (INICIO_FLUXO_SOLICITACAO, 'Papa enviou a requisição'),\n (DILOG_ENVIA_SOLICITACAO, 'Dilog Enviou a requisição'),\n (\n DISTRIBUIDOR_CONFIRMA_SOLICITACAO,\n 'Distribuidor confirmou requisição',\n ), # noqa\n (\n DISTRIBUIDOR_SOLICITA_ALTERACAO_SOLICITACAO,\n 'Distribuidor pede alteração da requisição',\n ), # noqa\n (PAPA_CANCELA_SOLICITACAO, 'Papa cancelou a requisição'),\n (PAPA_AGUARDA_CONFIRMACAO_CANCELAMENTO_SOLICITACAO, 'Papa aguarda confirmação do cancelamento da solicitação'),\n (DISTRIBUIDOR_CONFIRMA_CANCELAMENTO, 'Distribuidor confirmou cancelamento e Papa cancelou a requisição'),\n (DILOG_ACEITA_ALTERACAO, 'Dilog Aceita Alteração'),\n (DILOG_NEGA_ALTERACAO, 'Dilog Nega Alteração'),\n (\n CANCELADO_ALUNO_MUDOU_ESCOLA,\n 'Cancelamento por alteração de unidade educacional',\n ),\n (\n CANCELADO_ALUNO_NAO_PERTENCE_REDE,\n 'Cancelamento para aluno não matriculado na rede municipal',\n ),\n (MEDICAO_EM_ABERTO_PARA_PREENCHIMENTO_UE, 'Em aberto para preenchimento pela UE'),\n (MEDICAO_ENVIADA_PELA_UE, 'Enviado pela UE'),\n (MEDICAO_CORRECAO_SOLICITADA, 'Correção solicitada'),\n (MEDICAO_CORRIGIDA_PELA_UE, 'Corrigido para DRE'),\n (MEDICAO_APROVADA_PELA_DRE, 'Aprovado pela DRE'),\n (MEDICAO_APROVADA_PELA_CODAE, 'Aprovado pela CODAE'),\n (CRONOGRAMA_CRIADO, 'Cronograma Criado'),\n (CRONOGRAMA_ENVIADO_AO_FORNECEDOR, 'Assinado e Enviado ao Fornecedor'),\n (CRONOGRAMA_ASSINADO_PELO_FORNECEDOR, 'Assinado Fornecedor'),\n (FORNECEDOR_SOLICITA_ALTERACAO_CRONOGRAMA, 'Solicitada Alteração'),\n (SUSPENSO_EM_ALGUNS_EDITAIS, 'Suspenso em alguns editais'),\n (ATIVO_EM_ALGUNS_EDITAIS, 'Ativo em alguns editais'),\n (CRONOGRAMA_ASSINADO_PELO_USUARIO_CRONOGRAMA, 'Assinado Cronograma'),\n (CODAE_ATUALIZOU_PROTOCOLO, 'CODAE Atualizou o protocolo'),\n (CRONOGRAMA_ASSINADO_PELA_DINUTRE, 'Assinado DINUTRE'),\n (CRONOGRAMA_ASSINADO_PELA_CODAE, 'Assinado CODAE'),\n (VINCULO_DO_EDITAL_AO_PRODUTO, 'Vínculo do Edital ao Produto'),\n (CRONOGRAMA_CIENTE_SOLICITACAO_ALTERACAO, 'Cronograma Ciente'),\n (FORNECEDOR_CIENTE_SOLICITACAO_ALTERACAO, 'Fornecedor Ciente'),\n (APROVADO_DINUTRE_SOLICITACAO_ALTERACAO, 'Aprovado DINUTRE'),\n (REPROVADO_DINUTRE_SOLICITACAO_ALTERACAO, 'Reprovado DINUTRE'),\n (APROVADO_DILOG_SOLICITACAO_ALTERACAO, 'Aprovado DILOG'),\n 
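A hypothetical mini-model illustrating the integer-choices pattern that LogSolicitacoesUsuario uses throughout this record (the choices list continues below) — Django generates a get_<field>_display() accessor for any field declared with choices:

from django.db import models

class Evento(models.Model):
    INICIO, AUTORIZADO = range(2)
    STATUS = (
        (INICIO, 'Solicitação Realizada'),
        (AUTORIZADO, 'CODAE autorizou'),
    )
    status = models.PositiveSmallIntegerField(choices=STATUS)

# evento.get_status_display() -> 'CODAE autorizou' when status == AUTORIZADO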
(REPROVADO_DILOG_SOLICITACAO_ALTERACAO, 'Reprovado DILOG'),\n (CODAE_CANCELOU_SOLICITACAO_CORRECAO, 'CODAE cancelou solicitação de correção'),\n (TERCEIRIZADA_CANCELOU_SOLICITACAO_CORRECAO, 'Terceirizada cancelou solicitação de correção'),\n (SOLICITACAO_ALTERACAO_CRONOGRAMA_EM_ANALISE, 'Em Análise'),\n (NOTIFICACAO_CRIADA, 'Notificação criada'),\n (NOTIFICACAO_ENVIADA_FISCAL, 'Notificação enviada para o fiscal'),\n (MEDICAO_CORRECAO_SOLICITADA_CODAE, 'Correção solicitada pela CODAE'),\n (MEDICAO_CORRIGIDA_PARA_CODAE, 'Corrigido para CODAE'),\n (CODAE_ALTERA_CRONOGRAMA, 'Alteração CODAE'),\n (ALTERACAO_CRONOGRAMA_ENVIADA_AO_FORNECEDOR, 'Alteração enviada ao fornecedor'),\n (LAYOUT_ENVIADO_PARA_ANALISE, 'Layout enviado para análise'),\n (LAYOUT_SOLICITADO_CORRECAO, 'Layout solicitado correção'),\n (LAYOUT_APROVADO, 'Layout aprovado'),\n (LAYOUT_CORRECAO_REALIZADA, 'Layout correção realizada'),\n )\n ( # DA ESCOLA\n SOLICITACAO_KIT_LANCHE_AVULSA,\n ALTERACAO_DE_CARDAPIO,\n SUSPENSAO_DE_CARDAPIO,\n INVERSAO_DE_CARDAPIO,\n INCLUSAO_ALIMENTACAO_NORMAL,\n INCLUSAO_ALIMENTACAO_CEI,\n SUSPENSAO_ALIMENTACAO_CEI,\n INCLUSAO_ALIMENTACAO_CONTINUA,\n DIETA_ESPECIAL,\n # DA DRE\n SOLICITACAO_KIT_LANCHE_UNIFICADA,\n # DA TERCEIRIZADA\n HOMOLOGACAO_PRODUTO,\n # PRODUTOS\n RECLAMACAO_PRODUTO,\n # DA LOGISTICA ABASTECIMENTO\n SOLICITACAO_REMESSA_PAPA,\n SOLICITACAO_DE_ALTERACAO_REQUISICAO,\n ABASTECIMENTO_GUIA_DE_REMESSA,\n MEDICAO_INICIAL,\n INCLUSAO_ALIMENTACAO_CEMEI,\n SOLICITACAO_KIT_LANCHE_CEMEI,\n CRONOGRAMA,\n SOLICITACAO_DE_ALTERACAO_CRONOGRAMA,\n NOTIFICACAO_OCORRENCIA_GUIA,\n LAYOUT_DE_EMBALAGEM\n ) = range(22)\n\n TIPOS_SOLICITACOES = (\n (SOLICITACAO_KIT_LANCHE_AVULSA, 'Solicitação de kit lanche avulsa'),\n (ALTERACAO_DE_CARDAPIO, 'Alteração do tipo de alimentação'),\n (SUSPENSAO_DE_CARDAPIO, 'Suspensão de cardápio'),\n (INVERSAO_DE_CARDAPIO, 'Inversão de cardápio'),\n (INCLUSAO_ALIMENTACAO_NORMAL, 'Inclusão de alimentação normal'),\n (INCLUSAO_ALIMENTACAO_CEI, 'Inclusão de alimentação da CEI'),\n (SUSPENSAO_ALIMENTACAO_CEI, 'Suspensão de alimentação da CEI'),\n (INCLUSAO_ALIMENTACAO_CONTINUA, 'Inclusão de alimentação contínua'),\n (DIETA_ESPECIAL, 'Dieta Especial'),\n (SOLICITACAO_KIT_LANCHE_UNIFICADA, 'Solicitação de kit lanche unificada'),\n (HOMOLOGACAO_PRODUTO, 'Homologação de Produto'),\n (RECLAMACAO_PRODUTO, 'Reclamação de Produto'),\n (TERCEIRIZADA_RESPONDEU_ANALISE_SENSORIAL, 'Responde Análise Sensorial'),\n (SOLICITACAO_REMESSA_PAPA, 'Solicitação de remessa'),\n (SOLICITACAO_DE_ALTERACAO_REQUISICAO, 'Solicitação de Ateração de requisição'),\n (ABASTECIMENTO_GUIA_DE_REMESSA, 'Abastecimento de guia de remessa'),\n (MEDICAO_INICIAL, 'Solicitação de medição inicial'),\n (INCLUSAO_ALIMENTACAO_CEMEI, 'Inclusão de Alimentação CEMEI'),\n (SOLICITACAO_KIT_LANCHE_CEMEI, 'Solicitação de kit lanche CEMEI'),\n (CRONOGRAMA, 'Cronograma'),\n (SOLICITACAO_DE_ALTERACAO_CRONOGRAMA, 'Solicitação de alteração do cronograma'),\n (NOTIFICACAO_OCORRENCIA_GUIA, 'Notificação de guia com ocorrência'),\n (LAYOUT_DE_EMBALAGEM, 'Layout de embalagem')\n )\n\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n criado_em = models.DateTimeField('Criado em', editable=False, auto_now_add=True)\n descricao = models.TextField('Descricao', blank=True)\n justificativa = models.TextField('Justificativa', blank=True)\n resposta_sim_nao = models.BooleanField('Resposta - Sim ou Não', default=False)\n status_evento = models.PositiveSmallIntegerField(choices=STATUS_POSSIVEIS)\n solicitacao_tipo = 
models.PositiveSmallIntegerField(choices=TIPOS_SOLICITACOES)\n uuid_original = models.UUIDField()\n usuario = models.ForeignKey('perfil.Usuario', on_delete=models.DO_NOTHING)\n\n class Meta:\n ordering = ('-criado_em',)\n\n @property\n def status_evento_explicacao(self):\n return self.get_status_evento_display()\n\n @property\n def get_anexos(self):\n return AnexoLogSolicitacoesUsuario.objects.filter(log=self)\n\n def __str__(self):\n data = datetime.strftime(self.criado_em, '%Y-%m-%d %H:%M:%S')\n return (\n f'{self.usuario} executou {self.get_status_evento_display()} '\n f'em {self.get_solicitacao_tipo_display()} no dia {data}'\n )\n\n\nclass AnexoLogSolicitacoesUsuario(\n ExportModelOperationsMixin('log_solicitacoes_anexo'), models.Model\n):\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n log = models.ForeignKey(\n LogSolicitacoesUsuario, related_name='anexos', on_delete=models.DO_NOTHING\n )\n nome = models.CharField(max_length=255, blank=True)\n arquivo = models.FileField()\n\n def __str__(self):\n return f'Anexo {self.uuid} - {self.nome}'\n\n\nclass Endereco(ExportModelOperationsMixin('endereco'), models.Model):\n logradouro = models.CharField(max_length=255, validators=[MinLengthValidator(5)])\n numero = models.IntegerField(null=True)\n complemento = models.CharField(max_length=50, blank=True)\n bairro = models.CharField(max_length=50)\n cep = models.IntegerField()\n\n\nclass Contato(ExportModelOperationsMixin('contato'), models.Model):\n nome = models.CharField('Nome', max_length=160, blank=True)\n telefone = models.CharField(\n max_length=13, validators=[MinLengthValidator(8)], blank=True\n )\n telefone2 = models.CharField(\n max_length=10, validators=[MinLengthValidator(8)], blank=True\n )\n celular = models.CharField(\n max_length=11, validators=[MinLengthValidator(8)], blank=True\n )\n email = models.EmailField(blank=True)\n eh_nutricionista = models.BooleanField('É nutricionista?', default=False)\n crn_numero = models.CharField('Nutricionista crn', max_length=160, blank=True)\n\n def __str__(self):\n if self.nome and self.telefone:\n return f'{self.nome}, {self.telefone}, {self.email}'\n elif self.telefone:\n return f'{self.telefone}, {self.email}'\n elif self.telefone2:\n return f'{self.telefone2}, {self.email}'\n else:\n return f'{self.email}'\n\n\nclass TemplateMensagem(ExportModelOperationsMixin('template_mensagem'), models.Model):\n \"\"\"Tem um texto base e troca por campos do objeto que entra como parâmetro.\n\n Ex:\n Olá @nome, a Alteração de cardápio #@identificador solicitada por @requerinte está em situação @status.\n \"\"\"\n\n ALTERACAO_CARDAPIO = 0\n INCLUSAO_ALIMENTACAO = 1\n INCLUSAO_ALIMENTACAO_CONTINUA = 2\n SUSPENSAO_ALIMENTACAO = 3\n SOLICITACAO_KIT_LANCHE_AVULSA = 4\n SOLICITACAO_KIT_LANCHE_UNIFICADA = 5\n INVERSAO_CARDAPIO = 6\n DIETA_ESPECIAL = 7\n HOMOLOGACAO_PRODUTO = 8\n\n CHOICES = (\n (ALTERACAO_CARDAPIO, 'Alteração do tipo de Alimentação'),\n (INCLUSAO_ALIMENTACAO, 'Inclusão de alimentação'),\n (INCLUSAO_ALIMENTACAO_CONTINUA, 'Inclusão de alimentação contínua'),\n (SUSPENSAO_ALIMENTACAO, 'Suspensão de alimentação'),\n (SOLICITACAO_KIT_LANCHE_AVULSA, 'Solicitação de kit lanche avulsa'),\n (SOLICITACAO_KIT_LANCHE_UNIFICADA, 'Solicitação de kit lanche unificada'),\n (INVERSAO_CARDAPIO, 'Inversão de cardápio'),\n (DIETA_ESPECIAL, 'Dieta Especial'),\n (HOMOLOGACAO_PRODUTO, 'Homologação de Produto'),\n )\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n tipo = 
models.PositiveSmallIntegerField(choices=CHOICES, unique=True)\n assunto = models.CharField('Assunto', max_length=256, blank=True)\n template_html = models.TextField('Template', blank=True)\n\n def __str__(self):\n return f'{self.get_tipo_display()}'\n\n class Meta:\n verbose_name = 'Template de mensagem'\n verbose_name_plural = 'Template de mensagem'\n\n\nclass CategoriaPerguntaFrequente(ExportModelOperationsMixin('cat_faq'), models.Model):\n nome = models.CharField('Nome', blank=True, max_length=100)\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass PerguntaFrequente(ExportModelOperationsMixin('faq'), models.Model):\n categoria = models.ForeignKey(\n 'CategoriaPerguntaFrequente', on_delete=models.PROTECT\n ) # noqa\n pergunta = models.TextField('Pergunta')\n resposta = models.TextField('Resposta')\n criado_em = models.DateTimeField(\n 'Criado em', editable=False, auto_now_add=True\n ) # noqa\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n\n def __str__(self):\n return self.pergunta\n\n\nclass NotificacaoException(Exception):\n pass\n\n\nclass Notificacao(models.Model):\n # Tipos de Notificação\n TIPO_NOTIFICACAO_ALERTA = 'ALERTA'\n TIPO_NOTIFICACAO_AVISO = 'AVISO'\n TIPO_NOTIFICACAO_PENDENCIA = 'PENDENCIA'\n\n TIPO_NOTIFICACAO_NOMES = {\n TIPO_NOTIFICACAO_ALERTA: 'Alerta',\n TIPO_NOTIFICACAO_AVISO: 'Aviso',\n TIPO_NOTIFICACAO_PENDENCIA: 'Pendência',\n }\n\n TIPO_NOTIFICACAO_CHOICES = (\n (TIPO_NOTIFICACAO_ALERTA, TIPO_NOTIFICACAO_NOMES[TIPO_NOTIFICACAO_ALERTA]),\n (TIPO_NOTIFICACAO_AVISO, TIPO_NOTIFICACAO_NOMES[TIPO_NOTIFICACAO_AVISO]),\n (TIPO_NOTIFICACAO_PENDENCIA, TIPO_NOTIFICACAO_NOMES[TIPO_NOTIFICACAO_PENDENCIA]),\n )\n\n # Categorias de Notificação\n CATEGORIA_NOTIFICACAO_REQUISICAO_DE_ENTREGA = 'REQUISICAO_DE_ENTREGA'\n CATEGORIA_NOTIFICACAO_ALTERACAO_REQUISICAO_ENTREGA = 'ALTERACAO_REQUISICAO_ENTREGA'\n CATEGORIA_NOTIFICACAO_GUIA_DE_REMESSA = 'GUIA_DE_REMESSA'\n CATEGORIA_NOTIFICACAO_CRONOGRAMA = 'CRONOGRAMA'\n CATEGORIA_NOTIFICACAO_SOLICITACAO_ALTERACAO_CRONOGRAMA = 'SOLICITACAO_ALTERACAO_CRONOGRAMA'\n CATEGORIA_NOTIFICACAO_ALTERACAO_CRONOGRAMA = 'ALTERACAO_CRONOGRAMA'\n\n CATEGORIA_NOTIFICACAO_NOMES = {\n CATEGORIA_NOTIFICACAO_REQUISICAO_DE_ENTREGA: 'Requisição de entrega',\n CATEGORIA_NOTIFICACAO_ALTERACAO_REQUISICAO_ENTREGA: 'Alteração de requisição de entrega',\n CATEGORIA_NOTIFICACAO_GUIA_DE_REMESSA: 'Guia de Remessa',\n CATEGORIA_NOTIFICACAO_CRONOGRAMA: 'Assinatura do Cronograma',\n CATEGORIA_NOTIFICACAO_SOLICITACAO_ALTERACAO_CRONOGRAMA: 'Solicitação de Alteração do Cronograma',\n CATEGORIA_NOTIFICACAO_ALTERACAO_CRONOGRAMA: 'Alteração do Cronograma'\n }\n\n CATEGORIA_NOTIFICACAO_CHOICES = (\n (CATEGORIA_NOTIFICACAO_REQUISICAO_DE_ENTREGA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_REQUISICAO_DE_ENTREGA]),\n\n (CATEGORIA_NOTIFICACAO_ALTERACAO_REQUISICAO_ENTREGA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_ALTERACAO_REQUISICAO_ENTREGA]),\n\n (CATEGORIA_NOTIFICACAO_GUIA_DE_REMESSA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_GUIA_DE_REMESSA]),\n\n (CATEGORIA_NOTIFICACAO_CRONOGRAMA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_CRONOGRAMA]),\n\n (CATEGORIA_NOTIFICACAO_SOLICITACAO_ALTERACAO_CRONOGRAMA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_SOLICITACAO_ALTERACAO_CRONOGRAMA]),\n\n (CATEGORIA_NOTIFICACAO_ALTERACAO_CRONOGRAMA,\n CATEGORIA_NOTIFICACAO_NOMES[CATEGORIA_NOTIFICACAO_ALTERACAO_CRONOGRAMA]),\n )\n\n uuid = 
models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n\n tipo = models.CharField(\n 'Tipo',\n max_length=15,\n choices=TIPO_NOTIFICACAO_CHOICES,\n default=TIPO_NOTIFICACAO_AVISO,\n )\n\n categoria = models.CharField(\n 'Categoria',\n max_length=50,\n choices=CATEGORIA_NOTIFICACAO_CHOICES,\n )\n\n titulo = models.CharField('Título', max_length=100, default='', blank=True)\n\n descricao = models.TextField('Descrição', max_length=5000, default='', blank=True)\n\n hora = models.TimeField('Hora', editable=False, auto_now_add=True)\n\n lido = models.BooleanField('Foi Lido?', default=False)\n\n resolvido = models.BooleanField('Foi resolvido?', default=False)\n\n usuario = models.ForeignKey('perfil.Usuario', on_delete=models.CASCADE, default='', null=True, blank=True)\n\n criado_em = models.DateTimeField('Criado em', editable=False, auto_now_add=True)\n\n link = models.CharField('Link', max_length=100, default='', blank=True)\n\n requisicao = models.ForeignKey('logistica.SolicitacaoRemessa', on_delete=models.CASCADE,\n related_name='notificacoes_da_requisicao', blank=True, null=True)\n\n solicitacao_alteracao = models.ForeignKey('logistica.SolicitacaoDeAlteracaoRequisicao',\n on_delete=models.CASCADE,\n related_name='notificacoes_da_solicitacao_alteracao',\n blank=True,\n null=True)\n\n guia = models.ForeignKey('logistica.Guia', on_delete=models.CASCADE, related_name='notificacoes_da_guia',\n blank=True, null=True)\n\n cronograma = models.ForeignKey('pre_recebimento.Cronograma', on_delete=models.CASCADE,\n related_name='notificacoes_do_cronograma', blank=True, null=True)\n\n class Meta:\n verbose_name = 'Notificação'\n verbose_name_plural = 'Notificações'\n\n def __str__(self):\n return self.titulo\n\n @classmethod\n def notificar(cls, tipo, categoria, titulo, descricao, usuario, link, # noqa C901\n requisicao=None, solicitacao_alteracao=None, guia=None, renotificar=True, cronograma=None):\n\n if tipo not in cls.TIPO_NOTIFICACAO_NOMES.keys():\n raise NotificacaoException(f'Tipo {tipo} não é um tipo válido.')\n\n if categoria not in cls.CATEGORIA_NOTIFICACAO_NOMES.keys():\n raise NotificacaoException(f'Categoria {categoria} não é uma categoria válida.')\n\n if not titulo:\n raise NotificacaoException(f'O título não pode ser vazio.')\n\n if not usuario:\n raise NotificacaoException(f'É necessário definir o usuário destinatário.')\n\n if not renotificar:\n notificacao_existente = cls.objects.filter(\n tipo=tipo,\n categoria=categoria,\n requisicao=requisicao,\n guia=guia,\n titulo=titulo,\n usuario=usuario,\n )\n\n if renotificar or not notificacao_existente:\n cls.objects.create(\n tipo=tipo,\n categoria=categoria,\n titulo=titulo,\n descricao=descricao,\n usuario=usuario,\n link=link,\n requisicao=requisicao,\n solicitacao_alteracao=solicitacao_alteracao,\n guia=guia,\n )\n\n @classmethod\n def resolver_pendencia(cls, titulo, requisicao=None, solicitacao_alteracao=None, guia=None):\n if not titulo:\n raise NotificacaoException(f'O título não pode ser vazio.')\n if not requisicao and not solicitacao_alteracao and not guia:\n raise NotificacaoException(f'É preciso informar uma requisição, solicitação de alteração ou guia para '\n f'resolver uma pendência.')\n\n pendencias = cls.objects.filter(\n tipo=Notificacao.TIPO_NOTIFICACAO_PENDENCIA,\n titulo=titulo,\n requisicao=requisicao,\n solicitacao_alteracao=solicitacao_alteracao,\n guia=guia,\n resolvido=False\n )\n pendencias.update(resolvido=True, lido=True)\n\n\nclass CentralDeDownload(models.Model):\n # Status Choice\n 
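A hedged usage sketch for the Notificacao.notificar / resolver_pendencia classmethods defined above; `usuario` and `guia` stand in for real perfil.Usuario and logistica.Guia instances:

# Invalid tipo or categoria values raise NotificacaoException before
# anything is written to the database.
Notificacao.notificar(
    tipo=Notificacao.TIPO_NOTIFICACAO_PENDENCIA,
    categoria=Notificacao.CATEGORIA_NOTIFICACAO_GUIA_DE_REMESSA,
    titulo='Guia com ocorrência',
    descricao='Conferir a guia antes do recebimento.',
    usuario=usuario,
    link='/logistica/guias/42',
    guia=guia,
)

# Once handled, matching open pendencies are marked resolved and read:
Notificacao.resolver_pendencia(titulo='Guia com ocorrência', guia=guia)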
STATUS_EM_PROCESSAMENTO = 'EM_PROCESSAMENTO'\n STATUS_CONCLUIDO = 'CONCLUIDO'\n STATUS_ERRO = 'ERRO'\n\n STATUS_NOMES = {\n STATUS_EM_PROCESSAMENTO: 'Em processamento',\n STATUS_CONCLUIDO: 'Concluído',\n STATUS_ERRO: 'Erro'\n }\n\n STATUS_CHOICES = (\n (STATUS_EM_PROCESSAMENTO, STATUS_NOMES[STATUS_EM_PROCESSAMENTO]),\n (STATUS_CONCLUIDO, STATUS_NOMES[STATUS_CONCLUIDO]),\n (STATUS_ERRO, STATUS_NOMES[STATUS_ERRO])\n )\n\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n identificador = models.CharField('Nome do arquivo', max_length=200, default='')\n arquivo = models.FileField(blank=True, verbose_name='Arquivo', upload_to='cental_downloads')\n status = models.CharField(\n 'status',\n max_length=20,\n choices=STATUS_CHOICES,\n default=STATUS_EM_PROCESSAMENTO\n )\n msg_erro = models.CharField('Mensagem erro', max_length=300, blank=True)\n visto = models.BooleanField('Foi visto?', default=False)\n usuario = models.ForeignKey('perfil.Usuario', on_delete=models.CASCADE, default='', null=True, blank=True)\n criado_em = models.DateTimeField('Criado em', editable=False, auto_now_add=True)\n\n class Meta:\n verbose_name = 'Central de Download'\n verbose_name_plural = 'Central de Downloads'\n\n def __str__(self):\n return self.identificador\n\n def delete(self, using=None, keep_parents=False):\n if self.arquivo:\n self.arquivo.storage.delete(self.arquivo.name)\n super().delete()\n\n\nclass SolicitacaoAberta(models.Model):\n uuid_solicitacao = models.CharField(max_length=50)\n usuario = models.ForeignKey('perfil.Usuario', on_delete=models.DO_NOTHING)\n datetime_ultimo_acesso = models.DateTimeField()\n\n def __str__(self):\n retorno = f'Solicitação \"#{str(self.uuid_solicitacao).upper()[:5]}\"'\n retorno += f' está aberta e em edição pelo usuário \"{self.usuario}\"'\n return retorno\n","repo_name":"prefeiturasp/SME-Terceirizadas","sub_path":"sme_terceirizadas/dados_comuns/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":28332,"program_lang":"python","lang":"pt","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"12758746855","text":"import statistics\r\nfrom math import floor\r\n\r\n\r\ndef fuel_usage(start, dest):\r\n fuel = 0\r\n inc = 1 if dest > start else -1\r\n for i in range(start, dest + inc, inc):\r\n fuel += abs(start - i)\r\n return fuel\r\n\r\n\r\ndef solve():\r\n file = open(\"aoc_12-7_input.txt\", \"r\")\r\n file_lines = file.read().splitlines()\r\n file.close()\r\n\r\n input = [int(i) for i in file_lines[0].split(\",\")]\r\n mean = floor(statistics.mean(input))\r\n return sum([fuel_usage(i, mean) for i in input])\r\n\r\n\r\nprint(solve())\r\n","repo_name":"timbtlr/advent-of-code","sub_path":"src/2021/12-7/aoc_12-7_2.py","file_name":"aoc_12-7_2.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21870177664","text":"class Stack:\n def __init__(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n if not self.is_empty():\n return self.items.pop()\n else:\n return None\n\n def is_empty(self):\n return len(self.items) == 0\n\nnum = int(input(\"Enter number: \"))\n\nn = str(num)\nstack = Stack()\n\nfor digit in n:\n stack.push(digit)\n\nrnum = ''\nwhile not stack.is_empty():\n rnum += stack.pop()\n\nif n == rnum:\n print(\"number is a palindrome\")\n \nelse:\n print(\"not a palindrome 
number\")\n","repo_name":"onesixtwo/cpe-200","sub_path":"prelim/prelimprac.py","file_name":"prelimprac.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41252337994","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\ndef main():\r\n n = int(input())\r\n p = list(map(int, input().split()))\r\n count = 0\r\n\r\n for i in range(1, n - 1):\r\n value_max = max(p[i - 1], p[i], p[i + 1])\r\n value_min = min(p[i - 1], p[i], p[i + 1])\r\n\r\n if p[i] != value_max and p[i] != value_min:\r\n count += 1\r\n\r\n print(count)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"KATO-Hiro/AtCoder","sub_path":"ABC/abc101-abc150/abc132/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"3624427005","text":"import os\nos.environ['DATABASE_URL'] = 'sqlite://'\n\nfrom datetime import datetime, timedelta\nimport unittest\nfrom app import app, db\nfrom app.models import User, Post\nfrom flask_login import FlaskLoginClient\nfrom bs4 import BeautifulSoup\n\n\nclass TestRoutes(unittest.TestCase):\n def setUp(self):\n self.app_context = app.app_context()\n self.app_context.push()\n db.create_all()\n app.test_client_class = FlaskLoginClient\n self.client = app.test_client()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_homepage_no_login(self):\n \"\"\"\n Feature: Homepage defaults to Login screen when not logged in\n\n Scenario: Request homepage with no user logged in\n Given user isn't logged in\n When accessing the homepage, \"/\"\n Then the page is redirected to the login page, \"/login\"\n And the page contains the phrase \"Sign In\"\n \"\"\"\n response = self.client.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(response.request.path, \"/login\")\n self.assertIn(b\"

    Sign In\n\n    \", response.data)\n\n    def test_homepage_with_login(self):\n        \"\"\"\n        Feature: Homepage is viewed when user is logged in\n\n        Scenario: Request homepage with user logged in\n            Given user, \"susan\", is logged in\n            When accessing the homepage, \"/\"\n            Then the response path is \"/\"\n            And the page contains the phrase \"Hi, susan!\"\n        \"\"\"\n        user_susan = User(username='susan')\n        user_susan.set_password('cat')\n        db.session.add(user_susan)\n        db.session.commit()\n\n        with app.test_client(user=user_susan) as client:\n            response = client.get(\"/\", follow_redirects=True)\n            self.assertEqual(response.status_code, 200)\n            self.assertEqual(response.request.path, \"/\")\n            self.assertIn(b\"\n\n    Hi, susan!\n
    \", response.data)\n\n def test_create_post(self):\n \"\"\"\n Feature: User can create post\n\n Scenario: Post is created by user\n Given user, \"susan\", is logged in\n When susan creates a post\n Then the post is saved to db\n And homepage is displayed\n And a message is displayed to say the post is now live\n And the post is displayed on the homepage\n \"\"\"\n user_susan = User(username='susan', email=\"susan@test.com\")\n user_susan.set_password('cat')\n db.session.add(user_susan)\n db.session.commit()\n\n with app.test_client(user=user_susan) as client:\n response = client.get(\"/\")\n soup = BeautifulSoup(response.data, \"html.parser\")\n csrf_token = soup.find(\"input\", {\"name\": \"csrf_token\"})[\"value\"]\n\n data = {\"post\": \"This is my first post!\", \"csrf_token\": csrf_token}\n response = client.post(\"/\", data=data, follow_redirects=True)\n\n posts = Post.query.all()\n self.assertEqual(len(posts), 1)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.request.path, \"/index\")\n\n self.assertIn(b\"Your post is now live!\", response.data)\n self.assertIn(b\"This is my first post!\", response.data)\n","repo_name":"oliver-masters/Flask_Micro_Blog","sub_path":"tests/test_routes.py","file_name":"test_routes.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6406209635","text":"import pytest\r\n\r\n#from list_node import ListNode\r\n#from my_list import MyList\r\nfrom test_data_My import *\r\n\r\n@pytest.mark.listnode\r\n@pytest.mark.parametrize\r\ndef Test_doubly_linked_list_1():\r\n lst = Doubly_List\r\n assert lst.is_empty() == True\r\n\r\n\r\n@pytest.mark.listnode\r\n@pytest.mark.parametrize\r\ndef Test_doubly_linked_list_1():\r\n lst = Doubly_List\r\n lst.insert(\"Петров\", 2000, 2019, {\"math\": 5, \"physics\": 4})\r\n lst.insert(\"Абуков\", 2001, 2020, {\"math\": 4, \"physics\": 4})\r\n lst.insert(\"Сергеев\", 1999, 2021, {\"math\": 3, \"physics\": 4})\r\n lst.insert(\"Бублов\", 2002, 2022, {\"math\": 3, \"physics\": 3})\r\n lst.insert(\"Балаболов\", 2004, 2023, {\"math\": 5, \"physics\": 5})\r\n\r\n@pytest.mark.listnode\r\n@pytest.mark.parametrize\r\ndef Test_doubly_linked_list_1():\r\n lst = Doubly_List\r\n assert lst.is_empty() == False\r\n\r\n","repo_name":"IIMatsuII/Tests_OOP","sub_path":"lists_test_My.py","file_name":"lists_test_My.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15591125961","text":"def main():\n\tactive = True\n\n\twhile active:\n\t\tprint(\"\\nChoose which calculator function you'd like to use: \")\n\t\tprint(\"Option 1 - Addition (Type in '1' to choose this option\")\n\t\tprint(\"Option 2 - Subtraction (Type in '2' to choose this option\")\n\t\tprint(\"Option 3 - Multiply (Type in '3' to choose this option\")\n\t\tprint(\"Option 4 - Division (Type in '4' to choose this option\")\n\t\tprint(\"Option 5 - Quit program (Type in '5' to choose this option\")\n\n\t\toption = raw_input(\"Type in your option now: \")\n\t\t\n\t\tif option == \"1\":\n\t\t\tfirstnum = raw_input(\"Type in your first number: \")\n\t\t\tsecondnum = raw_input(\"Type in your second number: \")\n\t\t\tprint(\"The answer is: \" + str(int(firstnum) + int(secondnum)))\n\t\telif option == \"2\":\n\t\t\tfirstnum = raw_input(\"Type in your first number: \")\n\t\t\tsecondnum = raw_input(\"Type in your second number: \")\n\t\t\tprint(\"The answer is: 
\" + str(int(firstnum) - int(secondnum)))\n\t\telif option == \"3\":\n\t\t\tfirstnum = raw_input(\"Type in your first number: \")\n\t\t\tsecondnum = raw_input(\"Type in your second number: \")\n\t\t\tprint(\"The answer is: \" + str(int(firstnum) * int(secondnum)))\n\t\telif option == \"4\":\n\t\t\tfirstnum = raw_input(\"Type in your first number: \")\n\t\t\tsecondnum = raw_input(\"Type in your second number: \")\n\t\t\tprint(\"The answer is: \" + str(int(firstnum) / int(secondnum)))\n\t\telse:\n\t\t\tprint(\"Thanks for playing!\")\n\t\t\tactive = False\n\t\nif __name__ == '__main__':\n\tmain()","repo_name":"sreedevik29/python-calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11058346839","text":"import numpy as np\nimport argparse\nimport arff\nfrom discretize import LAIMdiscretize\nfrom cross_validation import StratifiedKFold\nfrom dataset import Dataset\nimport shutil\n\n\ndef unique_rows(data,c):\n dict = {}\n for row in data:\n dict[tuple(row[c:])]=row[0:c]\n A = []\n for k in dict:\n A.append(list(dict[k])+list(k))\n return np.array(A)\n \n \nparser = argparse.ArgumentParser()\nparser.add_argument(\"dataset\", type=str, nargs=1,\n help='Specify a dataset name from data/ (es. nltcs)')\n\nparser.add_argument('--seed', type=int, nargs='?',\n default=1337,\n help='Seed for the random generator')\n\nparser.add_argument('-b', action='store_true', default=False,\n help='Whether the ARFF file contains labels at the beginning of the attributes list.')\n\nparser.add_argument('-c', type=int, nargs=1, default=1,\n help='Number of class labels.')\n\nparser.add_argument('-k', type=int, nargs=1, default=1,\n help='Number of folds.')\n\nparser.add_argument('-s', action='store_true', default=False,\n help='Shuffle for cross validation.')\n\n\n\n\nargs = parser.parse_args()\nn_labels = args.c[0]\nendian_big = args.b\nn_folds = args.k[0]\nshuffle = args.s\n(dataset_name_,) = args.dataset\n\ndataset_name = \"data/\" + dataset_name_\n\nshutil.copy(dataset_name + \".orig.arff\", dataset_name + \".arff\")\n\n# first in big endian\nif endian_big == False:\n Dataset.arff_to_big_endian(dataset_name + \".arff\", dataset_name_, n_labels)\n\n# Discretize the dataset\n\ndata = Dataset.load_arff(dataset_name + \".arff\", n_labels, endian = \"big\", input_feature_type = 'float', encode_nominal = True)\nD = LAIMdiscretize(data)\nD.discretize()\n\ndiscretized_data_matrix = np.concatenate((data['Y'],D.X_discretized), axis=1)\n\nUniques = unique_rows(discretized_data_matrix,data['Y'].shape[1])\n\nprint(\"Unique \", discretized_data_matrix.shape[0], Uniques.shape[0])\n\ndata_frame = arff.load(open(dataset_name + \".arff\", 'r'), encode_nominal = True, return_type=arff.DENSE)\ndata_frame['data'] = discretized_data_matrix.astype(int).tolist()\n# make the attributes nominal\nfor i in range(len(data_frame['attributes'])):\n (attr_name, attr_value) = data_frame['attributes'][i]\n data_frame['attributes'][i] = (attr_name, ['0', '1'])\n\ndiscretized_dataset = dataset_name + \".discr.arff\"\nf = open(discretized_dataset, \"w\")\narff.dump(data_frame, f)\nf.close()\n\ndiscretized_data = {}\ndiscretized_data['X'] = D.X_discretized\ndiscretized_data['Y'] = data['Y']\n\n\nSKF = StratifiedKFold(discretized_data, n_folds, shuffle, args.seed)\n\n(train_f, test_f) = SKF.run()\n\n\nfor k in range(n_folds):\n X_train = discretized_data['X'][train_f[k],:]\n Y_train = 
discretized_data['Y'][train_f[k],:]\n X_test = discretized_data['X'][test_f[k],:]\n Y_test = discretized_data['Y'][test_f[k],:]\n\n Dataset.dump_data_arff(discretized_dataset, dataset_name + \".f\" + str(k) + \".train.arff\", X_train, Y_train)\n Dataset.dump_data_arff(discretized_dataset, dataset_name + \".f\" + str(k) + \".test.arff\", X_test, Y_test)\n\n","repo_name":"nicoladimauro/dcsn","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"73471477638","text":"import numpy as np\nfrom tqdm.auto import tqdm\nfrom scipy.stats import entropy\nfrom .message import Message\nfrom .protocols import Protocol, DandelionProtocol\nfrom .adversary import Adversary\nfrom .network import Network\nfrom typing import Optional, List, Iterable\n\n\nclass Simulator:\n \"\"\"\n Abstraction to simulate message passing on a P2P network\n\n Parameters\n ----------\n adversary : adversary.Adversary\n adversary that observe messages in the P2P network\n num_msg : Optional[int] (Default: 10)\n number of messages to simulate\n use_node_weights : bool\n sample message sources with respect to node weights\n messages : Optional[List[Message]]\n Set messages manually\n seed: int (optional)\n Random seed (disabled by default)\n\n Examples\n --------\n Sample message sources with respect to stake distribution\n\n >>> from .network import *\n >>> from .adversary import Adversary\n >>> from .protocols import BroadcastProtocol\n >>> nw_gen = NodeWeightGenerator('stake')\n >>> ew_gen = EdgeWeightGenerator('normal')\n >>> net = Network(nw_gen, ew_gen, 10, 3)\n >>> protocol = BroadcastProtocol(net, broadcast_mode='all')\n >>> adversary = Adversary(protocol, 0.4)\n >>> simulator = Simulator(adversary, 20, use_node_weights=True)\n >>> len(simulator.messages)\n 20\n\n Set 5 messages originating from node 0\n\n >>> from .network import *\n >>> from .message import Message\n >>> from .adversary import Adversary\n >>> from .protocols import BroadcastProtocol\n >>> nw_gen = NodeWeightGenerator('stake')\n >>> ew_gen = EdgeWeightGenerator('normal')\n >>> net = Network(nw_gen, ew_gen, 10, 3)\n >>> protocol = BroadcastProtocol(net, broadcast_mode='all')\n >>> adversary = Adversary(protocol, 0.4)\n >>> simulator = Simulator(adversary, messages=[Message(0) for _ in range(5)])\n >>> len(simulator.messages)\n 5\n >>> simulator.message_sources\n [0, 0, 0, 0, 0]\n \"\"\"\n\n def __init__(\n self,\n adversary: Adversary,\n num_msg: Optional[int] = 10,\n use_node_weights: bool = False,\n messages: Optional[List[Message]] = None,\n seed: Optional[int] = None,\n verbose: bool = False,\n ):\n self._rng = np.random.default_rng(seed)\n if num_msg > 10:\n self.verbose = False\n else:\n self.verbose = verbose\n self.adversary = adversary\n self.use_node_weights = use_node_weights\n if messages != None:\n self._messages = messages\n elif num_msg != None:\n # NOTE: by default adversary nodes don't send messages in the simulation - they only observe\n self._messages = [\n Message(sender)\n for sender in self.adversary.protocol.network.sample_random_nodes(\n num_msg,\n replace=True,\n use_weights=use_node_weights,\n exclude=self.adversary.nodes,\n rng=self._rng,\n )\n ]\n else:\n raise ValueError(\"One of `num_msg` or `messages` should not be None!\")\n self._executed = False\n\n @property\n def messages(self):\n return self._messages\n\n @property\n def message_sources(self):\n return [msg.source 
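Stepping back to the lists_test_My.py entry above: its three test functions share a single name, so each definition shadows the previous one; the names start with an uppercase T, which pytest's default test-prefix discovery does not match; and @pytest.mark.parametrize is applied without arguments. A sketch of the first two cases with distinct, discoverable names (Doubly_List comes from that module's own test_data_My import):

import pytest

@pytest.mark.listnode
def test_doubly_linked_list_starts_empty():
    lst = Doubly_List
    assert lst.is_empty() is True

@pytest.mark.listnode
def test_doubly_linked_list_insert_makes_nonempty():
    lst = Doubly_List
    lst.insert("Петров", 2000, 2019, {"math": 5, "physics": 4})
    assert lst.is_empty() is False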
for msg in self.messages]\n\n def run(\n self,\n coverage_threshold: float = 1.0,\n max_trials: int = 100,\n disable_progress_bar: bool = True,\n ) -> list:\n \"\"\"\n Run simulation\n\n Parameters\n ----------\n coverage_threshold : float\n stop propagating a message if it reached the given fraction of network nodes\n max_trials : int\n stop propagating a message if it does not reach any new nodes within `max_trials` steps\n\n Examples\n --------\n Run simulation until each message reaches 90% of all nodes\n\n >>> from .network import *\n >>> from .adversary import Adversary\n >>> from .protocols import BroadcastProtocol\n >>> seed = 42\n >>> nw_gen = NodeWeightGenerator('stake')\n >>> ew_gen = EdgeWeightGenerator('normal')\n >>> net = Network(nw_gen, ew_gen, 10, 3, seed=seed)\n >>> protocol = BroadcastProtocol(net, broadcast_mode='all', seed=seed)\n >>> adversary = Adversary(protocol, 0.4, seed=seed)\n >>> simulator = Simulator(adversary, 5, use_node_weights=True, seed=seed)\n >>> len(simulator.messages)\n 5\n >>> simulator.run(coverage_threshold=0.9)\n [0.9, 0.9, 0.9, 0.9, 0.9]\n \"\"\"\n coverage_for_messages = []\n for msg in tqdm(self.messages, disable=disable_progress_bar):\n node_coverage = 0.0\n delta = 1.0\n num_trials = 0\n while node_coverage < coverage_threshold and num_trials < max_trials:\n old_node_coverage = node_coverage\n node_coverage, spreading_phase, stop = msg.process(self.adversary)\n if stop:\n break\n if node_coverage > old_node_coverage:\n num_trials = 0\n else:\n num_trials += 1\n if self.verbose:\n print(msg.mid, node_coverage, num_trials)\n # NOTE: flushing message queue is essetial to correctly calculate the deanonymization power of the adversary\n msg.flush_queue(self.adversary)\n if self.verbose:\n print()\n coverage_for_messages.append(node_coverage)\n self._executed = True\n return coverage_for_messages\n\n def node_contact_time_quantiles(\n self, q=np.arange(0.1, 1.0, 0.1)\n ) -> Iterable[np.array]:\n \"\"\"\n Calculate the mean and the standard deviation for first node contact time quantiles\n\n Parameters\n ----------\n q : list (Default: numpy.arange(0.1, 1.0, 0.1)))\n Node quantiles\n\n Examples\n --------\n Observe the mean and standard deviation of propagation times until the messages reach 50% and 95% of all nodes.\n\n >>> from .network import *\n >>> from .adversary import Adversary\n >>> from .protocols import BroadcastProtocol\n >>> seed = 42\n >>> nw_gen = NodeWeightGenerator('stake')\n >>> ew_gen = EdgeWeightGenerator('normal')\n >>> net = Network(nw_gen, ew_gen, 10, 3, seed=seed)\n >>> protocol = BroadcastProtocol(net, broadcast_mode='all', seed=seed)\n >>> adversary = Adversary(protocol, 0.4, seed=seed)\n >>> simulator = Simulator(adversary, 5, use_node_weights=True, seed=seed)\n >>> _ = simulator.run()\n >>> mean_t, std_t = simulator.node_contact_time_quantiles(q=[0.5,0.95])\n >>> # messages on average reach 50% of all nodes within 273 milliseconds.\n >>> mean_t\n array([273.25546384, 460.11754328])\n >>> std_t\n array([24.51004285, 11.76789887])\n \"\"\"\n if self._executed:\n contact_time_quantiles = []\n for msg in self.messages:\n first_contact_times = [\n contasts[0].delay for node, contasts in msg.history.items()\n ]\n contact_time_quantiles.append(list(np.quantile(first_contact_times, q)))\n quantile_mx = np.array(contact_time_quantiles)\n mean_quantiles = np.mean(quantile_mx, axis=0)\n std_quantiles = np.std(quantile_mx, axis=0)\n return (mean_quantiles, std_quantiles)\n else:\n raise RuntimeError(\n \"Execute the `run()` function 
before querying node contact times!\"\n )\n\n\nclass Evaluator:\n \"\"\"\n Measures the deanonymization performance of the adversary for a given simulation\n\n Parameters\n ----------\n simulator : Simulator\n Specify the simulation to evaluate\n estimator : {'first_reach', 'first_sent', 'dummy'}, default 'first_reach'\n Define adversary stategy to predict source node for each message:\n * first_reach: the node from whom the adversary first heard the message is assigned 1.0 probability while every other node receives zero.\n * first_sent: the node that sent the message the earliest to the receiver\n * dummy: the probability is divided equally between non-adversary nodes.\n\n Examples\n --------\n Observe the complete evaluation pipeline below. First, initialize network, protocol, adversary. Then, simulate 20 messages with these components. Finally, query the report using the first sent estimator for the aversary.\n\n >>> from .network import *\n >>> from .adversary import Adversary\n >>> from .protocols import BroadcastProtocol\n >>> seed = 42\n >>> nw_gen = NodeWeightGenerator('stake')\n >>> ew_gen = EdgeWeightGenerator('normal')\n >>> net = Network(nw_gen, ew_gen, 10, 3, seed=seed)\n >>> protocol = BroadcastProtocol(net, broadcast_mode='all', seed=seed)\n >>> adversary = Adversary(protocol, 0.4, seed=seed)\n >>> simulator = Simulator(adversary, 20, use_node_weights=True, seed=seed)\n >>> _ = simulator.run()\n >>> evaluator = Evaluator(simulator, estimator='first_sent')\n >>> evaluator.get_report()\n {'estimator': 'first_sent', 'hit_ratio': 0.25, 'inverse_rank': 0.32499999999999996, 'entropy': 0.0, 'ndcg': 0.46679861973841597, 'message_spread_ratio': 1.0}\n \"\"\"\n\n def __init__(self, simulator: Simulator, estimator: str = \"first_reach\"):\n self.simulator = simulator\n self.estimator = estimator\n self.probas = simulator.adversary.predict_msg_source(estimator=self.estimator)\n # method='first' is used to properly resolve ties for calculating exact hits\n self.proba_ranks = self.probas.rank(axis=1, ascending=False, method=\"first\")\n\n @property\n def num_nodes(self):\n return self.simulator.adversary.protocol.network.num_nodes\n\n @property\n def message_spread_ratios(self):\n return [len(msg.history) / self.num_nodes for msg in self.simulator.messages]\n\n @property\n def exact_hits(self):\n hits = []\n for msg in self.simulator.messages:\n # adversary might not see every message\n if (\n msg.mid in self.probas.index\n and self.proba_ranks.loc[msg.mid, msg.source] == 1.0\n ):\n hits.append(1.0)\n else:\n hits.append(0.0)\n return np.array(hits)\n\n @property\n def ranks(self):\n ranks = []\n for msg in self.simulator.messages:\n # adversary might not see every message\n if msg.mid in self.probas.index:\n ranks.append(self.proba_ranks.loc[msg.mid, msg.source])\n else:\n # passive approach: let's suppose we make the worst prediction\n ranks.append(self.num_nodes)\n return np.array(ranks)\n\n @property\n def inverse_ranks(self):\n return 1.0 / self.ranks\n\n @property\n def ndcg_scores(self):\n scores = []\n for msg in self.simulator.messages:\n # adversary might not see every message\n if msg.mid in self.probas.index:\n rank = self.proba_ranks.loc[msg.mid, msg.source]\n else:\n # passive approach: let's suppose we make the worst prediction\n rank = self.num_nodes\n ndcg = 1.0 / np.log2(1.0 + rank)\n scores.append(ndcg)\n return np.array(scores)\n\n @property\n def entropies(self):\n rnd_entropy = entropy(1.0 / self.num_nodes * np.ones(self.num_nodes), base=2)\n entropies = []\n for 
msg in self.simulator.messages:\n # adversary might not see every message\n if msg.mid in self.probas.index:\n entropies.append(entropy(self.probas.loc[msg.mid].values, base=2))\n else:\n entropies.append(rnd_entropy)\n return np.array(entropies)\n\n def get_report(self) -> dict:\n \"\"\"Calculate mean performance of the adversary for the given simulation\"\"\"\n return {\n \"estimator\": self.estimator,\n \"hit_ratio\": np.mean(self.exact_hits),\n \"inverse_rank\": np.mean(self.inverse_ranks),\n \"entropy\": np.mean(self.entropies),\n \"ndcg\": np.mean(self.ndcg_scores),\n \"message_spread_ratio\": np.mean(self.message_spread_ratios),\n }\n","repo_name":"ferencberes/ethp2psim","sub_path":"ethp2psim/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":12459,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"8880923557","text":"#!/usr/bin/env python3\n\nimport collections\nimport csv\nimport pathlib\nimport re\nimport shutil\nimport ck2parser\n\nrootpath = pathlib.Path('C:/Users/Nicholas/Documents/CK2')\nswmhpath = rootpath / 'SWMH-BETA/SWMH'\n\ndef get_cultures():\n cultures = []\n for path in ck2parser.files('common/cultures/*.txt', swmhpath):\n tree = ck2parser.parse_file(path)\n cultures.extend(n2.val for _, v in tree for n2, v2 in v\n if n2.val != 'graphical_cultures')\n return cultures\n\ndef get_province_id(where):\n tree = ck2parser.parse_file(where / 'map/default.map')\n defs = next(v.val for n, v in tree if n.val == 'definitions')\n id_name = {}\n with (where / 'map' / defs).open(newline='', encoding='cp1252') as csvfile:\n for row in csv.reader(csvfile, dialect='ckii'):\n try:\n id_name[int(row[0])] = row[4]\n except (IndexError, ValueError):\n continue\n province_id = {}\n for path in ck2parser.files('history/provinces/*.txt', where):\n number, name = path.stem.split(' - ')\n number = int(number)\n if id_name[number] == name:\n tree = ck2parser.parse_file(path)\n try:\n title = next(v.val for n, v in tree if n.val == 'title')\n except StopIteration:\n continue\n province_id[title] = number\n return province_id\n\ndef prepend_post_comment(item, s):\n if item.post_comment:\n s += ' ' + str(item.post_comment)\n item.post_comment = ck2parser.Comment(s)\n\nkingdoms_for_barony_swap = [\n 'k_bulgaria', 'k_serbia', 'k_bosnia', 'k_croatia', 'k_hungary',\n 'k_denmark', 'k_norway', 'k_finland', 'k_pomerania', 'k_terra',\n 'k_lithuania', 'k_taurica', 'k_khazaria' 'k_alania', 'k_volga_bulgaria',\n 'k_bjarmia', 'k_perm']\n\ndef main():\n build = swmhpath / 'build'\n build_lt = build / 'common/landed_titles'\n while build.exists():\n print('Removing old build...')\n shutil.rmtree(str(build), ignore_errors=True)\n build_lt.mkdir(parents=True)\n\n province_id = get_province_id(swmhpath)\n localisation = ck2parser.localisation(swmhpath)\n cultures = get_cultures()\n ck2parser.fq_keys = cultures\n\n def update_tree(v, swap_baronies=False):\n for n2, v2 in v:\n if isinstance(n2, ck2parser.String):\n if ck2parser.is_codename(n2.val):\n for n3, v3 in v2:\n if n3.val == 'capital':\n prov_key = 'PROV{}'.format(v3.val)\n capital_name = localisation[prov_key]\n if not v3.post_comment:\n v3.post_comment = ck2parser.Comment(\n capital_name)\n break\n _, (nl, _) = v2.inline_str(0)\n if nl >= 36:\n comment = 'end ' + n2.val\n v2.ker.post_comment = None\n prepend_post_comment(v2.ker, comment)\n if re.match(r'[ekd]_', n2.val):\n if n2.val.startswith('k_'):\n swap_baronies = n2.val in kingdoms_for_barony_swap\n try:\n 
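A tiny worked example of the rank-based deanonymization metrics defined above, for a message whose true source the adversary ranks third:

import numpy as np

rank = 3
exact_hit = 1.0 if rank == 1 else 0.0   # 0.0: only rank 1 counts as a hit
inverse_rank = 1.0 / rank               # ~0.333
ndcg = 1.0 / np.log2(1.0 + rank)        # 1 / log2(4) = 0.5
assert abs(ndcg - 0.5) < 1e-12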
prepend_post_comment(v2.kel, localisation[n2.val])\n except KeyError:\n print('@@@ ' + n2.val)\n elif n2.val.startswith('c_'):\n # if v2.kel.post_comment:\n # print('c ' + v2.kel.post_comment.val)\n if (v2.kel.post_comment and\n v2.kel.post_comment.val.isdigit()):\n v2.kel.post_comment = None\n try:\n prov_id = province_id[n2.val]\n comment = '{} ({})'.format(\n localisation['PROV{}'.format(prov_id)],\n prov_id)\n prepend_post_comment(v2.kel, comment)\n except KeyError:\n print('!!! ' + n2.val)\n if swap_baronies:\n baronies = []\n for child in reversed(v2.contents):\n if child.key.val.startswith('b_'):\n baronies.append(child)\n v2.contents.remove(child)\n v2.contents.extend(baronies)\n allow_block = None\n for child in v2.contents:\n if child.key.val == 'allow':\n allow_block = child\n break\n if allow_block:\n v2.contents.remove(allow_block)\n v2.contents.append(allow_block)\n n2_lower = n2.val.lower()\n if any(n2_lower == s\n for s in ['not', 'or', 'and', 'nand', 'nor']):\n n2.val = n2_lower\n if isinstance(v2, ck2parser.Obj) and v2.has_pairs:\n update_tree(v2, swap_baronies)\n\n for inpath in ck2parser.files('common/landed_titles/*.txt', swmhpath):\n outpath = build_lt / inpath.name\n tree = ck2parser.parse_file(inpath)\n update_tree(tree)\n with outpath.open('w', encoding='cp1252', newline='\\r\\n') as f:\n f.write(tree.str())\n\nif __name__ == '__main__':\n main()\n","repo_name":"zijistark/ck2utils","sub_path":"esc/old/fix_up_lt_old.py","file_name":"fix_up_lt_old.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"3675197895","text":"\"\"\"\nauthor: Richard Sherman\n2018-12-05\nlab14-pick6.py, a lottery-style game\n\"\"\"\n\nimport random\n\npayoff = {\n 0 : 0,\n 1 : 4,\n 2 : 7,\n 3 : 100,\n 4 : 50000,\n 5 : 1000000,\n 6 : 25000000\n }\n\nwinning = random.sample(list(range(0, 100)), 6)\nbalance = 0\nfor i in range(1, 100001):\n n_match = 0\n balance = balance - 2\n ticket = random.sample(list(range(0, 100)), 6)\n print(ticket)\n print(winning)\n for j in range(len(ticket)):\n if winning[j] == ticket[j]:\n n_match += 1\n balance += payoff[n_match]\n balance = payoff[n_match] + balance\nprint(f'\\nYour final balance is {balance}')\n\n\n","repo_name":"PdxCodeGuild/class_iguana","sub_path":"Code/Richard/python/lab14-pick6.py","file_name":"lab14-pick6.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8023696043","text":"class TabletMed:\n\n def __init__(self):\n pass\n\n def initialization(self, tableDate, tabletWeight, numberPills, nameMed):\n self.tabletDate = tableDate\n self.tabletWeight = tabletWeight\n self.numberPills = numberPills\n self.nameMed = nameMed\n\n def read(self):\n self.tabletDate = int(input(\"Срок годности таблетки \"))\n self.tabletWeight = int(input(\"Вес одной таблетки \"))\n self.numberPills = int(input(\"Количество таблеток в упаковке \"))\n self.nameMed = input(\"Наименования лекарства \")\n\n def display(self):\n print(self.nameMed + \" Название таблетки, \" + str(self.numberPills) +\n \" Количество таблеток, \" + str(self.tabletWeight) + \" Вес одной таблетки, \" +\n str(self.tabletDate) + \" Срок годности \")\n\n\nclass Med:\n\n def add(self, arg1, arg2):\n F = TabletMed()\n number = (arg1.numberPills + arg2.numberPills) / 2\n if arg1.tabletWeight < arg2.tabletWeight:\n weight = arg1.tabletWeight\n else:\n weight = arg2.tabletWeight\n\n 
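In the pick6 loop above, the prize is credited several times per ticket: balance += payoff[n_match] runs inside the digit loop, and balance = payoff[n_match] + balance adds it once more afterwards. A corrected sketch of the same accounting, keeping the payoff table and positional matching:

import random

payoff = {0: 0, 1: 4, 2: 7, 3: 100, 4: 50000, 5: 1000000, 6: 25000000}
winning = random.sample(range(100), 6)

balance = 0
for _ in range(100000):
    ticket = random.sample(range(100), 6)
    n_match = sum(w == t for w, t in zip(winning, ticket))  # positional matches
    balance += payoff[n_match] - 2                          # prize once, minus ticket price
print(f'Your final balance is {balance}')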
F.initialization(arg2.tabletDate, weight, number, arg1.nameMed)\n return F\n\n\nC = TabletMed()\nC.read()\nC.display()\nD = TabletMed()\nD.read()\nD.display()\napteka = Med()\napteka.add(C, D)\nitemsMed = []\nitemsMed.append(C)\nitemsMed.append(D)\nweight = int(input(\"Заданное значение, с которым будете сравнивать \"))\ncounts = int(input(\"Количество лекарств \"))\nfor i in range(counts):\n itemsMed.append(apteka.add(itemsMed[(len(itemsMed) - 1)], itemsMed[len(itemsMed) - 2]))\n\nfor f in range(len(itemsMed)):\n sum = itemsMed[f].numberPills * itemsMed[f].tabletWeight\n\nif sum > weight:\n print(round(sum, 1))\n","repo_name":"Denmelis32/tableMed","sub_path":"tabletka.py","file_name":"tabletka.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31638815915","text":"from pandas import *\ndef q4(X):\n \n # Create a reverse of the list to compare for longest common subsequence\n Y = X[::-1]\n\n # Initialize empty matrix\n n = len(X)\n C = [[0 for _ in range(len(X)+1)] for __ in range(len(X)+1)]\n D = [[0 for _ in range(len(X)+1)] for __ in range(len(X)+1)]\n\n # Simple case check\n if X == Y:\n return X\n\n elif X == \"\":\n return \"\"\n\n # Filling the matrix\n for i in range(1,n+1):\n for j in range(1,n+1):\n ii = i-1\n jj = j-1\n\n if Y[ii] == X[jj]:\n C[i][j] = C[i-1][j-1] + 1\n D[i][j] = \"up-left\"\n\n # print(f\"Found Equals: {X[i]} and {Y[j]} {i},{j}\")\n\n else:\n\n m = max(C[i-1][j], C[i][j-1])\n \n if m == C[i-1][j]:\n C[i][j] = C[i-1][j]\n D[i][j] = \"up\"\n\n elif m == C[i][j-1]:\n C[i][j] = C[i][j-1]\n D[i][j] = \"left\"\n \n # Complete answer retrival via back pointers\n row = n\n column = n\n LCS_palindrome = \"\"\n\n while row > 0 and column > 0:\n\n if D[row][column] == \"up-left\":\n LCS_palindrome += Y[row-1]\n row -= 1\n column -= 1\n \n elif D[row][column] == \"up\":\n row -= 1\n \n elif D[row][column] == \"left\":\n column -= 1\n \n return LCS_palindrome\n\ndef test_q4():\n \n test_sequence = \"strabetubsa\"\n real = \"abeba\"\n result = q4(test_sequence)\n assert len(real) == len(result), f\"Expected: {real} ({len(real)}) Got: {result} ({len(result)})\"\n\n test_sequence = \"babad\"\n real = \"bab\"\n result = q4(test_sequence)\n assert len(real) == len(result), f\"Expected: {real} ({len(real)}) Got: {result} ({len(result)})\"\n\n test_sequence = \"cbbd\"\n real = \"bb\"\n result = q4(test_sequence)\n assert len(real) == len(result), f\"Expected: {real} ({len(real)}) Got: {result} ({len(result)})\"\n\n test_sequence = \"racecar\"\n real = \"racecar\"\n result = q4(test_sequence)\n assert len(real) == len(result), f\"Expected: {real} ({len(real)}) Got: {result} ({len(result)})\"\n\n test_sequence = \"Helleworld\"\n real = \"elle\"\n result = q4(test_sequence)\n assert len(real) == len(result), f\"Expected: {real} ({len(real)}) Got: {result} ({len(result)})\"\n\ntest_q4()","repo_name":"Ryan-JW-Kim/Year3Sem2","sub_path":"CP312/a4/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70328605638","text":"from __future__ import annotations\n\nfrom collections.abc import Awaitable, Callable\nimport logging\nimport sys\n\nfrom aiohttp import ClientSession\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.aiohttp_client import async_create_clientsession\n\nfrom ..helpers.enums import ConnectivityStatus\n\n_LOGGER = 
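q4 above recovers a longest palindromic subsequence by tracing the LCS of X against its reverse; for cross-checking lengths, a length-only sketch of the interval DP formulation (assumes q4 is in scope):

def lps_length(s: str) -> int:
    n = len(s)
    if n == 0:
        return 0
    dp = [[0] * n for _ in range(n)]        # dp[i][j]: LPS length of s[i..j]
    for i in range(n - 1, -1, -1):
        dp[i][i] = 1
        for j in range(i + 1, n):
            if s[i] == s[j]:
                dp[i][j] = dp[i + 1][j - 1] + 2
            else:
                dp[i][j] = max(dp[i + 1][j], dp[i][j - 1])
    return dp[0][n - 1]

assert lps_length('cbbd') == 2 == len(q4('cbbd'))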
logging.getLogger(__name__)\n\n\nclass BaseAPI:\n \"\"\"The Class for handling the data retrieval.\"\"\"\n\n hass: HomeAssistant\n session: ClientSession | None\n status: ConnectivityStatus\n data: dict\n onDataChangedAsync: Callable[[], Awaitable[None]] | None = None\n onStatusChangedAsync: Callable[[ConnectivityStatus], Awaitable[None]] | None = None\n\n def __init__(\n self,\n hass: HomeAssistant | None,\n async_on_data_changed: Callable[[], Awaitable[None]] | None = None,\n async_on_status_changed: Callable[[ConnectivityStatus], Awaitable[None]]\n | None = None,\n ):\n self.hass = hass\n self.status = ConnectivityStatus.NotConnected\n self.data = {}\n self.onDataChangedAsync = async_on_data_changed\n self.onStatusChangedAsync = async_on_status_changed\n\n self.session = None\n\n @property\n def is_home_assistant(self):\n return self.hass is not None\n\n async def initialize_session(self, cookies=None, cookie_jar=None):\n try:\n if self.is_home_assistant:\n self.session = async_create_clientsession(\n hass=self.hass, cookies=cookies, cookie_jar=cookie_jar\n )\n\n else:\n self.session = ClientSession(cookies=cookies, cookie_jar=cookie_jar)\n\n await self.login()\n\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.warning(\n f\"Failed to initialize session, Error: {str(ex)}, Line: {line_number}\"\n )\n\n await self.set_status(ConnectivityStatus.Failed)\n\n async def login(self):\n _LOGGER.info(\"Performing login\")\n\n await self.set_status(ConnectivityStatus.Connecting)\n\n async def validate(self, data: dict | None = None):\n pass\n\n async def terminate(self):\n self.data = {}\n\n await self.set_status(ConnectivityStatus.Disconnected)\n\n async def set_status(self, status: ConnectivityStatus):\n if status != self.status:\n self.status = status\n\n await self.fire_status_changed_event()\n\n async def fire_status_changed_event(self):\n if self.onStatusChangedAsync is not None:\n await self.onStatusChangedAsync(self.status)\n\n async def fire_data_changed_event(self):\n if self.onDataChangedAsync is not None:\n await self.onDataChangedAsync()\n","repo_name":"elad-bar/ha-edgeos","sub_path":"custom_components/edgeos/core/api/base_api.py","file_name":"base_api.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"62"} +{"seq_id":"8980440103","text":"from flask import Blueprint, flash, redirect, render_template, request, url_for\nfrom flask_login import current_user, login_required\nfrom webapp.db import db, UNITS\nfrom webapp.recipe.forms import AddRecipeForm\nfrom webapp.recipe.models import (\n Ingredient,\n PRODUCT_CATEGORIES,\n Product,\n RECIPE_CATEGORIES,\n Recipe,\n RecipeSchema,\n RecipeDescription,\n)\nfrom webapp.shopping_list.forms import ChooseListForm\nfrom webapp.shopping_list.models import ShoppingList\nfrom webapp.utils import flash_errors_from_form, get_admin_id, object_does_not_exist\nfrom uuid import uuid4\n\nblueprint = Blueprint(\"recipe\", __name__, url_prefix=\"/recipes\")\n\n\n@blueprint.route(\"/public\")\ndef public_recipes():\n admin_id = get_admin_id()\n public_recipes = Recipe.query.filter(Recipe.user_id == admin_id).all()\n return render_template(\"recipe/public_recipes.html\", public_recipes=public_recipes)\n\n\n@blueprint.route(\"/my_recipes\")\n@login_required\ndef my_recipes():\n user_recipes = Recipe.query.filter(Recipe.user_id == current_user.id).all()\n return render_template(\"recipe/my_recipes.html\", 
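A minimal sketch of wiring the BaseAPI callbacks above; EchoAPI and on_status are hypothetical names, and only BaseAPI and ConnectivityStatus come from this module:

class EchoAPI(BaseAPI):
    async def login(self):
        await super().login()  # sets ConnectivityStatus.Connecting and fires the status callback

async def on_status(status: ConnectivityStatus):
    print('status changed:', status)

api = EchoAPI(hass=None, async_on_status_changed=on_status)
# asyncio.run(api.initialize_session())  # hass=None, so a plain aiohttp ClientSession is created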
user_recipes=user_recipes)\n\n\n@blueprint.route(\"/add_recipe\", methods=[\"POST\", \"GET\"])\n@login_required\ndef add_recipe():\n form = AddRecipeForm()\n if request.method == \"GET\":\n return render_template(\"recipe/add_recipe.html\", form=form)\n\n if form.validate_on_submit():\n name = form.name.data\n\n recipe_name_already_used = bool(\n Recipe.query.filter(\n Recipe.name == name, Recipe.user_id == current_user.id\n ).count()\n )\n if recipe_name_already_used:\n flash(\"Рецепт с таким именем уже существует\", category=\"danger\")\n return render_template(\"/recipe/add_recipe.html\", form=form)\n\n category = form.category.data\n cooking_time = form.cooking_time.data\n\n recipe = Recipe(\n name=name,\n user_id=current_user.id,\n category=category,\n cooking_time=cooking_time,\n )\n db.session.add(recipe)\n db.session.commit()\n\n recipe = Recipe.query.filter(\n Recipe.name == recipe.name, Recipe.user_id == recipe.user_id\n ).one()\n\n return render_template(\n \"recipe/add_ingredients&cooking_steps.html\",\n recipe=recipe,\n PRODUCT_CATEGORIES=PRODUCT_CATEGORIES,\n RECIPE_CATEGORIES=RECIPE_CATEGORIES,\n UNITS=UNITS,\n )\n else:\n flash_errors_from_form(form)\n\n return redirect(url_for(\"recipe.my_recipes\"))\n\n\n@blueprint.route(\"/add_ingredient/\", methods=[\"POST\"])\n@login_required\ndef add_ingredient(recipe_id):\n recipe = Recipe.query.filter(Recipe.id == recipe_id).one_or_none()\n if not recipe:\n flash(\"При добавлении ингредиента произошла ошибка\")\n return redirect(url_for(\"recipe.my_recipes\"))\n\n product_name = request.form.get(\"product_name\")\n product_category = request.form.get(\"product_category\")\n ingredient_quantity = request.form.get(\"ingredient_quantity\")\n ingredient_unit = request.form.get(\"ingredient_unit\")\n\n if all([product_name, product_category, ingredient_quantity, ingredient_unit]):\n product = Product.query.filter(Product.name == product_name).one_or_none()\n\n if not product:\n product = Product(name=product_name, category=product_category)\n db.session.add(product)\n db.session.commit()\n product = Product.query.filter(Product.name == product_name).one()\n\n product_id = product.id\n\n quantity = ingredient_quantity\n\n ingredient = Ingredient(\n product_id=product_id,\n quantity=quantity,\n unit=ingredient_unit,\n recipe_id=recipe.id,\n )\n\n db.session.add(ingredient)\n db.session.commit()\n return \"ok\"\n\n else:\n return \"failed\"\n\n\n@blueprint.route(\"/add_recipe_description/\", methods=[\"POST\"])\ndef add_recipe_description(recipe_id):\n cooking_step_text = request.form.get(\"cooking_step_text\")\n\n if cooking_step_text:\n cooking_step_obj = RecipeDescription(\n recipe_id=recipe_id, text=cooking_step_text\n )\n db.session.add(cooking_step_obj)\n db.session.commit()\n return \"ok\"\n\n else:\n return \"failed\"\n\n\n@blueprint.route(\"/\")\ndef recipe(recipe_id):\n recipe_schema = RecipeSchema()\n form = ChooseListForm()\n recipe = Recipe.query.filter(Recipe.id == recipe_id).one_or_none()\n print(recipe_schema.dump(recipe))\n if not recipe:\n return redirect(url_for(\"recipe.public_recipes\"))\n\n admin_id = get_admin_id()\n\n current_user_id = None\n if current_user.is_authenticated:\n current_user_id = current_user.id\n\n if recipe.user_id != admin_id and recipe.user_id != current_user_id:\n flash(\"Этот рецепт Вам недоступен\")\n return redirect(url_for(\"recipe.public_recipes\"))\n\n elif current_user.is_authenticated:\n if not current_user.shopping_lists:\n new_shopping_list = ShoppingList(\n name=\"Мой список 
покупок\",\n user_id=current_user.id,\n public_id=str(uuid4()),\n )\n db.session.add(new_shopping_list)\n db.session.commit()\n form.name.choices = [\n shopping_list.name for shopping_list in current_user.shopping_lists\n ]\n\n return render_template(\n \"/recipe/recipe.html\",\n PRODUCT_CATEGORIES=PRODUCT_CATEGORIES,\n RECIPE_CATEGORIES=RECIPE_CATEGORIES,\n recipe=recipe,\n form=form,\n )\n\n\n@blueprint.route(\"/delete_recipe/\")\n@login_required\ndef delete_recipe(recipe_id):\n recipe_to_delete = Recipe.query.filter(Recipe.id == recipe_id).one_or_none()\n\n if recipe_to_delete:\n db.session.delete(recipe_to_delete)\n db.session.commit()\n flash(\"Рецепт удалён.\", category=\"success\")\n\n return redirect(url_for(\"recipe.my_recipes\"))\n\n\n@blueprint.route(\"/copy_to_my_recipes/\")\n@login_required\ndef copy_to_my_recipes(recipe_id):\n admin_id = get_admin_id()\n recipe = Recipe.query.filter(\n Recipe.id == recipe_id, Recipe.user_id == admin_id\n ).one_or_none()\n\n if recipe:\n if object_does_not_exist(Recipe, recipe.name):\n recipe_obj = Recipe(\n name=recipe.name,\n user_id=current_user.id,\n category=recipe.category,\n cooking_time=recipe.cooking_time,\n )\n db.session.add(recipe_obj)\n recipe_copy = Recipe.query.filter(\n Recipe.name == recipe.name, Recipe.user_id == current_user.id\n ).one()\n\n for ingredient in recipe.ingredients:\n ingredient_copy = Ingredient(\n product_id=ingredient.product_id,\n quantity=ingredient.quantity,\n unit=ingredient.unit,\n recipe_id=recipe_copy.id,\n )\n db.session.add(ingredient_copy)\n\n for step in recipe.description:\n step_copy = RecipeDescription(recipe_id=recipe_copy.id, text=step.text)\n db.session.add(step_copy)\n db.session.commit()\n\n flash(\"Рецепт успешно добавлен в Ваши рецепты\", category=\"success\")\n return redirect(url_for(\"recipe.recipe\", recipe_id=recipe_copy.id))\n else:\n flash(\"Что-то пошло не так\")\n\n return redirect(url_for(\"recipe.public_recipes\"))\n","repo_name":"AlexandraPoturaeva/ChefList_web","sub_path":"webapp/recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73957562118","text":"#!/usr/bin/python3\n\"\"\"\nclass BaseModel is a class that defines\nall common attributes/methods for other classes\n\"\"\"\n\n\nimport models\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n \"\"\"\n All the instance used listed:\n __init__(self, *args, **kwargs):\n __str__(self):\n save(self):\n to_dict(self):\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Function - __init__(self, *args, **kwargs):\n assigned the value to the instance\n\n Object:\n args(*): the arguments\n kwargs(**): ths keyword of the arguments\n\n Return:\n nothing\n \"\"\"\n if kwargs:\n for first, second in kwargs.items():\n if first != '__class__':\n if first != 'created_at' and first != 'updated_at':\n setattr(self, first, second)\n else:\n setattr(self, first, datetime.fromisoformat(second))\n else:\n time = datetime.now()\n self.id = str(uuid.uuid4())\n self.created_at = time\n self.updated_at = time\n models.storage.new(self)\n\n def __str__(self):\n \"\"\"\n Function - __str__(self):\n print the information of the class\n\n Object:\n nothing\n\n Return:\n the class name, the id, and the dictionairy\n \"\"\"\n return(\"[{}] ({}) {}\".format(\n self.__class__.__name__,\n self.id,\n self.__dict__\n ))\n\n def save(self):\n \"\"\"\n Function - save(self):\n update the time when the 
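The Product handling in add_ingredient above runs filter, create, then re-query; after commit the new instance already carries its primary key, so the dance collapses into one helper. A sketch in the same Flask-SQLAlchemy style (the helper name is hypothetical):

def get_or_create_product(name, category):
    product = Product.query.filter_by(name=name).one_or_none()
    if product is None:
        product = Product(name=name, category=category)
        db.session.add(product)
        db.session.commit()  # assigns product.id, so no re-query is needed
    return product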
class is used\n\n Object:\n nothing\n\n Return:\n nothing\n \"\"\"\n self.updated_at = datetime.today()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"\n Function - to_dict(self):\n the ditionary of the class\n\n Object:\n nothing\n\n Return:\n the new dictionary\n \"\"\"\n dict_new = self.__dict__.copy()\n dict_new[\"__class__\"] = self.__class__.__name__\n dict_new[\"created_at\"] = dict_new[\"created_at\"].isoformat()\n dict_new[\"updated_at\"] = dict_new[\"updated_at\"].isoformat()\n return dict_new\n","repo_name":"cocofox1902/AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35040127234","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path(\"page/\", views.getPage, name = \"page\"),\r\n path('events', views.getEvents, name = 'events'),\r\n path('lives', views.getLive, name = 'lives'),\r\n path('team', views.getTeam, name = 'team'),\r\n path('memes', views.getMeme, name = 'memes'),\r\n path('activity', views.getActivity, name = 'activity'),\r\n path('coming-soon', views.comingSoon, name = 'coming-soon')\r\n]\r\n","repo_name":"FIMehedi/lipikoron-beta","sub_path":"features/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"30270399375","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport pprint\n\n#gain access to the google sheets\nscope = ['https://www.googleapis.com/auth/drive']\ncred = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\nclient = gspread.authorize(cred)\n\n#open The google sheets\nsheet = client.open('google images').sheet1\n\n\nrow = [\"i'm\", \"updating\", \"a\", \"spreadsheet\", \"from\", \"Python!\"]\nindex = 3\nsheet.insert_row(row, index)","repo_name":"kunle-lawal/Scraper","sub_path":"scraping/spreadsheet.py","file_name":"spreadsheet.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8226748001","text":"import sys\nsys.stdin=open('input.txt', 'r')\n\nt=int(input())\n# 테스트 케이스 수\nfor _ in range(1, t+1):\n #문제에서 1번부터 출력하기 요구하므로 1부터 t+1까지의 리스트를 만들고\n n=int(input())\n #카드 수를 입력하고\n d=list(input().split())\n #카드 구성을 리스트로 받는다\n d1=[]\n d2=[]\n d3=[]\n #d1, d2는 반반씩 담을 거. d3은 한장씩 뽑을 거\n for c in range(n):\n # 여기에서 led(d)를 써야하나 했는데 그냥 n이랑 그게 그거더라\n if c < n // 2:\n d1.append(d[c])\n #c를 인덱스로 하는 리스트 d의 요소를 나눠서 넣을건데\n #먼저 c가 n을 2로 나눈 것의 몫보다 작으면 d1에 넣어주고\n\n else:\n #c가 n을 2로 나눈 몫보다 큰데\n if n % 2 == 1:\n #n이 홀수면\n d1.append(d[c])\n #d1에 한번 더 넣어주고\n d2.append(d[c])\n #나머지 남은 거를 d2에 넣어준다\n #여기까지 절반 나누기\n\n while True:\n #여기서 트루를 쓰는게 맞나? 영 모르겠네\n if len(d1) != 0:\n # d1 리스트의 길이가 0이 아니면\n d3.append(d1.pop(0))\n # 먼저 d1 맨 앞자리에서 카드를 뽑아서 d3에 넣고\n try : d3.append(d2.pop(0))\n except : continue\n # 다음으로 d2 맨 앞자리에서 카드를 뽑아서 d3에 넣는데\n # n이 홀수일 경우 100% 빈 리스트(d2가 먼저 바닥남)에서 팝 못한다고 오류가 날 것이기 때문에\n # try문을 사용해서 d2에 뭐가 있으면 계속 뽑고 오류를 뱉거든 걍 넘어가거라 하고 명령을 내린다\n # try문 처음써봄\n else:\n print(f'#{_}', *d3)\n break\n # d1리스트의 길이가 0이면 프린트하고 브레이크\n\n# 처음에 이렇게 이렇게 풀면 되겠다! 
하고 계획을 짠 건 맞았는데\n# 계속 원하는 대로 출력이 안되어서 조금씩 조금씩 디테일을 다듬었으나 8개 케이스 중 5�� 정답이라고 함\n# 입력 예시 세개는 맞았다고 빼애앵\n# 힝구 뭐가 빠졌느냐\n \n\n \n\n\n","repo_name":"nihelv/algorithm","sub_path":"Examples/퍼펙트셔플.py","file_name":"퍼펙트셔플.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72402182276","text":"# Runtime 32 ms, Memory Usage 11.8 MB\n\nclass Solution(object):\n def fib(self, N):\n # declare a store list that will contain all of our fibonacci numbers\n store = [0, 1]\n\n # run while the length of our store is less than or equal to the input N\n while len(store) <= N:\n\n # declare a variable equal to the sum of the last two numbers in the store\n sum = store[-1] + store[-2]\n\n # append that sum to the store\n store.append(sum)\n \n # return the last element in the store\n return store[-1]\n\n ","repo_name":"Bcdirito/algorithm_problems","sub_path":"recursion/fibonacci/no_counter.py","file_name":"no_counter.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74900866757","text":"from Mascotas import basededatos, Mascota, Juguete, Propietario, app\n\nwith app.app_context():\n # creamos los objetos\n\n mascota1 = Mascota('Felipe')\n mascota2 = Mascota('Katy')\n\n basededatos.session.add_all([mascota1, mascota2])\n basededatos.session.commit()\n\n # buscamos todas las mascotas\n mascotas = Mascota.query.all()\n print(mascotas)\n\n # filtrar por un nombre, si tiene dato o si hay mas.\n mascota1 = Mascota.query.filter_by(nombre='Felipe').first()\n\n # ahora a propietario\n propietario1 = Propietario('pedro', mascota1.id)\n basededatos.session.add(propietario1)\n basededatos.session.commit()\n\n # ahora a juguetes\n\n juguete1 = Juguete('Pelota de futbol', mascota1.id)\n juguete2 = Juguete('oso de peluche', mascota1.id)\n basededatos.session.add_all([juguete1, juguete2])\n basededatos.session.commit()\n\n # filtrar mascotas\n mascota = Mascota.query.filter_by(nombre='Katy').first()\n print(mascota)\n mascota.mostrar_juguetes()\n","repo_name":"emersonxinay/flask_base_datos","sub_path":"consultas.py","file_name":"consultas.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24055221792","text":"from typing import Annotated\nfrom fastapi import ( # noqa\n APIRouter,\n HTTPException,\n status,\n Path,\n Depends,\n Query,\n)\nfrom typing import Optional\nfrom src.api import security\nfrom src.schemas import Loja, Pagamento\nfrom src.infra.database.repository import Repository\nfrom src.infra.database.manager import DatabaseConnectionManager\n\n\ncurrent_user = Annotated[Loja, Depends(security.current_user)]\nNotFoundException = HTTPException(\n status_code=status.HTTP_404_NOT_FOUND, detail=\"Pagamento não encontrado\"\n)\n\nrouter = APIRouter(prefix=\"/pagamentos\", tags=[\"Pagamentos\"])\n\n\n@router.get(\"/\")\nasync def requisitar_pagamentos(loja_uuid: Optional[str] = Query(None)):\n \"\"\"\n Requisita pagamentos cadastrados na plataforma.\n \n Args:\n loja_uuid (Optional[str]): O uuid da loja, caso necessário.\n \n Returns:\n list[Pagamento]: Lista de pagamentos encontrados.\n \"\"\"\n kwargs = {}\n if loja_uuid is not None:\n kwargs[\"loja_uuid\"] = loja_uuid\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n results = await 
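In the Korean perfect-shuffle solution (퍼펙트셔플) above, the odd-n branch appends every card of the second half to both piles, so d1 ends up with n cards instead of ceil(n/2); that duplication is consistent with the author's note that only 5 of 8 cases passed. A compact sketch of one perfect shuffle with slicing, alternating d1 and d2 starting from d1:

from itertools import chain, zip_longest

def perfect_shuffle(cards):
    half = (len(cards) + 1) // 2   # the first pile takes the extra card when n is odd
    d1, d2 = cards[:half], cards[half:]
    return [c for c in chain.from_iterable(zip_longest(d1, d2)) if c is not None]

assert perfect_shuffle([1, 2, 3, 4, 5]) == [1, 4, 2, 5, 3]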
repository.find_all(**kwargs)\n\n return results\n\n\n@router.get(\"/{uuid}\")\nasync def requisitar_pagamento(\n uuid: Annotated[str, Path(title=\"O uuid do pagamento a fazer get\")]\n):\n \"\"\"\n Busca um pagamento pelo seu uuid.\n \n Args:\n uuid (str): O uuid do pagamento a ser buscado.\n \n Returns:\n Pagamento: O pagamento encontrado.\n \n Raises:\n HTTPException: Se o pagamento não for encontrado.\n \"\"\"\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n result = await repository.find_one(uuid=uuid)\n\n if result is None:\n raise NotFoundException\n\n return result\n\n\n@router.post(\"/\", status_code=201)\nasync def cadastrar_pagamentos(pagamento: Pagamento):\n \"\"\"\n Cadastra um novo pagamento na plataforma.\n \n Args:\n pagamento (Pagamento): Os detalhes do pagamento a ser cadastrado.\n \n Returns:\n dict: Um dicionário contendo o uuid do pagamento cadastrado.\n \n Raises:\n HTTPException: Se ocorrer um erro durante o cadastro.\n \"\"\"\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n try:\n uuid = await repository.save(pagamento)\n except Exception as error:\n raise HTTPException(status_code=500, detail=str(error))\n\n return {\"uuid\": uuid}\n\n\n@router.put(\"/{uuid}\")\nasync def atualizar_pagamento_put(\n pagamento_Data: Pagamento,\n uuid: Annotated[str, Path(title=\"O uuid do pagemento a fazer put\")],\n):\n \"\"\"\n Atualiza um pagamento utilizando o método HTTP PUT.\n \n Args:\n pagamento_Data (Pagamento): Os novos dados do pagamento.\n uuid (str): O uuid do pagamento a ser atualizado.\n \n Returns:\n dict: Um dicionário contendo o número de linhas afetadas na atualização.\n \n Raises:\n HTTPException: Se o pagamento não for encontrado.\n \"\"\"\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n pagamento = await repository.find_one(uuid=uuid)\n if pagamento is None:\n raise NotFoundException\n\n num_rows_affected = await repository.update(\n pagamento, pagamento_Data.model_dump() # type: ignore\n )\n\n return {\"num_rows_affected\": num_rows_affected}\n\n\n@router.patch(\"/{uuid}\")\nasync def atualizar_pagamento_patch(\n pagamentoData: Pagamento,\n uuid: Annotated[str, Path(title=\"O uuid do pagamento a fazer patch\")],\n):\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n pagamento = await repository.find_one(uuid=uuid)\n if pagamento is None:\n raise NotFoundException\n\n num_rows_affected = await repository.update(\n pagamento, pagamentoData.model_dump() # type: ignore\n )\n\n return {\"num_rows_affected\": num_rows_affected}\n\n\n@router.delete(\"/{uuid}\")\nasync def remover_pagamento(\n uuid: Annotated[str, Path(title=\"O uuid do pagemento a fazer delete\")]\n):\n \"\"\"\n Remove um pagamento pelo seu uuid.\n \n Args:\n uuid (str): O uuid do pagamento a ser removido.\n \n Returns:\n dict: Um dicionário contendo o número de itens removidos.\n \n Raises:\n HTTPException: Se ocorrer um erro durante a remoção.\n \"\"\"\n async with DatabaseConnectionManager() as connection:\n repository = Repository(Pagamento, connection=connection)\n try:\n itens_removed = await repository.delete_from_uuid(uuid=uuid)\n except Exception as error:\n raise HTTPException(status_code=500, detail=str(error))\n\n return {\"itens_removed\": 
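A hypothetical smoke test for the pagamentos routes above, assuming a FastAPI app that mounts this router; the src.main import is an assumption, and the call presumes a reachable database:

from fastapi.testclient import TestClient
from src.main import app  # hypothetical application entry point

client = TestClient(app)
resp = client.get('/pagamentos/')
assert resp.status_code == 200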
itens_removed}\n","repo_name":"antoniofernandodj/Chickie","sub_path":"backend/src/api/routes/resources/pagamentos.py","file_name":"pagamentos.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72160137798","text":"class Solution(object):\n def findEven(self,num):\n count = 0\n value = num \n while(value != 0):\n value = value // 10\n count += 1\n \n return count%2==0\n def findNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ans = 0\n for num in nums:\n if(self.findEven(num)):\n ans += 1\n return ans\n ","repo_name":"Saiprem98/leetcode_challenge","sub_path":"explore_array_lc/findNumbers.py","file_name":"findNumbers.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"473551917","text":"from airflow import DAG, utils\nfrom airflow.decorators import task\nfrom airflow.operators.python import PythonOperator, BranchPythonOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\n# from airflow.providers.docker.operators.docker import DockerOperator\n\nfrom customCode import CustomOperator, CustomHook, CustomSensor\n\nwith DAG(\n dag_id='learn_example',\n start_date=utils.dates.days_ago(3),\n schedule_interval=\"@daily\"\n) as dag:\n @task()\n def step1(**context):\n context[\"task_instance\"].xcom_push(key=\"message\", value=\"step5\")\n return \"message\"\n\n @task()\n def step2(message):\n print(message)\n\n def __step3__(**context):\n message = context[\"task_instance\"].xcom_pull(key=\"message\")\n print(message)\n return message\n\n def __step9__(arg, **context):\n hook = CustomHook(arg)\n hook.print_argument(\"World\")\n\n step3 = BranchPythonOperator(\n task_id='step3',\n python_callable=__step3__\n )\n\n step4 = PythonOperator(\n task_id='step4',\n trigger_rule=\"all_done\",\n python_callable=lambda: print(\"step4\")\n )\n\n step5 = PythonOperator(\n task_id='step5',\n python_callable=lambda: print(\"step5\")\n )\n\n step6 = PythonOperator(\n task_id='step6',\n python_callable=lambda: print(\"step6\")\n )\n\n step7 = TriggerDagRunOperator(\n trigger_dag_id=\"context\",\n task_id='step7'\n )\n\n step8 = CustomOperator(\n task_id='step8',\n args='Argument'\n )\n\n step9 = PythonOperator(\n task_id='step9',\n python_callable=__step9__,\n op_kwargs={\"arg\": \"Hello\"},\n )\n\n step10 = CustomSensor(\n task_id='step10',\n )\n\n # step11 = DockerOperator(\n # image=\"ubuntu:latest\",\n # task_id=\"step11\",\n # command=\"echo hello\",\n # network_mode=\"bridge\",\n # )\n\n msg = step1()\n step2(msg)\n step3.set_upstream(msg)\n step4.set_upstream(msg)\n step5.set_upstream(step3)\n step6.set_upstream(step3)\n step7.set_upstream(step4)\n step10.set_upstream(step7)\n step10.set_downstream(step8)\n step10.set_downstream(step9)\n # step11.set_upstream(step10)\n","repo_name":"rparthas/data","sub_path":"airflow/dags/example_dag.py","file_name":"example_dag.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"41253995034","text":"# -*- coding: utf-8 -*-\n\n\ndef main():\n from collections import defaultdict\n import sys\n\n input = sys.stdin.readline\n\n n = int(input())\n d = defaultdict(int)\n\n for _ in range(n):\n si = input().rstrip()\n\n if d[si] == 0:\n print(si)\n else:\n print(si + \"(\" + str(d[si]) + \")\")\n\n d[si] += 1\n\n\nif 
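The duplicate-name solution above prints a string bare on first sight and as name(k) on its k-th repeat; a compact trace sketch of the same defaultdict pattern:

from collections import defaultdict

seen = defaultdict(int)
out = []
for s in ['newfile', 'newfile', 'newfile']:
    out.append(s if seen[s] == 0 else f'{s}({seen[s]})')
    seen[s] += 1
assert out == ['newfile', 'newfile(1)', 'newfile(2)']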
__name__ == \"__main__\":\n main()\n","repo_name":"KATO-Hiro/AtCoder","sub_path":"ABC/abc251-abc300/abc261/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"70274095879","text":"\"\"\"\nModule for quantum potentials to be tested\n\"\"\"\n\n\nimport numpy as pot_np\n\n## Finite square well potential\n## returns V0 if x is inside the well, 0 otherwise\n# x - position array\n# pars[0]: L - width of the well, centered at x=0\n# pars[1]: V0 - height of potential (positive if barrier, negative if well)\ndef finite_square(x, pars):\n if len(pars) != 2:\n print(\"Error: wrong parameters for finite square well potential\")\n print(\"Usage: pars[0] = m, pars[1] = omega\")\n return None\n V = pot_np.zeros_like(x)\n V[pot_np.absolute(x) <= pars[0]/2.] = pars[1]\n return V\n\n\n## Harmonic oscillator potential\n## returns a quadratic potential with shape governed by m, omega\n# x - position array\n# pars[0]: m - particle mass\n# pars[1]: omega - potential frequency\ndef harmonic_oscillator(x, pars):\n if len(pars) != 2:\n print(\"Error: wrong parameters for harmonic oscillator potential\")\n print(\"Usage: pars[0] = m, pars[1] = omega\")\n return None\n return 0.5 * pars[0] * pars[1]**2 * x**2\n\n\n## Dirac delta function potential\n## Uses the Lorentzian limit definition; resolution limited by how fine x is\n# x - position array\n# pars[0]: x0 - location of delta spike\n# pars[1]: A - delta magnitude\n# pars[2]: width of delta function\ndef delta_potential(x, pars):\n if len(pars) != 3:\n print(\"Error: wrong parameters for delta potential\")\n print(\"Usage: pars[0] = x0, pars[1] = A, pars[2] = width\")\n return None\t\n #epsilon = 1e-6 # may change epsilon depending on numerical error involved\n return pars[1] * 0.5*pars[2] / ((x-pars[0])**2 + (0.5*pars[2])**2) / pot_np.pi\n\n\n## symmetric Dirac delta function potential\n## Uses the Lorentzian limit definition; ....\n# x - position array\n# pars[0]: x0 - location of delta spike *first*\n# pars[1]: A - delta magnitudes\n# pars[2]: width of delta functions\ndef double_delta_potential(x, pars):\n if len(pars) != 3:\n print(\"Error: wrong parameters for delta potential\")\n print(\"Usage: pars[0] = x0, pars[1] = A, pars[2] = width\")\n return None\t\n #epsilon = 1e-6 # may change epsilon depending on numerical error involved\n return pars[1]*(0.5*pars[2] / ((x-pars[0])**2 + (0.5*pars[2])**2) / pot_np.pi +\n 0.5*pars[2] / ((x+pars[0])**2 + (0.5*pars[2])**2) / pot_np.pi)\n\n\n## Step potential barrier\n## returns V0 if x is inside the barrier, 0 otherwise. 
Barrier left to right only\n# x - position array\n# pars[0]: x0 - barrier location\n# pars[1]: V0 - barrier height\ndef step_potential(x, pars):\n if len(pars) != 2:\n print(\"Error: wrong parameters for step potential\")\n print(\"Usage: pars[0] = x0, pars[1] = V0\")\n return None\n V = pot_np.zeros_like(x)\n V[x >= pars[0]] = pars[1]\n return V\n\n \n## Lennard-Jones potential (intermolecular potential)\n## returns spherical Lennard-Jones distribution\n# x - position from the origin (spherical)\n# pars[0]: epsilon - potential strength\n# pars[1]: sigma - potential scaling\ndef Lennard_Jones_potential(x, pars):\n if len(pars) != 2:\n print(\"Error: wrong parameters for Lennard-Jones potential\")\n print(\"Usage: pars[0] = epsilon, pars[1] = sigma\")\n return None\n if pot_np.any(x <= 0.):\n print(\"Error: all x must be greater than 0 for the Lennard-Jones\")\n return None\n return 4.*pars[0]*((pars[1]/x)**12-(pars[1]/x)**6)\n\n\n## Morse intermolecular potential\n## returns spherical Morse distribution; similar to Lennard-Jones\n# x - position from the origin (spherical)\n# pars[0]: r0 - minimum location\n# pars[1]: D - potential scale\n# pars[2]: a - exponential scale\ndef Morse_potential(x, pars):\n if len(pars) != 3:\n print(\"Error: wrong parameters for Morse potential\")\n print(\"Usage: pars[0] = r0, pars[1] = D, pars[2] = a\")\n return None\n if pot_np.any(x <= 0.):\n print(\"Error: all x must be greater than 0 for the Morse potential\")\n return None\n\n return pars[1]*(1. - pot_np.exp(-pars[2]*(x-pars[0])))**2 - pars[1]\n \n","repo_name":"mreh528/phys5070FinalProject","sub_path":"potentials.py","file_name":"potentials.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19710446771","text":"from unicodedata import name\nfrom django.urls import path\nfrom . 
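A sanity check for `Lennard_Jones_potential` above (assumes the file is importable as `potentials`, per its path): the minimum sits at r = 2^(1/6)·σ with depth −ε.

```python
import numpy as np
import potentials  # assumes the module above is saved as potentials.py

eps, sigma = 1.0, 1.0
r = np.linspace(0.8, 3.0, 100001)
V = potentials.Lennard_Jones_potential(r, [eps, sigma])
i = np.argmin(V)
print(r[i], 2 ** (1 / 6) * sigma)  # both ~1.1225
print(V[i])                        # ~-1.0 == -eps
```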
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('jobs/', views.jobs_index, name='index'),\n path('jobs/<int:job_id>/', views.jobs_detail, name=\"detail\"),\n path('jobs/create', views.JobCreate.as_view(), name='jobs_create'),\n path('jobs/<int:pk>/update/', views.JobUpdate.as_view(), name='jobs_update'),\n path('jobs/<int:pk>/delete/', views.JobDelete.as_view(), name='jobs_delete'),\n path('jobs/<int:job_id>/add_followup/', views.add_followup, name='add_followup'),\n path('techstack/', views.Tech_stackList.as_view(), name='tech_stack_index'),\n path('techstack/<int:pk>/', views.Tech_stackDetail.as_view(), name='tech_stack_detail'),\n path('techstack/create/', views.Tech_stackCreate.as_view(), name='tech_stack_create'),\n path('techstack/<int:pk>/update/', views.Tech_stackUpdate.as_view(), name='tech_stack_update'),\n path('techstack/<int:pk>/delete/', views.Tech_stackDelete.as_view(), name='tech_stack_delete'),\n path('techstack/<int:job_id>/assoc_tech_stack/<int:tech_stack_id>/', views.assoc_tech_stack, name='assoc_tech_stack'),\n path('accounts/signup/', views.signup, name='signup'),\n]","repo_name":"dominikconway/jobtracker","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7188004502","text":"ROMANIAN_CHARS = 'AĂÂBCDEFGHIÎJKLMNOPQRSȘTȚUVWXYZAĂÂBCDEFGHIÎJKLMNOPQRSȘTȚUVWXYZ'\n\ndef check_valid_text(input_text):\n if not all(char.upper() in ROMANIAN_CHARS for char in input_text):\n raise ValueError(\"The text may contain only letters from the Romanian alphabet.\")\n\ndef get_char_index(char):\n if char.upper() in ROMANIAN_CHARS:\n return ROMANIAN_CHARS.index(char.upper())\n raise ValueError(f\"{char} character is not valid.\")\n\ndef get_char_from_index(index):\n return ROMANIAN_CHARS[index]\n\ndef cipher_text(message, cipher_key):\n message = message.replace(\" \", \"\").upper()\n check_valid_text(message)\n check_valid_text(cipher_key)\n\n if len(cipher_key) < 7:\n raise ValueError(\"The key length must be at least 7.\")\n\n cipher_key = cipher_key.upper()\n\n ciphered_text = \"\"\n\n for index, char in enumerate(message):\n key_index = get_char_index(cipher_key[index % len(cipher_key)])\n cipher_index = (get_char_index(char) + key_index) % len(ROMANIAN_CHARS)\n ciphered_text += get_char_from_index(cipher_index)\n\n shifted_chars = get_shifted_chars(cipher_key)\n print(\"Shifted characters: \", shifted_chars)\n return ciphered_text\n\ndef get_shifted_chars(cipher_key):\n shifted_chars = \"\"\n cipher_key = cipher_key.upper()\n\n for index, char in enumerate(ROMANIAN_CHARS):\n key_index = get_char_index(cipher_key[index % len(cipher_key)])\n shifted_char_index = (get_char_index(char) + key_index) % len(ROMANIAN_CHARS)\n shifted_chars += get_char_from_index(shifted_char_index)\n\n return shifted_chars\n\ndef decipher_text(encrypted_text, cipher_key):\n encrypted_text = encrypted_text.replace(\" \", \"\").upper()\n check_valid_text(encrypted_text)\n check_valid_text(cipher_key)\n\n if len(cipher_key) < 7:\n raise ValueError(\"The key length must be at least 7.\")\n\n cipher_key = cipher_key.upper()\n\n deciphered_text = \"\"\n\n for index, char in enumerate(encrypted_text):\n key_index = get_char_index(cipher_key[index % len(cipher_key)])\n decipher_index = (get_char_index(char) - key_index) % len(ROMANIAN_CHARS)\n deciphered_text += get_char_from_index(decipher_index)\n\n return deciphered_text\n\nwhile True:\n try:\n choice = input(\"Choose 
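A round-trip check for the cipher above (assumes `cipher_text`/`decipher_text` are in scope; the key below is a made-up placeholder with the required seven or more letters): enciphering then deciphering with the same key returns the original text, because the two index shifts cancel modulo the alphabet length.

```python
key = "PAROLACHEIE"   # hypothetical key, >= 7 letters
plain = "SALUTLUME"
enc = cipher_text(plain, key)
dec = decipher_text(enc, key)
assert dec == plain
print(enc, dec)
```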
operation (1 - encrypt, 2 - decrypt, 0 - stop):\\n\").lower()\n\n if choice == '0':\n print(\"Exiting program.\")\n break\n\n cipher_key = input(\"Enter the key: \")\n input_text = input(\"Enter the text: \")\n\n if choice == '1':\n print(\"Encrypted text:\", cipher_text(input_text, cipher_key))\n elif choice == '2':\n print(\"Decrypted text:\", decipher_text(input_text, cipher_key))\n else:\n print(\"Invalid choice.\")\n\n except ValueError as error:\n print(error)\n","repo_name":"GSandu1/CS","sub_path":"lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23947624405","text":"\"\"\"empty message\n\nRevision ID: 0ecb4e38e2b1\nRevises: b87a2bfecca8\nCreate Date: 2022-09-16 14:16:46.781839\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0ecb4e38e2b1'\ndown_revision = 'b87a2bfecca8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('day',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date', sa.Date(), nullable=True),\n sa.Column('date_str', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('date'),\n sa.UniqueConstraint('date_str'),\n sa.UniqueConstraint('id')\n )\n op.drop_table('days')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('days',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('date', sa.DATE(), autoincrement=False, nullable=True),\n sa.Column('date_str', sa.VARCHAR(), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='days_pkey'),\n sa.UniqueConstraint('date', name='days_date_key'),\n sa.UniqueConstraint('date_str', name='days_date_str_key')\n )\n op.drop_table('day')\n # ### end Alembic commands ###\n","repo_name":"jpdesc/Oura-Tracker-App","sub_path":"migrations/versions/0ecb4e38e2b1_.py","file_name":"0ecb4e38e2b1_.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4053589601","text":"from sqlalchemy import create_engine, Column, Integer, DateTime, String, ForeignKey, Enum, Boolean, Float\nfrom sqlalchemy.orm import relationship\n\nfrom . 
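The autogenerated migration above drops the old `days` table and recreates it as `day`, discarding any existing rows. When the intent is a pure rename, a hand-edited revision can use `op.rename_table` instead; a sketch (my suggestion, same revision module assumed):

```python
def upgrade():
    op.rename_table('days', 'day')


def downgrade():
    op.rename_table('day', 'days')
```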
import Base, StatusEnum, LanguageEnum, VerdictEnum\nfrom battle.api import Language, Status, Verdict\n\n\nclass TestCase(Base):\n __tablename__ = 'testcase'\n\n testcase_id = Column(Integer, primary_key=True)\n team_id = Column(Integer, ForeignKey('team.team_id'))\n problem_id = Column(Integer, ForeignKey('problem.problem_id'))\n status = Column(StatusEnum, nullable=False)\n test = Column(String, nullable=False)\n testcase_time = Column(DateTime(timezone=True), nullable=False)\n\n max_judge_time = Column(Float)\n max_judge_mem = Column(Float)\n\n def get_status(self):\n return Status[self.status]\n\n events = relationship('SolutionEvent', backref='testcase', order_by='SolutionEvent.event_time')\n\n judgements = relationship('Judgement', backref='testcase', order_by='desc(Judgement.judgement_id)')\n\n\nclass Solution(Base):\n __tablename__ = 'solution'\n\n solution_id = Column(Integer, primary_key=True)\n team_id = Column(Integer, ForeignKey('team.team_id'))\n problem_id = Column(Integer, ForeignKey('problem.problem_id'))\n language = Column(LanguageEnum, nullable=False)\n status = Column(StatusEnum, nullable=False)\n code = Column(String, nullable=False)\n solution_time = Column(DateTime(timezone=True), nullable=False)\n\n def get_status(self):\n return Status[self.status]\n\n events = relationship('SolutionEvent', backref='solution', order_by='SolutionEvent.event_time')\n\n judgements = relationship('Judgement', backref='solution', order_by='desc(Judgement.judgement_id)')\n\n\nclass Judgement(Base):\n __tablename__ = 'judgement'\n\n judgement_id = Column(Integer, primary_key=True)\n solution_id = Column(Integer, ForeignKey('solution.solution_id'))\n testcase_id = Column(Integer, ForeignKey('testcase.testcase_id'))\n verdict = Column(VerdictEnum, nullable=False)\n solved = Column(Boolean)\n runtime = Column(Float)\n memory = Column(Float)\n\n def get_verdict(self):\n return Verdict[self.verdict]\n\n\n\nclass SolutionEvent(Base):\n __tablename__ = 'solution_event'\n\n solution_event_id = Column(Integer, primary_key=True)\n solution_id = Column(Integer, ForeignKey('solution.solution_id'))\n testcase_id = Column(Integer, ForeignKey('testcase.testcase_id'))\n old_status = Column(StatusEnum, nullable=False)\n new_status = Column(StatusEnum, nullable=False)\n event_time = Column(DateTime(timezone=True), nullable=False)\n\n\n","repo_name":"simonlindholm/programming-battle","sub_path":"battle/battle/models/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"44856797944","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nclass CitysSpider(scrapy.Spider):\n name = 'citys'\n allowed_domains = ['ke.com']\n start_urls = ['https://www.ke.com/city/']\n\n def parse(self, response):\n city_list = response.xpath('//*[@class=\"city_recommend\"]/div[1]/div[2]/ul/li')\n for citys in city_list:\n city_letter = citys.xpath('./div[1]/span/text()').get()\n city_tit = citys.xpath('./div[2]/div/div/text()').get().strip()\n city_province = citys.xpath('./div[2]/div/ul/li')\n for cityss in city_province:\n city_name = cityss.xpath('./a/text()').get()\n urls = \"https:\" + cityss.xpath('./a/@href').get()\n requests = scrapy.Request(url=urls, callback=self.get_housing)\n yield requests\n\n def get_housing(self, response):\n response.xpath('//*[@class=\"nav 
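A usage sketch for the models above (assumes the package's declarative `Base` and an engine; the SQLite URL is illustrative). The `judgements` relationships are ordered newest-first via `desc(Judgement.judgement_id)`.

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)  # Base assumed imported from the package

with Session(engine) as session:
    solution = session.get(Solution, 1)
    if solution is not None:
        for judgement in solution.judgements:  # newest judgement first
            print(judgement.get_verdict(), judgement.runtime)
```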
typeUserInfo\"]/ul/li')\n\n","repo_name":"guohuian/beikespider","sub_path":"beike/beike/spiders/citys.py","file_name":"citys.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1525791148","text":"import requests, json, time\r\n\r\nunitsSummary = {} # contains the created id + unit name to easy display\r\nlistValues = [] # contains a list of all characteristics value of the selected unit\r\nlistUnits = [] # contains created units objects\r\n\r\nclass unit:\r\n ATB = 0\r\n def __init__(self,dic):\r\n self.name = dic.get('name')\r\n self.life = dic.get('hit_points')\r\n self.attackPoint = self.getAttackPoint(dic)\r\n self.cacArmor = self.getArmorcac(dic)\r\n self.rangeArmor = self.getRangeArmor(dic)\r\n self.reload_time = dic.get('reload_time')\r\n \r\n def isAlive(self):\r\n if self.life > 0:\r\n return True\r\n else:\r\n print(f'{self.name} trépasse !')\r\n return False\r\n \r\n def getAttackPoint(self,dic):\r\n exists = dic.get('attack')\r\n if exists:\r\n return exists\r\n else:\r\n return 0\r\n \r\n def getArmorcac(self,dic):\r\n stats = dic.get('armor')\r\n stats = stats.split('/')\r\n stats = stats[0]\r\n return stats\r\n\r\n def getRangeArmor(self,dic):\r\n stats = dic.get('armor')\r\n stats = stats.split('/')\r\n stats = stats[1]\r\n return stats\r\n\r\n def getATB(self):\r\n self.ATB += 1\r\n if self.ATB < self.reload_time:\r\n return False\r\n else:\r\n self.ATB = 0\r\n return True\r\n\r\n\r\n\r\nclass physicalUnit(unit):\r\n position = 0\r\n typeUnit = 'Unité au corps à corps'\r\n def __init__(self,dic):\r\n super().__init__(dic)\r\n self.speed = dic.get('movement_rate')\r\n\r\n def attack(self, oponent):\r\n if oponent.typeUnit == 'Unité à distance':\r\n while self.position < oponent.range:\r\n print(f'{self.name} se rapproche ...')\r\n self.position += self.speed\r\n return\r\n canAttack = self.getATB()\r\n if canAttack:\r\n if self.attackPoint == 0:\r\n print(f'{self.name} ne fait aucun dégât !')\r\n return\r\n calcul = int(self.attackPoint) - int(oponent.cacArmor)\r\n if calcul < 1:\r\n calcul = 1\r\n oponent.life -= calcul\r\n else:\r\n oponent.life -= calcul\r\n absorbed = self.attackPoint - calcul\r\n armorMessage = f\"L'armure corps à corps a absorbé {absorbed} dégâts.\"\r\n print(f\"{self.name} inflige {calcul} dégâts à {oponent.name} ! {armorMessage}\" )\r\n else:\r\n return\r\n\r\nclass rangeUnit(unit):\r\n typeUnit = 'Unité à distance'\r\n def __init__(self,dic):\r\n super().__init__(dic)\r\n self.range = self.getRange(dic)\r\n self.speed = dic.get('movement_rate')\r\n self.position = self.getRange(dic)\r\n\r\n def getRange(self,dic):\r\n stat = dic.get('range')\r\n if (self.name).lower() == 'archer':\r\n return 3\r\n else:\r\n try:\r\n if '.' 
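The spider's `get_housing` callback above selects nodes but yields nothing, so nothing reaches the item pipeline; a sketch of the usual callback shape (field names hypothetical):

```python
def get_housing(self, response):
    # yield one item per navigation entry instead of discarding the selection
    for li in response.xpath('//*[@class="nav typeUserInfo"]/ul/li'):
        yield {
            "label": li.xpath('./a/text()').get(),
            "url": response.urljoin(li.xpath('./a/@href').get(default="")),
        }
```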
in stat or '(' in stat:\r\n stat = int(stat[0])\r\n return stat\r\n elif len(stat) > 2:\r\n statList = stat.split('-')\r\n stat = int(statList[1])\r\n return stat\r\n except TypeError:\r\n return stat\r\n\r\n def attack(self, oponent):\r\n if oponent.typeUnit == 'Unité à distance':\r\n while self.position < oponent.range:\r\n print(f'{self.name} se rapproche ...')\r\n self.position += self.speed\r\n return\r\n canAttack = self.getATB()\r\n if canAttack:\r\n if self.attackPoint == 0:\r\n print(f'{self.name} ne fait aucun dégât !')\r\n return\r\n calcul = int(self.attackPoint) - int(oponent.rangeArmor)\r\n if calcul < 1:\r\n calcul = 1\r\n oponent.life -= calcul\r\n else:\r\n oponent.life -= calcul\r\n absorbed = self.attackPoint - calcul\r\n armorMessage = f\"L'armure perçage a absorbé {absorbed} dégâts.\"\r\n print(f\"{self.name} inflige {calcul} dégâts à {oponent.name} ! {armorMessage}\" )\r\n else:\r\n return\r\n\r\ndef choiceUnit():\r\n global listUnits\r\n while len(listUnits) < 2:\r\n choice = int(input())\r\n selected = response[choice - 1]\r\n testType = selected.get('range')\r\n if testType:\r\n listUnits.append(rangeUnit(selected))\r\n else:\r\n listUnits.append(physicalUnit(selected))\r\n for i in listUnits:\r\n print(f'{i.name} : {i.typeUnit}')\r\n print(f'Armure corps à corps : {i.cacArmor} | Armure perçage : {i.rangeArmor}')\r\n\r\ndef battle(objectlist):\r\n while objectlist[0].isAlive() or objectlist[1].isAlive():\r\n for i in range(len(objectlist)):\r\n objectlist[i].attack(objectlist[i-1])\r\n time.sleep(0.5)\r\n for i in range(len(objectlist)):\r\n if objectlist[i].isAlive() == False:\r\n for i in objectlist:\r\n if i.life > 0:\r\n print(f'{i.name} est victorieux !')\r\n return\r\n\r\nr = requests.get('https://age-of-empires-2-api.herokuapp.com/api/v1/units')\r\nresponse = json.loads(r.content)\r\nresponse = response.get('units')\r\n\r\nfor i in range(len(response)):\r\n id = response[i].get('id')\r\n name = response[i].get('name')\r\n unitsSummary.setdefault(id,name)\r\n\r\nfor id,name in unitsSummary.items():\r\n print(f'{id} : {name}')\r\n\r\nchoiceUnit()\r\nbattle(listUnits)","repo_name":"vestibules/aoe2unitversus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34049897886","text":"import numpy as np\nimport cv2\nfrom scipy.spatial.transform import Rotation\n\n\ndef read_tartantic_intrinsic():\n K = np.eye(4)\n K[0, 0] = 320.0\n K[1, 1] = 320.0\n K[0, 2] = 320.0\n K[1, 2] = 240.0\n\n return K\n\n\ndef read_tartanair_extrinsic(extrinsic_fn, side='left'):\n data = {}\n camera_id = {'left': 0, 'right': 1}\n with open(extrinsic_fn, 'r') as fp:\n lines = fp.readlines()\n # poses = np.loadtxt(extrinsic_fn)\n # for lineid, pose in enumerate(poses):\n for lineid, line in enumerate(lines):\n frame = int(lineid)\n camera = int(camera_id[side])\n key = 'T_cam{}'.format(int(camera))\n inv_key = 'inv_T_cam{}'.format(int(camera))\n values = line.rstrip().split(' ')\n assert len(values) == 7, 'Pose must be quaterion format -- 7 params, but {} got'.format(len(values))\n pose = np.array([float(values[i]) for i in range(len(values))])\n tx, ty, tz, qx, qy, qz, qw = pose\n R = Rotation.from_quat((qx, qy, qz, qw)).as_matrix()\n t = np.array([tx, ty, tz])\n matrix = np.eye(4)\n matrix[:3, :3] = R.transpose()\n matrix[:3, 3] = -R.transpose().dot(t)\n # ned(z-axis down) to z-axis forward\n m_correct = np.zeros_like(matrix)\n m_correct[0, 1] = 
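A quick demo of the reload mechanic in the classes above (the unit dict is hypothetical, not API data): `getATB` returns True only every `reload_time` calls, which is what throttles `attack`.

```python
knight = physicalUnit({"name": "Knight", "hit_points": 100, "attack": 10,
                       "armor": "2/2", "reload_time": 2, "movement_rate": 1})
for tick in range(4):
    print(tick, knight.getATB())  # False, True, False, True
```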
1\n m_correct[1, 2] = 1\n m_correct[2, 0] = 1\n m_correct[3, 3] = 1\n matrix = np.matmul(m_correct, matrix)\n\n item = {\n key: matrix,\n inv_key: np.linalg.pinv(matrix),\n }\n data['Frame{}:{}'.format(frame, camera)] = item\n lineid += 1\n\n return data\n\n\ndef read_tartanair_depth(depth_fn, K=np.array([[320.0, 0, 320.0, 0],\n [0, 320, 240.0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])):\n\n if '.npy' in depth_fn:\n depth = np.load(depth_fn)\n elif '.png' in depth_fn:\n depth = cv2.imread(depth_fn, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n # [0, 655.35] meter\n depth = depth / 100.0\n else:\n raise TypeError('only support png and npy format, invalid type found: {}'.format(depth_fn))\n\n f = K[0, 0]\n b = 0.25 # meter\n\n disp = b * f / (depth + 1e-5)\n\n return depth, disp\n\ndef read_tartanair_flow(flow_fn):\n \"\"\"Convert to (h, w, 2) (flow_x, flow_y) float32 array\"\"\"\n\n out_flow = np.load(flow_fn)\n\n return out_flow\n","repo_name":"youmi-zym/TemporalStereo","sub_path":"architecture/data/utils/load_tartanair.py","file_name":"load_tartanair.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"62"} +{"seq_id":"43786186180","text":"import numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport heapq\n\n\n\ndfcf = pd.read_csv(\"C:/Users/fx168/Desktop/tmp/USDCNH60.csv\")\nClosePrice = dfcf.loc[:, ['Close']]\nClosePrice = pd.Series(ClosePrice.values.ravel())\nstep = 2040\n\n#HighPrice = dfcf.loc[:, ['High']]\n#HighPrice = pd.Series(HighPrice.values.ravel())\n\n#LowPrice = dfcf.loc[:, ['Low']]\n#LowPrice = pd.Series(LowPrice.values.ravel())\n\naclose = np.array(ClosePrice[0:step])\n#bhigh = np.array(HighPrice[0:step])\n\n# s1=aclose\ns1 = ClosePrice\nmacd = [0] * step\n\nema12 = [0] * step\nema12[0] = s1[0]\n\nema26 = [0] * step\nema26[0] = s1[0]\n\nfor i in range(step - 1):\n ema12[i + 1] = (11 / 13 * ema12[i] + 2 / 13 * s1[i + 1])\n ema26[i + 1] = (25 / 27 * ema26[i] + 2 / 27 * s1[i + 1])\n\ndif = [0] * step\n\nfor i in range(step):\n dif[i] = (ema12[i] - ema26[i])\n\ndea = [0] * step\n\nfor i in range(step - 1):\n dea[i + 1] = 8 / 10 * dea[i] + 2 / 10 * dif[i + 1]\n macd[i + 1] = 2 * (dif[i + 1] - dea[i + 1])\n\n\n###2040/15=136组数据\n\ndea = dea[0:step]\ndayspan = 50\nwax_x = [] # 波峰x 记录索引,为天数\nwax_y = [] # 波峰y\nwane_x = [] # 波谷x\nwane_y = [] # 波谷y 记录索引为DEA值\nfor i in range(0, step, dayspan):\n tmp = dea[i:i + dayspan]\n wave_wax = max(enumerate(tmp), key=lambda x: x[1])\n wave_wane = min(enumerate(tmp), key=lambda x: x[1])\n\n wax_x.append(wave_wax[0] + i)\n\n wax_y.append(wave_wax[1])\n wane_y.append(wave_wane[1])\n wane_x.append(wave_wane[0] + i)\n\n\n\n\nnewWaneY = []\nnewWaneX = []\n# 两个高坡间隔下确认一个min\nfor i in range(0, len(wax_y), 2):\n tmp = wane_y[i:i + 2]\n tmp = min(enumerate(tmp), key=lambda x: x[1])\n newWaneY.append(tmp[1])\n # newWaneX.append(wave_wane_y.index(newWaneY))\nprint('wax_y',len(wax_y))\nprint('newWaneY:',newWaneY)\nprint('len newWaneY',len(newWaneY))\nfor i in range(len(newWaneY)):\n a = wane_y.index(newWaneY[i]) #找到在低谷值中的索引,即天数\n newWaneX.append(wane_x.pop(a - i)) ###把ok的低谷数值取出,每pop一次,数据长度就减少1。记录索引,\n ### 为天数\n\nconnectx=wax_x+newWaneX ##调整了所有低谷+未调整的高峰\nconnectx=sorted(connectx)\n#######根据调整了的低谷去整理对应的y值\naaa=[]\nfor i in range(len(newWaneX)): #低谷长度\n aaa.append(connectx.index(newWaneX[i])) #所有天数中找出是第几天\n\nfor i in range(len(newWaneX)):\n wax_y.insert(aaa[i],newWaneY[i]) 
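A standalone sanity check for the pose handling above: the loader builds the camera-from-world matrix from R^T and −R^T·t, which is the closed-form inverse of the world-from-camera pose [R | t], so composing the two gives the identity.

```python
import numpy as np
from scipy.spatial.transform import Rotation

R = Rotation.from_euler("xyz", [0.3, -0.2, 0.5]).as_matrix()
t = np.array([1.0, 2.0, 3.0])

T_wc = np.eye(4)           # world-from-camera pose
T_wc[:3, :3] = R
T_wc[:3, 3] = t

T_cw = np.eye(4)           # camera-from-world, built as in the loader
T_cw[:3, :3] = R.T
T_cw[:3, 3] = -R.T.dot(t)

print(np.allclose(T_cw @ T_wc, np.eye(4)))  # True
```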
#不调整的高峰数据,+经过调整的低谷数据\n############################低谷ok后把不合规的高峰删\nnewWaxX=[]\nnewWaxY=[]\na=[]\nfor i in range(len(newWaneX)):\n tmp=connectx.index(newWaneX[i])\n a.append(tmp) #把处理好的波谷挑出,波谷之间的间隙就是要比较的波峰\n'''\nprint('a',a)\nprint('newWaneX',newWaneX)\nprint('connectx',len(connectx))\nprint(len(wax_y))\n'''\nwax_y.pop(28)\nconnectx.pop(28)\nwax_y.pop(30)\nconnectx.pop(30)\nwax_y.pop(30)\nconnectx.pop(30)\nwax_y.pop(33)\nconnectx.pop(33)\n\n#print(connectx.index(913),connectx.index(983),connectx.index(1029),connectx.index(1116),connectx.index(1141),connectx.index(1150),connectx.index(1292))\n\nBehind=[]\nAbove=[]\nfor i in range(len(wax_y)):\n tmp=wax_y[i]\n if (tmp>0):\n Above.append(tmp)\n else:\n Behind.append(tmp)\nprint('DEA>0 mean',sp.mean(Above))\nprint('DEA>0 std',sp.std(Above))\n\nprint('DEA<0 mean',sp.mean(Behind))\nprint('DEA<0 std',sp.std(Behind))\n\n\n\nwax_x2 = [] # 波峰x 记录索引,为天数\nwax_y2 = [] # 波峰y\nwane_x2 = [] # 波谷x\nwane_y2 = [] # 波谷y 记录索引为DEA值\nfor i in range(0, 41, 2):\n tmp = wax_y[i:i + 2]\n wave_wax = max(enumerate(tmp), key=lambda x: x[1])\n wave_wane = min(enumerate(tmp), key=lambda x: x[1])\n\n wax_x2.append(wave_wax[0] + i)\n\n wax_y2.append(wave_wax[1])\n wane_y2.append(wave_wane[1])\n wane_x2.append(wave_wane[0] + i)\nperiod=250*abs(sp.mean(wax_x2)-sp.mean(wane_x2))\nprint('cycle time is ' ,period)\nday=[]\nfor i in range(len(wane_x2)-1):\n tmp=wane_x2[i+1]-wane_x2[i]\n day.append(tmp)\ninterval=sp.mean(day)*250\nprint('interval of behind ',interval)\n\n\n######\n#作图\n#####\nplt.figure(figsize=(6,6))\nax = plt.subplot(2, 1, 1)\n\nax.plot(aclose, label='closePrice', color='b')\nplt.grid(True)\n#plt.subplots_adjust(top=None, bottom=None)\nax2 = plt.subplot(2, 1, 2)\n################\nfor i in range(len(connectx)-1):\n plt.plot([connectx[i], connectx[i + 1]], [wax_y[i], wax_y[i + 1]], color='black')\n######################\nplt.plot(dea, label='DEA', color='crimson')\nplt.xlabel('working hours')\n\nplt.grid(True)\n\nplt.legend()\nplt.show()\n\n","repo_name":"skywalker-young/stock_feature-research","sub_path":"indicator/wane_wax_version.py","file_name":"wane_wax_version.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5070742998","text":"import json\ndata_path = './data/MultiWOZ/data_for_sequicity.json'\nwith open(data_path, \"r\") as f:\n data = json.load(f)\nrequest = {\"[attraction]\": [], \"[hospital]\": [], \"[hotel]\": [], \"[police]\": [], \"[restaurant]\": [],\n \"[taxi]\": [], \"[train]\": [], \"[general]\": []}\nfor value in data:\n for turn in value[\"log\"]:\n domains = []\n for domain in turn[\"turn_domain\"].split(\" \"):\n domains.append(domain)\n response = turn[\"resp\"]\n for word in response.split(\" \"):\n if word:\n if word[0] == '[':\n if word not in request[domain]:\n request[domain].append(word)\nreq_path = './data/MultiWOZ/requirement.json'\nwith open(req_path, \"w\") as f:\n json.dump(request, f, indent=4, separators=(\",\", \": \"))","repo_name":"Silin159/PARG","sub_path":"CamRest676/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"74763227396","text":"from django.contrib.admin.widgets import AdminDateWidget\nfrom django import forms\nfrom .models import GENRE_CHOICES, Movie \n\n\nclass MovieForm(forms.ModelForm):\n genre = forms.CharField(\n max_length=30,\n widget=forms.Select(choices=GENRE_CHOICES),\n 
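The hand-rolled EMA recursions above can be reproduced with pandas' `ewm(..., adjust=False)`; a sketch with illustrative prices (note the script seeds `dea[0] = 0`, while `ewm` seeds `dea[0] = dif[0]`, so the first few values differ slightly):

```python
import pandas as pd

close = pd.Series([7.00, 7.10, 7.05, 7.20, 7.15, 7.30])
ema12 = close.ewm(span=12, adjust=False).mean()   # alpha = 2/13, seeded at close[0]
ema26 = close.ewm(span=26, adjust=False).mean()   # alpha = 2/27
dif = ema12 - ema26
dea = dif.ewm(alpha=0.2, adjust=False).mean()     # dea[i+1] = 0.8*dea[i] + 0.2*dif[i+1]
macd = 2 * (dif - dea)
print(macd)
```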
)\n release_date = forms.DateField(\n widget = forms.DateInput(\n attrs= {'type':'date'}\n )\n )\n score = forms.FloatField(\n widget = forms.NumberInput(\n attrs = {\n 'min': '0',\n 'max': '10',\n }\n )\n )\n class Meta:\n model = Movie\n fields = '__all__'","repo_name":"junwoo0127/Project","sub_path":"pjt06/movies/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36091406133","text":"# https://leetcode.com/problems/minimum-increment-to-make-array-unique/\n\nfrom typing import List\n\n\nclass Solution:\n def findNumbers(self, nums):\n pass\n def minIncrementForUnique(self, nums: List[int]) -> int:\n nums.sort()\n visited = set()\n moves = 0\n _max = 0\n \n for i in range(len(nums)):\n if nums[i] not in visited:\n visited.add(nums[i])\n _max = max(_max, nums[i])\n else:\n moves += abs(_max - nums[i] + 1)\n nums[i] += abs(_max - nums[i] + 1)\n _max = max(_max, nums[i])\n visited.add(nums[i])\n \n return moves\n \n","repo_name":"hello-world-was-taken/A2SV_Competetive_Programming","sub_path":"week_14/minimum-increment-to-make-array-unique.py","file_name":"minimum-increment-to-make-array-unique.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"15431689662","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom loader import dp\nfrom states.wcalc import Warranty_calculation, Cvalue_calculation\nfrom keyboards.inline import calcs_markup, vehicle_markup\nimport re\nimport datetime\nimport sqlite3\nimport pandas as pd\n\n\ndef wcalc_algorithm(vehicle, code, weight, value):\n wtotal = 0\n if vehicle == 'Автомобільний':\n if code == '2203' or code == '2204' or code == '2205' or code == '2206':\n if value <= 400000:\n wtotal = 500\n else:\n wtotal = value*0.05/100\n if wtotal < 500:\n wtotal = 500\n elif code == '2207' or code == '2208':\n wtotal = value*0.05/100\n if wtotal < 500:\n wtotal = 500\n elif value <= 50000:\n wtotal = 250\n elif (value > 50000) and (value <= 100000):\n wtotal = 250\n elif (value > 100000) and (value <= 200000):\n wtotal = 500\n elif (value > 200000) and (value <= 300000):\n wtotal = 500\n elif (value > 300000) and (value <= 500000):\n wtotal = 500\n elif (value > 500000) and (value <= 800000):\n wtotal = 500\n elif (value > 800000) and (value <= 1000000):\n wtotal = 500\n elif (value > 1000000) and (value <= 1500000):\n wtotal = value*0.05/100\n elif (value > 1500000) and (value <= 2000000):\n wtotal = value*0.05/100\n elif value > 2000000:\n wtotal = value*0.05/100\n if vehicle == 'Залізничний':\n usd = 26\n if code == '2710' or code == '2707':\n wtotal = weight/1000*0.3*usd\n elif code == '2711':\n wtotal = weight/1000*0.35*usd\n elif code == '2709' or code == '2905':\n wtotal = weight/1000*0.25*usd\n elif code == '2207' or code == '2208':\n wtotal = value*0.2/100\n elif code == '2204':\n wtotal = weight/1000*0.32*usd\n elif code == '3105':\n wtotal = weight/1000*0.25*usd\n elif code == '2909':\n wtotal = weight/1000*0.4*usd\n elif value <= 50000:\n wtotal = 450\n elif (value > 50000) and (value <= 100000):\n wtotal = 620\n elif (value > 100000) and (value <= 200000):\n wtotal = 1000\n elif (value > 200000) and (value <= 300000):\n wtotal = 1200\n elif (value > 300000) and (value <= 500000):\n wtotal = 1500\n elif (value > 500000) and (value <= 800000):\n wtotal = 1700\n elif (value > 800000) and (value <= 1000000):\n wtotal = 2100\n elif (value > 1000000) and (value <= 
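An alternative sketch for the `minIncrementForUnique` solution above: after sorting, each element only needs to be pushed just past the previous kept value, which drops the visited-set bookkeeping.

```python
def min_increment_for_unique(nums):
    nums.sort()
    moves, prev = 0, -1
    for n in nums:
        if n <= prev:
            # push n just past the last value we kept
            moves += prev + 1 - n
            prev += 1
        else:
            prev = n
    return moves


print(min_increment_for_unique([3, 2, 1, 2, 1, 7]))  # 6
```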
1500000):\n wtotal = 2800\n elif (value > 1500000) and (value <= 2000000):\n wtotal = 3300\n elif value > 2000000:\n wtotal = value*0.2/100\n if vehicle == 'Морський':\n usd = 26\n wtotal = weight/1000*0.25*usd\n if vehicle == 'Трубопровідний':\n usd = 26\n wtotal = weight/1000*0.35*usd\n return wtotal\n\n@dp.message_handler(text=\"🧮 Калькулятори\")\nasync def bot_represents(message: types.Message):\n await message.answer('КАЛЬКУЛЯТОРИ')\n await message.answer('Оберіть калькулятор для попереднього розрахунку''',\n reply_markup=calcs_markup)\n\n\n#Warranty calculation process\n@dp.callback_query_handler(text_contains=\"warranty_calc\")\nasync def warranty_calculator(call: types.CallbackQuery):\n await call.answer(cache_time=60)\n await call.message.answer('💶 ВАРТІСТЬ ГАРАНТІЇ')\n await call.message.answer('⚠️Для попереднього розрахунку вартості фінансової гарантії надайте послідовно відповіді на наступні 4 питання')\n await call.message.answer('1️⃣Оберіть вид транспортного засобу:', reply_markup=vehicle_markup)\n await Warranty_calculation.vehicle_state.set()\n\n@dp.callback_query_handler(text=['auto', 'railway', 'sea', 'pipeline'], state=Warranty_calculation.vehicle_state)\nasync def answer_vehicle(call: types.CallbackQuery, state: FSMContext):\n await call.answer(cache_time=60)\n if call.data == 'auto':\n vehicle = 'Автомобільний'\n elif call.data == 'railway':\n vehicle = 'Залізничний'\n elif call.data == 'sea':\n vehicle = 'Морський'\n elif call.data == 'pipeline':\n vehicle = 'Трубопровідний'\n await state.update_data(answer1=vehicle)\n await call.message.edit_reply_markup(reply_markup=None)\n await call.message.answer(vehicle)\n await call.message.answer(\"2️⃣Введіть код товару на рівні 4-х знаків:\")\n await Warranty_calculation.next()\n\n@dp.message_handler(state=Warranty_calculation.cncode_state)\nasync def answer_cncode(message: types.Message, state: FSMContext):\n if re.match(r'\\d{4}', message.text) and len(message.text) == 4:\n cncode = message.text\n await state.update_data(answer2=cncode)\n await message.answer(\"3️⃣Введіть вагу товарів в кілограмах:\")\n await Warranty_calculation.next()\n else:\n await message.answer('❗️Код товару має містити тільки перших 4 знаки (товарна позиція)!!! Очікую на коректний код товару...')\n\n@dp.message_handler(state=Warranty_calculation.weight_state)\nasync def answer_weight(message: types.Message, state: FSMContext):\n if re.match(r'^[0-9]+$', message.text):\n weight = message.text\n await state.update_data(answer3=weight)\n await message.answer(\"4️⃣Введіть суму митних платежів:\")\n await Warranty_calculation.next()\n else:\n await message.answer('❗️Вага товару має містити тільки цифри!!! 
Очікую на коректну вагу товару...')\n\n@dp.message_handler(state=Warranty_calculation.value_state)\nasync def answer_value(message: types.Message, state: FSMContext):\n if re.match(r'^[0-9]+$', message.text):\n data = await state.get_data()\n vehicle = data.get(\"answer1\")\n cncode = data.get(\"answer2\")\n weight = data.get(\"answer3\")\n value = message.text\n await state.update_data(answer4=value)\n price = str(float(\"{0:.2f}\".format(wcalc_algorithm(vehicle,cncode,int(weight),int(value)))))\n result = 'Вид транспорту: '+str(vehicle)+'\\nКод товару: '+str(cncode)+'\\nВага товару: '+str(weight)+' кг\\nСума платежів: '+str(value)+' грн\\n-----------------------------------------------------\\nВартість гарантії: '+price+' грн'\n\n userid = message.from_user.id\n fullname = message.from_user.full_name\n date = datetime.datetime.now()\n try:\n conn = sqlite3.connect('/home/agmorev/pentadabot_v2/data/pentada.db')\n cursor = conn.cursor()\n print('---------------------CALCULATOR--------------------------')\n print(\"Calculator successfully connected to SQLite | \", fullname, ' | ', date)\n query2 = \"INSERT INTO calcs ('userid', 'fullname', 'vehicle', 'code', 'weight', 'value', 'price', 'date') VALUES (?, ?, ?, ?, ?, ?, ?, ?);\"\n variables = (userid, fullname, vehicle, cncode, weight, value, price, date)\n cursor.execute(query2, variables)\n conn.commit()\n print(\"Record inserted successfully into calcs table \", cursor.rowcount)\n print(userid, fullname, vehicle, cncode, weight, value, price, date)\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"Failed to insert data into sqlite table\", error)\n finally:\n if (conn):\n conn.close()\n print(\"The SQLite connection is closed\")\n\n await message.answer('‼️ Розрахунок є попереднім ‼️\\nОстаточну вартість буде узгоджено при укладанні договору з гарантом.')\n await message.answer(result)\n await state.finish()\n else:\n await message.answer('❗Сума митних платежів має бути цілим числом!!!!!! Очікую на коректну суму...')\n\n\n#Customs value calculation process\n@dp.callback_query_handler(text_contains=\"customs_value\")\nasync def cvalue_calculator(call: types.CallbackQuery):\n await call.answer(cache_time=60)\n await call.message.answer('💲 ПОКАЗНИКИ МИТНОЇ ВАРТОСТІ')\n await call.message.answer('⚠️Розрахунок показників митної вартості за кодом товару та країною походження')\n await call.message.answer('👨‍💻 Введіть код товару (10 знаків):')\n await Cvalue_calculation.cncode_state.set()\n\n@dp.message_handler(state=Cvalue_calculation.cncode_state)\nasync def cvalue_cncode(message: types.Message, state: FSMContext):\n if re.match(r'\\d{10}', message.text) and len(message.text) == 10:\n cncode = message.text\n await state.update_data(answer1=cncode)\n await message.answer(\"👨‍💻Введіть країну походження у форматі скороченої назви (PL) або повної (Польща):\")\n await Cvalue_calculation.next()\n else:\n await message.answer('❗️Код товару має містити 10 знаків!!! 
Очікую на коректний код товару...')\n\n@dp.message_handler(state=Cvalue_calculation.country_state)\nasync def cvalue_country(message: types.Message, state: FSMContext):\n if re.match(r'^[A-ZА-я -]+$', message.text):\n doc = open('/home/agmorev/pentadabot_v2/data/waiting.mp4', 'rb')\n msg = await message.answer_animation(doc, caption='Зачекайте...')\n df = pd.read_excel('/home/agmorev/pentadabot_v2/data/cvalue.xlsx', sheet_name=0)\n country = message.text\n data = await state.get_data()\n cncode = data.get(\"answer1\")\n df['Країна походження товару'] = df['Країна походження товару'].str.upper()\n try:\n cond = df[(df['Код УКТЗЕД Підкатегорія - 10 знаків'] == int(cncode)) & (df['Країна походження товару'].str.contains(country.upper()))]\n min_value = cond['Мінімальна митна вартість'].values[0]\n avr_value = cond['Середня митна вартість'].values[0]\n max_value = cond['Максимальна митна вартість'].values[0]\n result = '*Код товару:* '+str(cncode)+'\\n*Країна походження:* '+str(country)+'\\n*Мін. митна вартість:* '+str(\"{:.2f}\".format(min_value))+'$\\n*Сер. митна вартість:* '+str(\"{:.2f}\".format(avr_value))+'$\\n*Макс. митна вартість:* '+str(\"{:.2f}\".format(max_value))+'$'\n await msg.delete()\n await message.answer('‼️ Розрахунок здійснено на підст��ві відкритих даних Держмитслужби')\n await message.answer(result, parse_mode='Markdown')\n await state.finish()\n except:\n await msg.delete()\n await message.answer('‼️ Запис за вказаним запитом не знайдено')\n await state.finish()\n\n userid = message.from_user.id\n fullname = message.from_user.full_name\n date = datetime.datetime.now()\n try:\n conn = sqlite3.connect('/home/agmorev/pentadabot_v2/data/pentada.db')\n # conn = sqlite3.connect('D:\\PYTHON\\PROJECTS\\Bots\\pentadabot_v2\\data\\pentada.db')\n cursor = conn.cursor()\n print(\"Customs value block successfully connected to SQLite | \", fullname, ' | ', date)\n query4 = \"INSERT INTO cvalue ('userid', 'fullname', 'cncode', 'country', 'date') VALUES (?, ?, ?, ?, ?);\"\n variables = (userid, fullname, cncode, country, date)\n cursor.execute(query4, variables)\n conn.commit()\n print(\"Record inserted successfully into cvalue table \", cursor.rowcount)\n print(userid, fullname, cncode, country, date)\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"Failed to insert data into sqlite table\", error)\n finally:\n if (conn):\n conn.close()\n print(\"The SQLite connection is closed\")\n\n else:\n await message.answer('❗️Очікую на коректне введення країни походження...')\n\n\n\n\n\n\n\n\n\n\n\n#Customs payments calculation process\n@dp.callback_query_handler(text_contains=\"payments_calc\")\nasync def customs_calculator(call: types.CallbackQuery):\n await call.answer(cache_time=60)\n await call.message.answer('💵 МИТНІ ПЛАТЕЖІ')\n await call.message.answer('❗️Для попереднього розрахунку митних платежів заповніть наступну форму')","repo_name":"agmorev/pentada-bot","sub_path":"handlers/users/calcs.py","file_name":"calcs.py","file_ext":"py","file_size_in_byte":13532,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11877398113","text":"import torch as t\n\n\nclass GenRNN(t.nn.Module):\n def __init__(self, input_size, hidden_size, output_size, n_layers=1):\n super(GenRNN, self).__init__()\n self.n_layers = n_layers\n self.hidden_dim = hidden_size\n self.rnn = t.nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=n_layers,\n batch_first=True\n )\n self.dropout = t.nn.Dropout(p=0.5)\n self.bn = 
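A usage sketch for `wcalc_algorithm` above (standalone call; the figures are illustrative, and the UAH/USD rate is hard-coded at 26 inside the function):

```python
print(wcalc_algorithm('Автомобільний', '2204', 1000, 350000))  # 500 (value <= 400000)
print(wcalc_algorithm('Залізничний', '2710', 60000, 250000))   # 60 t * 0.3 * 26 = 468.0
```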
t.nn.BatchNorm1d(num_features=hidden_size)\n self.fc = t.nn.Linear(hidden_size, output_size)\n\n def forward(self, inputs, h_state=None):\n batch_size, seq_len, feature = inputs.shape\n # initialize the hidden state\n if h_state is None:\n # print(self.n_layers, batch_size, self.hidden_dim)\n h_0 = inputs.data.new(self.n_layers, batch_size, self.hidden_dim).fill_(0).float()\n c_0 = inputs.data.new(self.n_layers, batch_size, self.hidden_dim).fill_(0).float()\n h_state = (h_0, c_0)\n\n output, h_state = self.rnn(inputs, h_state)\n\n # output = self.fc2(self.dropout(self.fc1(self.dropout(output.contiguous().view(batch_size * seq_len, -1)))))\n # output = self.bn(output.contiguous().view(batch_size * seq_len, -1))\n output = self.fc(output.contiguous().view(batch_size * seq_len, -1))\n return output, h_state\n","repo_name":"zhwzhong/Compress","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30248384421","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg1 = cv2.imread('../../img/circles.jpg')\nimg2 = cv2.imread('../../img/loading.jpg')\n\nimg1 = cv2.resize(img1,(img2.shape[1],img2.shape[0]))\n\n# print img1.shape, img2.shape[:2]\n\ndst = cv2.addWeighted(img1,0.5,img2,1,0)\n\ncv2.imshow('dst',dst)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"lichaopeng/OpenCV3_Practice","sub_path":"test/CoreOperations/blending.py","file_name":"blending.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40460079117","text":"import sys\nimport os\nimport asyncio\nimport io\nfrom ..utils import Zbot\nfrom Zaid import Zaid\nfrom config import OWNER_ID\nfrom Zaid.Plugins.mongodb.chats_db import get_total_chats, get_all_chat_id\nfrom Zaid.Plugins.mongodb.notes_db import get_total_notes\nfrom Zaid.Plugins.mongodb.filters_db import get_total_filters\nfrom Zaid.Plugins.mongodb.rules_db import get_total_rules\nfrom Zaid.Plugins.mongodb.welcome_db import get_total_welcome\nfrom Zaid.Plugins.mongodb.nightmode_db import get_total_nightmode\nfrom Zaid.Plugins.mongodb.locks_db import get_total_locks\n\n@Zbot(pattern=\"^/stats ?(.*)\", from_users=[OWNER_ID])\nasync def stats(event):\n a = get_total_notes()\n b = len(Zaid.list_event_handlers())\n c = get_total_chats()\n d = get_total_filters()\n e = get_total_welcome()\n f = get_total_rules()\n g = get_total_nightmode()\n h = get_total_locks()\n await event.reply(f\"✘ Current Stats\\n‣ Total Notes: {a}\\n‣ Total Commands: {b}\\n‣ Total Chats: {c}\\n‣ Total Filters: {d}\\n‣ Welcome: {e}\\n‣ Total Rules: {f}\\n‣Total Nightmode: {g}\\n‣ Total Locks: {h}\")\n\n\n\n@Zbot(pattern=\"^/restart$\", from_users=[OWNER_ID])\nasync def restart(e):\n await e.reply(\"**__Restarting....__**\")\n args = [sys.executable, \"-m\", \"Zaid\"]\n os.execle(sys.executable, *args, os.environ)\n\n\n\n@Zbot(pattern=\"^/broadcast ?(.*)\", from_users=[OWNER_ID])\nasync def bc(event):\n if not event.sender_id in [OWNER_ID]:\n return await event.reply(\n \"You don't have access to use this, visit @TheSupportChat.\"\n )\n if event.reply_to:\n r = await event.get_reply_message()\n b_text = r.text\n b_file = r.media\n elif event.pattern_match.group(1):\n b_text = event.text.split(None, 1)[1]\n b_file = None\n chats = get_all_chat_id()\n s = f = 0\n for chat in chats:\n try:\n await event.client.send_message(int(chat), b_text, 
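A shape check for the `GenRNN` module above (assumes the class is in scope; random inputs): the FC head flattens batch and sequence, so the model returns `(batch * seq_len, output_size)` plus the LSTM state tuple.

```python
import torch as t

model = GenRNN(input_size=8, hidden_size=16, output_size=4, n_layers=1)
x = t.randn(2, 5, 8)                 # (batch, seq_len, feature)
out, (h, c) = model(x)
print(out.shape, h.shape, c.shape)   # (10, 4), (1, 2, 16), (1, 2, 16)
```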
file=b_file)\n s += 1\n except:\n f += 1\n await event.reply(f\"Sucessfully broadcasted, Sucess in {s} chats, {f} failed\")\n","repo_name":"kannadigaXD/Chatbot","sub_path":"Zaid/Plugins/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"32897692881","text":"import webvtt\nfrom datetime import datetime\nfrom jinja2 import Template\nimport json\nimport pprint\nimport os\nimport re\nimport argparse\n\n\ndef parse_subtitle(subtitle_file):\n tmp_list = []\n for caption in webvtt.read(subtitle_file):\n txt = caption.text.replace(\"\\n\", \" \")\n pt = datetime.strptime(caption.start, '%H:%M:%S.%f')\n sec = pt.second + pt.minute*60 + pt.hour*3600\n tmp_list.append({\n \"time\": caption.start,\n \"sec\": sec,\n \"text\": txt\n })\n return tmp_list\n\n\ndef parse_yt_json(json_file):\n if os.path.isfile(json_file):\n with open(json_file) as json_file:\n data = json.load(json_file)\n tmp_sub_file = \"tmp_subtitles.txt\"\n with open(tmp_sub_file, \"w\") as fp:\n fp.write(data[\"subtitles\"])\n subtitles_list = parse_subtitle(tmp_sub_file)\n data[\"subtitles_list\"] = subtitles_list\n return data\n return None\n\n\ndef format_video_time(desc, video_code):\n m1 = re.findall(r'^\\d{1,2}:\\d{1,2}:\\d{1,2}', desc)\n m2 = re.findall(r'^\\d{2}:\\d{1,2}', desc)\n ma = m1 + m2\n for item in ma:\n desc = desc.replace(item, \"%s\" % (video_code, item))\n print(item)\n\n\ndef format_description(desc):\n return desc.replace(\"\\n\", \"
    \")\n\n\ndef generate_page(yt_obj, template_file):\n with open(template_file) as fp:\n temp_text = fp.read()\n t = Template(temp_text)\n # format_video_time(yt_obj[\"desc\"], \"aaaaaaauuuuuuuuuu\")\n return t.render(\n subtitles_list=yt_obj[\"subtitles_list\"],\n code=yt_obj[\"code\"],\n description=format_description(yt_obj[\"desc\"]),\n title=yt_obj[\"title\"],\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--json-input', help='Generated json file')\n parser.add_argument('--page-template', help='Jinja2 template', default=\"podcast_template.j2\")\n parser.add_argument('--output', help='Output file', default=\"page.html\")\n\n args = parser.parse_args()\n\n if args.json_input:\n with open(args.output, \"w\") as fp:\n dd = parse_yt_json(args.json_input)\n page = generate_page(dd, args.page_template)\n fp.write(page)\n print(\"Page generated %s \" % args.output)\n","repo_name":"ja-pa/huberman_podcast","sub_path":"generate_page.py","file_name":"generate_page.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"72857334596","text":"#!/usr/bin/env python\n# coding:utf-8\n\n\nclass Student:\n\n def __init__(self, name):\n self._name = name\n\n # __slots__ = ('_name', '_age', '_score', '_grade')\n # to confine the attributes of this class\n\n def __len__(self):\n return len(self._name)\n\n def __str__(self):\n return 'Student: ' + str(self._name)\n\n __repr__ = __str__\n\n def __getattr__(self, item):\n if item == 'score':\n return 95\n if item == 'grade':\n return lambda: 'A' # return a function\n raise AttributeError(\"Student object has no attribute {0}\".format(item))\n\n def __call__(self):\n print('My name is', self._name)\n\ns = Student('Hailey')\nprint(len(s))\nprint(s)\nprint(s.score)\nprint(s.grade())\ns()\nprint(callable(s))\nprint(callable(abs))\nprint()\n\n\nclass Fibs:\n\n def __init__(self, n):\n self.a, self.b = 0, 1\n self.n = n\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.a, self.b = self.b, self.a + self.b\n if self.a > self.n:\n raise StopIteration()\n return self.a\n\nfor i in Fibs(3):\n print(i)\n\n\nclass Fib:\n\n def __init__(self):\n self.a, self.b = 0, 1\n\n def __getitem__(self, item):\n if isinstance(item, int):\n a, b = 1, 1\n for x in range(item):\n a, b = b, a+b\n return a\n if isinstance(item, slice):\n start = item.start\n stop = item.stop\n step = item.step\n negative = False\n if start is None:\n start = 0\n if step is None:\n step = 1\n if step < 0:\n negative = True\n step = -step\n if start < 0 or stop <= 0 or step == 0 or start >= stop or stop is None:\n raise SyntaxError(\"Illegal slice!\")\n a, b = 1, 1\n l = []\n for x in range(stop):\n if x == start:\n l.append(a)\n a, b = b, a+b\n next_one = start + step\n continue\n if x > start and x == next_one:\n l.append(a)\n next_one += step\n a, b = b, a+b\n if not negative:\n return l\n else:\n return list(reversed(l))\n\n\nf = Fib()\nprint(f[10])\nprint(f[5:10])\nprint(f[:20])\nprint(f[0:20:2])\nprint(f[:50:5])\nprint(f[:50:-5])\n","repo_name":"bambrow/python-programming-notes","sub_path":"class_basics/07_special.py","file_name":"07_special.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"27979794528","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage import filters, feature, img_as_int\nfrom 
skimage.measure import regionprops\n\n\ndef get_interest_points(image, feature_width):\n alpha = 0.06\n threshold = 0.01\n stride = 2\n sigma = 0.1\n min_distance = 3\n sigma0 = 0.1\n\n print(f'alpha: {alpha}, threshold: {threshold}, stride: {stride}, sigma: {sigma}, min_distance: {min_distance}')\n\n #Step1: blur image (optional)\n filtered_image = filters.gaussian(image, sigma=sigma)\n\n # Step2: calculate gradient of image\n I_x = filters.sobel_v(filtered_image)\n I_y = filters.sobel_h(filtered_image)\n\n # Step3: calculate Gxx, Gxy, Gyy\n I_xx = np.square(I_x)\n I_xy = np.multiply(I_x, I_y)\n I_yy = np.square(I_y)\n\n I_xx = filters.gaussian(I_xx, sigma=sigma0)\n I_xy = filters.gaussian(I_xy, sigma=sigma0)\n I_yy = filters.gaussian(I_yy, sigma=sigma0)\n\n listC = np.zeros_like(image)\n\n # Step4: caculate C matrix\n for y in range(0, image.shape[0]-feature_width, stride):\n for x in range(0, image.shape[1]-feature_width, stride):\n # matrix 17x17\n Sxx = np.sum(I_xx[y:y+feature_width+1, x:x+feature_width+1])\n Syy = np.sum(I_yy[y:y+feature_width+1, x:x+feature_width+1])\n Sxy = np.sum(I_xy[y:y+feature_width+1, x:x+feature_width+1])\n\n detC = (Sxx * Syy) - (Sxy**2)\n traceC = Sxx + Syy\n C = detC - alpha*(traceC**2)\n \n if C > threshold:\n listC[y+feature_width//2, x+feature_width//2] = C\n\n # Step5: using non-maximal suppression\n ret = feature.peak_local_max(listC, min_distance=min_distance, threshold_abs=threshold)\n return ret[:, 1], ret[:, 0]\n\n\ndef get_features(image, x, y, feature_width):\n\n x = np.round(x).astype(int)\n y = np.round(y).astype(int)\n\n sigma_gradient_image = 0.1\n sigma_16x16 = 0.4\n threshold = 0.2\n\n print(f'sigma_gradient_image: {sigma_gradient_image}, sigma_16x16: {sigma_16x16}, threshold: {threshold}')\n\n features = np.zeros((len(x), 4, 4, 8))\n \n # step0: blur image (optional)\n filtered_image = filters.gaussian(image, sigma=sigma_gradient_image)\n \n # step1: compute the gradient of image\n d_im_x = filters.sobel_v(filtered_image)\n d_im_y = filters.sobel_h(filtered_image)\n\n magnitude_gradient = np.sqrt(np.add(np.square(d_im_x), np.square(d_im_y)))\n direction_gradient = np.arctan2(d_im_y, d_im_x)\n direction_gradient[direction_gradient < 0] += 2 * np.pi\n\n # step2:\n # image.shape[0] = 1024\n # image.shape[1] = 768\n # x (0 -> 768)\n # y (0 -> 1024)\n for n, (x_, y_) in enumerate(zip(x, y)):\n # get windows of key point(x, y)\n rows = (y_ - feature_width//2, y_ + feature_width//2 + 1)\n cols = (x_ - feature_width//2, x_ + feature_width//2 + 1)\n\n if rows[0] < 0:\n rows = (0, feature_width+1)\n if rows[1] > image.shape[0]:\n rows = (image.shape[0]-feature_width-1, image.shape[0]-1)\n\n if cols[0] < 0:\n cols = (0, feature_width+1)\n if cols[1] > image.shape[1]:\n cols = (image.shape[1]-feature_width-1, image.shape[1]-1)\n\n # get gradient and angle of key point\n magnitude_window = magnitude_gradient[rows[0]:rows[1], cols[0]:cols[1]]\n direction_window = direction_gradient[rows[0]:rows[1], cols[0]:cols[1]]\n\n # Gaussian filter on window\n magnitude_window = filters.gaussian(\n magnitude_window, sigma=sigma_16x16)\n direction_window = filters.gaussian(\n direction_window, sigma=sigma_16x16)\n\n for i in range(feature_width//4):\n for j in range(feature_width//4):\n current_magnitude = magnitude_window[i*feature_width//4: (\n i+1)*feature_width//4, j*feature_width//4:(j+1)*feature_width//4]\n\n current_direction = direction_window[i*feature_width//4: (\n i+1)*feature_width//4, j*feature_width//4:(j+1)*feature_width//4]\n\n features[n, i, j] = 
np.histogram(current_direction.reshape(\n -1), bins=8, range=(0, 2*np.pi), weights=current_magnitude.reshape(-1))[0]\n\n # Extract 8 x 16 values into 128-dim vector\n features = features.reshape((len(x), -1,))\n\n # Normalize vector to [0...1]\n norm = np.sqrt(np.square(features).sum(axis=1)).reshape(-1, 1)\n features = features / norm\n\n # Clamp all vector values > 0.2 to 0.2\n features[features >= threshold] = threshold\n\n # Re-normalize\n norm = np.sqrt(np.square(features).sum(axis=1)).reshape(-1, 1)\n features = features / norm\n\n return features\n\n\ndef match_features(im1_features, im2_features):\n threshold = 0.8\n print(f'Match threshold: {threshold}')\n\n matches = []\n confidences = []\n\n for i in range(im1_features.shape[0]):\n distances = np.sqrt(np.square(np.subtract(\n im1_features[i, :], im2_features)).sum(axis=1))\n index_sorted = np.argsort(distances)\n if distances[index_sorted[0]] / distances[index_sorted[1]] < threshold:\n matches.append([i, index_sorted[0]])\n confidences.append(\n 1.0 - distances[index_sorted[0]]/distances[index_sorted[1]])\n matches = np.asarray(matches)\n confidences = np.asarray(confidences)\n return matches, confidences\n","repo_name":"hydrogen1999/IMP","sub_path":"code/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38775665590","text":"import math\ndef square_root():\n num = int(input())\n j = 1\n for i in range(num):\n line = int(input())\n r = round(math.sqrt(line),3)\n fmt = 'input{}: {} sqrt{}: {}'.format(j,line,j,r)\n print(fmt)\n j += 1\n\nsquare_root()","repo_name":"JCPeanu/Mysite","sub_path":"02_input_and_output.py/readinput(for).py","file_name":"readinput(for).py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37447157973","text":"import logging\nimport BigWorld\n\nimport string\n\ndef ApplyStyleOverrides(_logger, config):\n\n try:\n\n from items import vehicles\n\n defaultStyle = config.tryGetValue('styleOverrides', 'default')\n if defaultStyle:\n try:\n defaultStyle = int(defaultStyle)\n except:\n _logger.error('Default for styleOverrides must be integer value')\n defaultStyle = None\n \n for value in vehicles.g_cache.customization20().styles.itervalues():\n config.setValue('originalStyles', str(value.id), value.userString)\n \n override = config.tryGetValue('styleOverrides', str(value.id))\n if override:\n try:\n if override.lower() == 'allow':\n continue\n override = int(override)\n except:\n _logger.error('styleOverrides for #' + str(value.id) + ' must be integer value')\n override = None\n override = override or defaultStyle\n \n if override:\n try:\n #remove modelSet\n value.modelsSet = ''\n #remove style\n for outfitid in value.outfits:\n outift = value.outfits[outfitid]\n for camo in outift.camouflages:\n camo.id = override\n outift.paints = []\n outift.decals = []\n outift.projection_decals = []\n outift.personal_numbers = []\n except Exception as e:\n _logger.error('Failed to apply style override for #' + str(value.id))\n _logger.error(str(e))\n\n _logger.info('Style overrides have been applied successfully')\n except Exception as e:\n _logger.error('Failed to apply style overrides')\n 
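An end-to-end sketch for the Harris-corner / descriptor / ratio-test pipeline above (assumes `get_interest_points`, `get_features`, and `match_features` are importable; the test images come from skimage's bundled data):

```python
import numpy as np
from skimage import color, data, transform

img1 = color.rgb2gray(data.astronaut())
img2 = transform.rotate(img1, 2)  # slightly rotated copy

x1, y1 = get_interest_points(img1, 16)
x2, y2 = get_interest_points(img2, 16)
f1 = get_features(img1, x1, y1, 16)
f2 = get_features(img2, x2, y2, 16)

matches, confidences = match_features(f1, f2)
print(len(matches), "putative matches")
```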
_logger.error(str(e))\n","repo_name":"PTwr/WoT_Vanillifer","sub_path":"src/scripts/client/gui/mods/mod_Vanillifer_Styles.py","file_name":"mod_Vanillifer_Styles.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"5298389716","text":"import torch\nimport wandb\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom lib import utils\nimport cv2\nimport glob\nimport torchvision\n\nsys.path.append(\"./\")\nsys.path.append(\"./submodel/\")\nsys.path.append(\"./submodel/stylegan2\")\nfrom stylerig.stylerig import RIGNET\n\nRandomGenerator = np.random.RandomState(42)\nG = RIGNET().cuda().train()\nckpt_path = f'/home/compu/abc/training_result/d10/ckpt/G_40000.pt'\nckpt_dict = torch.load(ckpt_path, map_location=torch.device('cuda'))\nG.load_state_dict(ckpt_dict['model'], strict=False)\n\ntransforms = transforms.Compose([\n transforms.Resize((256,256)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\nimg_paths = sorted(glob.glob(\"/home/compu/abc/samples/k-celeb/*.*g\"))\n\nwith torch.no_grad():\n for img_path in img_paths:\n img_name = os.path.split(img_path)[1][:-4]\n print(f\"processing >>> {img_name}\")\n img = Image.open(img_path)\n img = transforms(img).unsqueeze(0).cuda()\n angle = G.get_angle(img)\n recon_source, w_source = G.get_recon_image(img)\n\n image_list = [img, recon_source]\n for yaw in [0, 15, 30, 45, 60, 75, 90]: \n target_angle = torch.tensor([[angle[0][0],yaw/90,angle[0][2]]]).cuda()\n w_reenact, lm3d_reenact = G(img, target_angle)\n I_reenact = G.get_image_from_w(w_reenact)\n image_list.append(I_reenact)\n\n images = torch.cat(image_list, dim=0)\n sample_image = torchvision.utils.make_grid(images.detach().cpu(), nrow=images.shape[0]).numpy().transpose([1,2,0]) * 127.5 + 127.5\n cv2.imwrite(f'samples/result_{img_name}.jpg', sample_image[:,:,::-1])\n","repo_name":"LJLLDQ/INVZ-hififace","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74276972357","text":"\nimport logging\nimport re\n\nimport environ\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError\nfrom munch import Munch\nfrom requests import ConnectTimeout, ReadTimeout\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom sme_terceirizadas.perfil.models.usuario import (\n ImportacaoPlanilhaUsuarioExternoCoreSSO,\n ImportacaoPlanilhaUsuarioServidorCoreSSO,\n ImportacaoPlanilhaUsuarioUEParceiraCoreSSO\n)\n\nfrom ...dados_comuns.constants import (\n ADMINISTRADOR_DIETA_ESPECIAL,\n ADMINISTRADOR_EMPRESA,\n ADMINISTRADOR_GESTAO_ALIMENTACAO_TERCEIRIZADA,\n ADMINISTRADOR_GESTAO_PRODUTO,\n ADMINISTRADOR_SUPERVISAO_NUTRICAO,\n COGESTOR_DRE\n)\nfrom ...dados_comuns.models import Contato\nfrom ...eol_servico.utils import EOLException, EOLService, EOLServicoSGP\nfrom ...perfil.api.validators import checa_senha, usuario_com_coresso_validation, usuario_e_das_terceirizadas\nfrom ...terceirizada.models import Terceirizada\nfrom ..models import Perfil, PerfisVinculados, Usuario, Vinculo\nfrom ..services.usuario_coresso_service import EOLUsuarioCoreSSO\nfrom .validators import (\n deve_ser_email_sme_ou_prefeitura,\n deve_ter_mesmo_cpf,\n 
registro_funcional_e_cpf_sao_da_mesma_pessoa,\n senha_deve_ser_igual_confirmar_senha,\n terceirizada_tem_esse_cnpj,\n usuario_e_vinculado_a_aquela_instituicao,\n usuario_nao_possui_vinculo_valido,\n usuario_pode_efetuar_cadastro\n)\n\nenv = environ.Env()\n\nlogger = logging.getLogger(__name__)\n\n\nclass PerfilSimplesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Perfil\n fields = ('nome', 'visao', 'uuid')\n\n\nclass PerfilSerializer(serializers.ModelSerializer):\n class Meta:\n model = Perfil\n exclude = ('id', 'nome', 'ativo')\n\n\nclass PerfisVinculadosSerializer(serializers.ModelSerializer):\n perfil_master = PerfilSimplesSerializer()\n perfis_subordinados = PerfilSimplesSerializer(many=True)\n\n class Meta:\n model = PerfisVinculados\n fields = ('perfil_master', 'perfis_subordinados')\n\n\nclass UsuarioSerializer(serializers.ModelSerializer):\n cpf = serializers.SerializerMethodField()\n nome_fantasia = serializers.SerializerMethodField()\n\n def get_cpf(self, obj):\n if obj.vinculo_atual and isinstance(obj.vinculo_atual.instituicao, Terceirizada):\n return obj.cpf\n return None\n\n def get_nome_fantasia(self, obj):\n if obj.vinculo_atual and isinstance(obj.vinculo_atual.instituicao, Terceirizada):\n return obj.vinculo_atual.instituicao.nome_fantasia\n return None\n\n class Meta:\n model = Usuario\n fields = (\n 'uuid',\n 'cpf',\n 'nome',\n 'email',\n 'date_joined',\n 'registro_funcional',\n 'tipo_usuario',\n 'cargo',\n 'crn_numero',\n 'nome_fantasia'\n )\n\n\nclass UsuarioVinculoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Usuario\n fields = (\n 'uuid',\n 'cpf',\n 'nome',\n 'email',\n 'date_joined',\n 'registro_funcional',\n 'tipo_usuario',\n 'cargo'\n )\n\n\nclass VinculoSerializer(serializers.ModelSerializer):\n perfil = PerfilSimplesSerializer()\n usuario = UsuarioVinculoSerializer()\n\n class Meta:\n model = Vinculo\n fields = ('uuid', 'data_inicial', 'data_final', 'perfil', 'usuario')\n\n\nclass VinculoSimplesSerializer(serializers.ModelSerializer):\n username = serializers.CharField(source='usuario.username')\n nome_usuario = serializers.CharField(source='usuario.nome')\n email_usuario = serializers.CharField(source='usuario.email')\n cpf_usuario = serializers.CharField(source='usuario.cpf')\n uuid_usuario = serializers.CharField(source='usuario.uuid')\n cnpj_empresa = serializers.SerializerMethodField()\n nome_perfil = serializers.CharField(source='perfil.nome')\n visao_perfil = serializers.CharField(source='perfil.visao')\n nome_escola = serializers.SerializerMethodField()\n\n def get_cnpj_empresa(self, obj):\n if obj.content_type.name == 'Terceirizada':\n return obj.instituicao.cnpj\n return None\n\n def get_nome_escola(self, obj):\n if obj.content_type.name == 'Escola':\n return obj.instituicao.nome\n return None\n\n class Meta:\n model = Vinculo\n fields = ('uuid', 'username', 'nome_usuario', 'email_usuario', 'cpf_usuario', 'uuid_usuario', 'cnpj_empresa',\n 'nome_perfil', 'visao_perfil', 'nome_escola', )\n\n\nclass UsuarioUpdateSerializer(serializers.ModelSerializer):\n confirmar_password = serializers.CharField()\n\n def get_informacoes_usuario(self, validated_data):\n return EOLService.get_informacoes_usuario(validated_data['registro_funcional'])\n\n def atualizar_nutricionista(self, usuario, validated_data):\n if validated_data.get('contatos', None):\n usuario.email = validated_data['contatos'][0]['email']\n else:\n usuario.email = validated_data.get('email')\n usuario.cpf = validated_data.get('cpf', None)\n 
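The `SerializerMethodField` pattern used by `UsuarioSerializer` above, reduced to a standalone sketch (no model; the field names and the dict instance are illustrative):

```python
from rest_framework import serializers


class ExampleSerializer(serializers.Serializer):
    nome = serializers.CharField()
    cpf = serializers.SerializerMethodField()  # resolved via get_cpf below

    def get_cpf(self, obj):
        # expose the CPF only for Terceirizada-linked users, as above
        return obj.get("cpf") if obj.get("eh_terceirizada") else None


print(ExampleSerializer({"nome": "Ana", "cpf": "123", "eh_terceirizada": True}).data)
```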
usuario.registro_funcional = None\n usuario.nome = validated_data['nome']\n usuario.crn_numero = validated_data.get('crn_numero', None)\n usuario.save()\n for contato_json in validated_data.get('contatos', []):\n contato = Contato(\n email=contato_json['email'],\n telefone=contato_json['telefone']\n )\n contato.save()\n usuario.contatos.add(contato)\n return usuario\n\n def atualizar_distribuidor(self, usuario, validated_data):\n usuario.email = validated_data.get('email')\n usuario.cpf = validated_data.get('cpf', None)\n usuario.registro_funcional = None\n usuario.nome = validated_data['nome']\n usuario.crn_numero = validated_data.get('crn_numero', None)\n usuario.super_admin_terceirizadas = True\n usuario.save()\n contatos = validated_data.get('contatos', [])\n\n usuario.contatos.set(contatos)\n return usuario\n\n def criar_distribuidor(self, usuario, validated_data):\n usuario.email = validated_data.get('email')\n usuario.cpf = validated_data.get('cpf', None)\n usuario.registro_funcional = None\n usuario.nome = validated_data['nome']\n usuario.crn_numero = validated_data.get('crn_numero', None)\n usuario.super_admin_terceirizadas = True\n usuario.save()\n contatos = validated_data.get('contatos', None)\n contatos_obj = []\n for contato in contatos:\n email = contato.get('email', None)\n telefone = contato.get('telefone', None)\n contato = Contato(\n email=email,\n telefone=telefone\n )\n contato.save()\n contatos_obj.append(contato)\n usuario.contatos.set(contatos_obj)\n return usuario\n\n def create_nutricionista(self, terceirizada, validated_data):\n if validated_data.get('contatos', None):\n email = validated_data['contatos'][0]['email']\n else:\n email = validated_data.get('email')\n if Usuario.objects.filter(email=email).exists():\n raise ValidationError('Já existe um nutricionista com este email: ' + email)\n usuario = Usuario()\n usuario = self.atualizar_nutricionista(usuario, validated_data)\n usuario.is_active = False\n usuario.save()\n usuario.criar_vinculo_administrador(\n terceirizada,\n nome_perfil=ADMINISTRADOR_EMPRESA\n )\n\n def update_nutricionista(self, terceirizada, validated_data):\n novo_usuario = False\n email = validated_data['contatos'][0]['email']\n if Usuario.objects.filter(email=email, super_admin_terceirizadas=False).exists():\n usuario = Usuario.objects.get(email=email, super_admin_terceirizadas=False)\n usuario.contatos.all().delete()\n else:\n if Usuario.objects.filter(email=email).exists():\n raise ValidationError('Já existe um usuario com este email: ' + email)\n usuario = Usuario()\n usuario.is_active = False\n novo_usuario = True\n usuario = self.atualizar_nutricionista(usuario, validated_data)\n if novo_usuario:\n usuario.criar_vinculo_administrador(\n terceirizada,\n nome_perfil=ADMINISTRADOR_EMPRESA\n )\n else:\n vinculo = usuario.vinculo_atual\n vinculo.perfil = Perfil.objects.get(nome=ADMINISTRADOR_EMPRESA)\n vinculo.save()\n\n def create(self, validated_data): # noqa C901\n # TODO: ajeitar isso aqui, criar um validator antes...\n try:\n informacoes_usuario_json = self.get_informacoes_usuario(validated_data) # noqa\n except EOLException as e:\n return Response({'detail': f'{e}'}, status=status.HTTP_400_BAD_REQUEST)\n eh_da_codae = validated_data['instituicao'] == 'CODAE'\n eh_da_dre = validated_data['instituicao'].startswith('DIRETORIA REGIONAL DE EDUCACAO')\n if not eh_da_codae and not eh_da_dre:\n usuario_e_vinculado_a_aquela_instituicao(\n descricao_instituicao=validated_data['instituicao'],\n instituicoes_eol=informacoes_usuario_json\n )\n cpf 
= informacoes_usuario_json[0]['cd_cpf_pessoa']\n if Usuario.objects.filter(cpf=cpf).exists():\n usuario = Usuario.objects.get(cpf=cpf)\n usuario_nao_possui_vinculo_valido(usuario)\n usuario.enviar_email_confirmacao()\n else:\n email = f'{cpf}@emailtemporario.prefeitura.sp.gov.br'\n usuario = Usuario.objects.create_user(email, 'adminadmin')\n usuario.registro_funcional = validated_data['registro_funcional']\n usuario.nome = informacoes_usuario_json[0]['nm_pessoa']\n usuario.cpf = cpf\n usuario.is_active = False\n usuario.save()\n return usuario\n\n def _validate(self, instance, attrs): # noqa C901\n senha_deve_ser_igual_confirmar_senha(attrs['password'], attrs['confirmar_password']) # noqa\n cpf = attrs.get('cpf')\n cnpj = attrs.get('cnpj', None)\n if cnpj:\n usuario_e_das_terceirizadas(instance)\n terceirizada_tem_esse_cnpj(instance.vinculo_atual.instituicao, cnpj) # noqa\n if instance.cpf:\n deve_ter_mesmo_cpf(cpf, instance.cpf)\n if 'registro_funcional' in attrs:\n registro_funcional_e_cpf_sao_da_mesma_pessoa(instance, attrs['registro_funcional'], attrs['cpf']) # noqa\n usuario_pode_efetuar_cadastro(instance)\n if instance.vinculo_atual.perfil.nome in [\n COGESTOR_DRE,\n ADMINISTRADOR_GESTAO_ALIMENTACAO_TERCEIRIZADA,\n ADMINISTRADOR_DIETA_ESPECIAL,\n ADMINISTRADOR_GESTAO_PRODUTO,\n ADMINISTRADOR_SUPERVISAO_NUTRICAO\n ]:\n deve_ser_email_sme_ou_prefeitura(attrs['email'])\n\n return attrs\n\n def partial_update(self, instance, validated_data): # noqa C901\n cnpj = validated_data.get('cnpj', None)\n validated_data = self._validate(instance, validated_data)\n try:\n self.update(instance, validated_data)\n except IntegrityError as e:\n if re.search('perfil_usuario_cpf_key.+already\\\\sexists', e.args[0], flags=re.I | re.S):\n raise serializers.ValidationError('CPF já cadastrado')\n if re.search('perfil_usuario_email_key.+already\\\\sexists', e.args[0], flags=re.I | re.S):\n raise serializers.ValidationError('Email já cadastrado')\n raise e\n instance.set_password(validated_data['password'])\n if cnpj:\n instance.vinculo_atual.ativar_vinculo()\n instance.is_active = True\n instance.save()\n return instance\n\n class Meta:\n model = Usuario\n fields = (\n 'email',\n 'registro_funcional',\n 'password',\n 'confirmar_password',\n 'cpf'\n )\n write_only_fields = ('password',)\n\n\nclass UsuarioContatoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Contato\n exclude = ('id',)\n\n\nclass SuperAdminTerceirizadaSerializer(serializers.ModelSerializer):\n contatos = UsuarioContatoSerializer(many=True)\n cpf = serializers.CharField(max_length=11, allow_blank=False)\n email = serializers.EmailField(max_length=None, min_length=None, allow_blank=False)\n\n def validate_cpf(self, value):\n if self.context['request']._request.method == 'POST':\n if self.Meta.model.objects.filter(cpf=value).exists():\n raise ValidationError('Usuário com este CPF já existe.')\n return value\n\n def validate_email(self, value):\n if self.context['request']._request.method == 'POST':\n if self.Meta.model.objects.filter(email=value).exists():\n raise ValidationError('Usuário com este Email já existe.')\n return value\n\n class Meta:\n model = Usuario\n fields = (\n 'uuid',\n 'cpf',\n 'nome',\n 'email',\n 'contatos',\n 'cargo'\n )\n\n\nclass UsuarioComCoreSSOCreateSerializer(serializers.ModelSerializer):\n eh_servidor = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n username = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n 
nome = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n visao = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n subdivisao = serializers.UUIDField(write_only=True, required=False, allow_null=True)\n perfil = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n instituicao = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n cpf = serializers.CharField(write_only=True, required=True, allow_blank=False, allow_null=False)\n email = serializers.EmailField(write_only=True, required=True, allow_blank=False, allow_null=False)\n cargo = serializers.CharField(write_only=True, required=False, allow_blank=True, allow_null=False)\n\n def validate(self, attrs):\n visao = attrs.get('visao')\n subdivisao = attrs.get('subdivisao')\n usuario_com_coresso_validation(visao, subdivisao)\n\n return attrs\n\n class Meta:\n model = Usuario\n fields = ['uuid', 'username', 'email', 'nome', 'visao', 'subdivisao', 'perfil', 'instituicao', 'cpf', 'cargo',\n 'eh_servidor']\n\n def enviar_email(self, usuario, eh_servidor):\n if not eh_servidor:\n usuario.envia_email_primeiro_acesso_usuario_empresa()\n elif env('DJANGO_ENV') == 'production':\n usuario.envia_email_primeiro_acesso_usuario_servidor()\n\n @transaction.atomic # noqa\n def create(self, validated_data):\n dados_usuario_dict = {\n 'login': validated_data['username'],\n 'nome': validated_data['nome'],\n 'email': validated_data['email'],\n 'cargo': validated_data.get('cargo', None),\n 'cpf': validated_data['cpf'],\n 'perfil': validated_data['perfil'],\n 'visao': validated_data['visao'],\n 'subdivisao': validated_data.get('subdivisao', None),\n 'instituicao': validated_data['instituicao'],\n 'eh_servidor': validated_data['eh_servidor']\n }\n\n dados_usuario = Munch.fromDict(dados_usuario_dict)\n eh_servidor = validated_data['eh_servidor'] == 'S'\n\n try:\n existe_core_sso = EOLServicoSGP.usuario_existe_core_sso(login=dados_usuario.login)\n usuario = Usuario.cria_ou_atualiza_usuario_sigpae(dados_usuario=dados_usuario_dict,\n eh_servidor=eh_servidor,\n existe_core_sso=existe_core_sso)\n Vinculo.cria_vinculo(usuario=usuario, dados_usuario=dados_usuario_dict)\n eolusuariocoresso = EOLUsuarioCoreSSO()\n eolusuariocoresso.cria_ou_atualiza_usuario_core_sso(\n dados_usuario=dados_usuario,\n login=dados_usuario.login,\n eh_servidor=dados_usuario.eh_servidor,\n existe_core_sso=existe_core_sso\n )\n logger.info(f'Usuário {validated_data[\"username\"]} criado/atualizado no CoreSSO com sucesso.')\n self.enviar_email(usuario, eh_servidor)\n return usuario\n\n except IntegrityError as e:\n if 'unique constraint' in str(e):\n error = str(e)\n msg = 'Erro, informação duplicada:' + error.split('Key')[1]\n raise serializers.ValidationError(msg)\n raise IntegrityError('Erro ao tentar criar/atualizar usuário: ' + str(e))\n\n except Exception as e:\n msg = f'Erro ao tentar criar/atualizar usuário {validated_data[\"username\"]} no CoreSSO/SIGPAE: {str(e)}'\n logger.error(msg)\n raise serializers.ValidationError(msg)\n\n\nclass AlteraEmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(required=True)\n\n def update(self, instance, validated_data): # noqa\n try:\n instance.atualiza_email(validated_data.get('email'))\n\n except EOLException as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n except IntegrityError:\n return Response({'detail': 'Já existe um usuário com 
este e-mail'}, status=status.HTTP_400_BAD_REQUEST)\n except ReadTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n except ConnectTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n return instance\n\n def update_eol(self, username, validated_data):\n try:\n EOLServicoSGP.redefine_email(username, validated_data.get('email'))\n except EOLException as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n except ReadTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n except ConnectTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'detail': 'E-mail atualizado com sucesso!'}, status=status.HTTP_200_OK)\n\n class Meta:\n model = Usuario\n fields = ['uuid', 'username', 'email']\n\n\nclass AlterarVinculoSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(required=True)\n\n def atualizar_email(self, usuario, dados_usuario_dict):\n try:\n usuario.atualiza_email(dados_usuario_dict['email'])\n except IntegrityError:\n return Response({'detail': 'Já existe um usuário com este e-mail'}, status=status.HTTP_400_BAD_REQUEST)\n\n def atribuir_perfil_coresso(self, usuario, dados_usuario_dict):\n if usuario.vinculo_atual.perfil.nome != dados_usuario_dict['perfil']:\n try:\n Vinculo.cria_vinculo(usuario=usuario, dados_usuario=dados_usuario_dict)\n EOLServicoSGP.atribuir_perfil_coresso(login=usuario.username, perfil=dados_usuario_dict['perfil'])\n except EOLException as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n except ReadTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n except ConnectTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n\n def update(self, usuario, validated_data):\n dados_usuario_dict = {\n 'email': validated_data['email'],\n 'perfil': validated_data['perfil'],\n 'visao': usuario.vinculo_atual.perfil.visao,\n 'instituicao': usuario.vinculo_atual.instituicao.cnpj,\n }\n\n self.atualizar_email(usuario, dados_usuario_dict)\n self.atribuir_perfil_coresso(usuario, dados_usuario_dict)\n return usuario\n\n class Meta:\n model = Usuario\n fields = ['uuid', 'username', 'email', 'perfil']\n\n\nclass RedefinirSenhaSerializer(serializers.ModelSerializer):\n senha_atual = serializers.CharField(required=True)\n senha = serializers.CharField(required=True)\n confirmar_senha = serializers.CharField(required=True)\n\n def validate(self, attrs):\n senha_deve_ser_igual_confirmar_senha(attrs.get('senha'), attrs.get('confirmar_senha'))\n attrs.pop('confirmar_senha')\n return attrs\n\n def update(self, instance, validated_data): # noqa\n try:\n if 'token' in validated_data:\n retorno = instance.atualiza_senha(senha=validated_data['senha'], token=validated_data['token'])\n if retorno is False:\n return Response({'detail': 'O Link para o reset de senha já foi utilizado/é inválido. 
'\n 'É necessário gerar um novo link.'}, status=status.HTTP_400_BAD_REQUEST)\n else:\n checa_senha(instance, validated_data['senha_atual'])\n instance.atualiza_senha_sem_token(validated_data['senha'])\n\n except EOLException as e:\n return Response({'detail': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n except ReadTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n except ConnectTimeout:\n return Response({'detail': 'EOL Timeout'}, status=status.HTTP_400_BAD_REQUEST)\n return instance\n\n class Meta:\n model = Usuario\n fields = ['senha_atual', 'senha', 'confirmar_senha']\n\n\nclass ImportacaoPlanilhaUsuarioServidorCoreSSOSerializer(serializers.ModelSerializer):\n class Meta:\n model = ImportacaoPlanilhaUsuarioServidorCoreSSO\n exclude = ['id']\n\n\nclass ImportacaoPlanilhaUsuarioExternoCoreSSOSerializer(serializers.ModelSerializer):\n class Meta:\n model = ImportacaoPlanilhaUsuarioExternoCoreSSO\n exclude = ['id']\n\n\nclass ImportacaoPlanilhaUsuarioUEParceiraCoreSSOSerializer(serializers.ModelSerializer):\n class Meta:\n model = ImportacaoPlanilhaUsuarioUEParceiraCoreSSO\n exclude = ['id']\n\n\nclass ImportacaoPlanilhaUsuarioServidorCoreSSOCreateSerializer(serializers.ModelSerializer):\n conteudo = serializers.FileField(required=True)\n\n def validate(self, attrs):\n conteudo = attrs.get('conteudo')\n if conteudo:\n if not conteudo.name.split('.')[-1] in ['xlsx', 'xls']:\n raise serializers.ValidationError({'detail': 'Extensão do arquivo não suportada.'})\n\n return attrs\n\n class Meta:\n model = ImportacaoPlanilhaUsuarioServidorCoreSSO\n exclude = ('id',)\n\n\nclass ImportacaoPlanilhaUsuarioExternoCoreSSOCreateSerializer(serializers.ModelSerializer):\n conteudo = serializers.FileField(required=True)\n\n def validate(self, attrs):\n conteudo = attrs.get('conteudo')\n if conteudo:\n if not conteudo.name.split('.')[-1] in ['xlsx', 'xls']:\n raise serializers.ValidationError({'detail': 'Extensão do arquivo não suportada.'})\n\n return attrs\n\n class Meta:\n model = ImportacaoPlanilhaUsuarioExternoCoreSSO\n exclude = ('id',)\n\n\nclass ImportacaoPlanilhaUsuarioUEParceiraCoreSSOCreateSerializer(serializers.ModelSerializer):\n conteudo = serializers.FileField(required=True)\n\n def validate(self, attrs):\n conteudo = attrs.get('conteudo')\n if conteudo:\n if not conteudo.name.split('.')[-1] in ['xlsx', 'xls']:\n raise serializers.ValidationError({'detail': 'Extensão do arquivo não suportada.'})\n\n return attrs\n\n class Meta:\n model = ImportacaoPlanilhaUsuarioUEParceiraCoreSSO\n exclude = ('id',)\n","repo_name":"prefeiturasp/SME-Terceirizadas","sub_path":"sme_terceirizadas/perfil/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":24403,"program_lang":"python","lang":"pt","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"32595067947","text":"from abc import ABC, abstractmethod\nfrom typing import Dict, NamedTuple, Type\n\nimport gin\nimport sonnet as snt\nimport tensorflow as tf\n\nfrom sc2_imitation_learning.agents.common.feature_encoder import FeatureEncoder\nfrom sc2_imitation_learning.common.layers import MaskedGlobalAveragePooling1D\n\n\nclass UnitGroupsEncoderOutputs(NamedTuple):\n embedded_unit_group: tf.Tensor\n unit_group_embeddings: Dict[str, tf.Tensor]\n\n\ndef mask_unit_group(unit_group: tf.Tensor, unit_group_length: tf.Tensor, mask_value=0) -> tf.Tensor:\n \"\"\" Masks unit groups according to their length.\n\n Args:\n unit_group: A tensor of rank 3 with a sequence of unit 
feature vectors.\n unit_group_length: The length of the unit group (assumes all unit feature vectors upfront).\n mask_value: The mask value.\n\n Returns:\n A tensor of rank 3 where indices beyond unit_group_length are zero-masked.\n\n \"\"\"\n if unit_group_length is not None:\n # get rid of last dimensions with size 1\n if unit_group.shape.rank - unit_group_length.shape.rank < 2:\n unit_group_length = tf.squeeze(unit_group_length, axis=-1) # B\n\n # mask with mask_value\n unit_group_mask = tf.sequence_mask(\n tf.cast(unit_group_length, tf.int32), maxlen=unit_group.shape[1], dtype=unit_group.dtype) # B x T\n unit_group_mask = tf.expand_dims(unit_group_mask, axis=-1)\n unit_group *= unit_group_mask\n if mask_value != 0:\n mask_value = tf.convert_to_tensor(mask_value)\n unit_group = tf.cast(unit_group, mask_value.dtype)\n unit_group_mask = tf.cast(unit_group_mask, mask_value.dtype)\n unit_group += (1 - unit_group_mask) * mask_value\n return unit_group\n\n\nclass UnitGroupEncoder(snt.Module, ABC):\n \"\"\" Encoder module for unit group features. \"\"\"\n\n @abstractmethod\n def __call__(self, features: Dict[str, tf.Tensor]) -> UnitGroupsEncoderOutputs:\n \"\"\" Encodes the unit group features\n\n Args:\n features: A Dict with raw scalar features.\n\n Returns:\n A namedtuple with:\n - embedded_unit_group: An embedded unit group vector\n - unit_group_embeddings: A Dict of unit group embeddings.\n \"\"\"\n pass\n\n\n@gin.register\nclass ConcatAverageUnitGroupEncoder(UnitGroupEncoder):\n \"\"\" Unit group encoder module that encodes unit groups by concatenating their average embedding vectors \"\"\"\n def __init__(self,\n embedding_size: int = gin.REQUIRED,\n feature_encoders: Dict[str, Type[FeatureEncoder]] = gin.REQUIRED):\n super().__init__()\n self._feature_encoders = {key: enc() for key, enc in feature_encoders.items()}\n self._unit_group_embed = {\n key: snt.Sequential([\n\n MaskedGlobalAveragePooling1D(mask_value=0), # assume encoded unit group are zero masked before.\n snt.Linear(output_size=embedding_size),\n tf.nn.relu\n ])\n for key in self._feature_encoders.keys()\n }\n\n def __call__(self, features: Dict[str, tf.Tensor]) -> UnitGroupsEncoderOutputs:\n unit_group_embeddings = {\n key: enc(mask_unit_group(features[key], features.get(f'{key}_length', None), -1))\n for key, enc in self._feature_encoders.items()}\n\n embedded_unit_groups = {\n key: emb(unit_group_embeddings[key])\n for key, emb in self._unit_group_embed.items()\n }\n embedded_unit_groups = tf.concat(tf.nest.flatten(embedded_unit_groups), axis=-1)\n\n return UnitGroupsEncoderOutputs(\n embedded_unit_group=embedded_unit_groups, unit_group_embeddings=unit_group_embeddings)\n","repo_name":"chscheller/sc2_imitation_learning","sub_path":"sc2_imitation_learning/agents/common/unit_group_encoder.py","file_name":"unit_group_encoder.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"62"} +{"seq_id":"25198310224","text":"def is_prime_number(x):\n # 1이 아니면서 2부터 (x-1)까지의 수로 나누어 떨어지지 않는 수\n if x == 1:\n return False\n for i in range(2, x):\n if x % i == 0:\n return False\n return True\n\n\nN = int(input())\nnums = list(map(int, input().split()))\nanswer = 0\nfor n in nums:\n if is_prime_number(n):\n answer += 1\nprint(answer)\n","repo_name":"Aqudi/Today_ps","sub_path":"backjoon/1978 소수 찾기.py","file_name":"1978 소수 
찾기.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"23721599848","text":"n= int(input())\n\n\ndef isVps(word):\n    stackLst=[]\n\n    for i in word:\n        if i==\"(\":\n            stackLst.append(\"(\")\n        else:\n            if len(stackLst)==0:\n                return \"NO\"\n            stackLst.pop()\n    if len(stackLst) == 0:\n        return \"YES\"\n    else:\n        return \"NO\"\n\n\nvpsList = [input() for _ in range(n)]\nfor i in vpsList:\n    print(isVps(i))\n\n\n","repo_name":"ink-0/training","sub_path":"algo/Python/baekjoon/문자열/9012_괄호.py","file_name":"9012_괄호.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"18071521234","text":"from django.urls import path\nfrom . import views\nfrom django.views.generic import TemplateView\n\n\napp_name = \"books\"\n\nurlpatterns = [\n    path(\"home/\", views.home_display_books, name=\"display-books\"),\n    path(\"details/<int:pk>/\", views.DetailBook, name=\"detail-books\"),\n    path(\"create_book/\", views.create_book, name=\"create-view\"),\n    path(\"update_book/<int:pk>/\",\n         views.update_free_book, name=\"update-view\"),\n    path(\"create_non_free_book/\",\n         views.create_non_free_book, name=\"create-non-free-view\"),\n    path(\"update_non_free_book/<int:pk>/\",\n         views.update_non_free_book, name=\"update-non-free-view\"),\n    path(\"delete_book/<int:pk>/\", views.DeleteBook.as_view(), name=\"delete-view\"),\n    path(\"create_review/<int:pk>/\",\n         views.CreateReview.as_view(), name=\"create-review\"),\n    path(\"update_review/<int:pk>/\",\n         views.UpdateReview.as_view(), name=\"update-review\"),\n    path(\"delete_review/<int:pk>/\",\n         views.DeleteReview.as_view(), name=\"delete-review\"),\n    path(\"search_nav//\",\n         views.search_nav, name=\"search-nav\"),\n    path('index_search/', views.IndexViews_search.as_view(), name=\"index-search\"),\n    path('index_specific_search//', views.IndexViews_specific_search.as_view(),\n         name=\"index-specific-search\"),\n    path('index_filtering//', views.home_display_books_filtered,\n         name=\"display-filtered\"),\n    path(\"cart/\", views.Cart_display_view, name=\"user-cart\"),\n    path(\"add_to_cart/<int:pk>/\",\n         views.add_item_to_cart, name=\"add-to-cart\"),\n    path(\"remove_from_cart/<int:pk>/\",\n         views.remove_item_from_cart, name=\"remove-from-cart\"),\n    path(\"empty_cart/\",\n         views.empty_cart, name=\"empty-cart\"),\n    path(\"checkout/\",\n         views.checkout, name=\"checkout\"),\n    path(\"order/\", views.create_order, name=\"create-order\"),\n    path(\"order_summary/\", views.order_summary, name=\"order-summary\"),\n    path(\"charge/\", views.charge, name=\"charge\"),\n    path(\"confirmed_orders/\", views.display_ordered_books,\n         name=\"confirmed-ordered-books\"),\n    path(\"settings/\", views.Settings,\n         name=\"settings\"),\n\n\n\n\n\n\n]\n","repo_name":"Mastercsay25/BookStoreV1","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37979495618","text":"import unittest\nfrom greeter import Greeter\n\nclass GreeterTest(unittest.TestCase):\n    def test_greeter_returns_expected(self):\n        greeter = Greeter()\n        self.assertEqual(greeter.greet(\"Quentin\"), \"Hello, Quentin\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()","repo_name":"kiraacorsac/quentin-team-oop","sub_path":"TDD/00-testing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"19340856744","text":"#!/usr/bin/env python\n\nimport time\nimport traceback\n\nfrom letters import Letter\nimport unicornhat as unicorn\n\ndef main():\n    try:\n        unicorn.set_layout(unicorn.AUTO)\n        unicorn.rotation(0)\n        unicorn.brightness(0.3)\n        width,height=unicorn.get_shape()\n        letters = Letter.V + Letter.I + Letter.T\n\n        yellow = [247, 178, 28]\n        white = [255, 255, 255]\n        colors = [yellow, yellow, yellow, white, yellow, yellow, yellow, white]\n\n        for x in range(width):\n            rgb = colors[x]\n            for y in range(height):\n                if len(letters) > x :\n                    row = letters[x]\n                    if len(row) > y :\n                        if row[y] == 1 :\n                            unicorn.set_pixel(x, y, rgb[0], rgb[1], rgb[2])\n\n        unicorn.show()\n        time.sleep(0.05)\n\n    except Exception:\n        print('traceback.format_exc():\\n%s' % traceback.format_exc())\n        exit()\n\nif __name__ == '__main__':\n    while True:\n        main()\n        time.sleep(60*10)\n","repo_name":"TheryCN/ty-pi-light","sub_path":"py/vitality.py","file_name":"vitality.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71327894279","text":"\"\"\"\nYour task: Write a program in main.py,\nthat makes Karel pick up a beeper\nand go back into her house.\n\nKarel starts off in the corner of her house\nas shown in the world. She needs to collect some food,\nrepresented (as all objects in Karel's world are) by a beeper, \nfrom outside the doorway and then to return to her initial position.\nYou can assume that every part of the world is always the same.\nThe house is exactly this size,\nthe door is always in the position shown,\nand the beeper is just outside the door.\nThus, all you have to do is write\nthe sequence of commands necessary to have Karel\n\n1. Move to the beeper,\n2. Pick it up, and\n3. 
Return to her starting point.\n\nEven though the program is only a few lines,\nit is still worth getting at least\na little practice in decomposition.\nIn your solution, include a function\nfor moving to the package, and returning to the starting point.\n\"\"\"\n\nfrom karel.stanfordkarel import *\n\n# File: shelter.py\n# -----------------------------\n# The warmup program defines a \"main\"\n# function which should make Karel \n# move to the beeper, pick it up, and\n# return home.\ndef main():\n move()\n # add your code here\n # first step: move into beeper and pick it up\n move()\n turn_right()\n \n move()\n turn_left()\n move()\n \n pick_beeper()\n \n # second step: move back into starting point\n reverse_turn()\n \n move()\n turn_right()\n move()\n \n turn_left()\n move()\n move()\n \n # reposition Karel to normal position\n reverse_turn()\n \n\ndef turn_right():\n \"\"\"Function for Karel to turn right (turn left 3 times)\"\"\"\n for i in range(3):\n turn_left()\n \ndef reverse_turn():\n \"\"\"Function for Karel to reverse turn\"\"\"\n for i in range(2):\n turn_left()\n \n \n# don't edit these next two lines\n# they tell python to run your main function\nif __name__ == '__main__':\n main()","repo_name":"daimessdn/stanford-cip-3","sub_path":"pre-week/karels-home.py","file_name":"karels-home.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71209388359","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib\n\nimg = cv.imread('cube.jpg')\noverlay = img.copy()\n\n\n\ncv.rectangle(overlay, (30,30), (150,150), (30, 255, 30), 7, cv.LINE_AA) # Top-left corner\ncv.rectangle(overlay, (370,30), (250,150), (30, 255, 30), 7, cv.LINE_AA) # Top-right corner\ncv.rectangle(overlay, (30,370), (150,250), (30, 255, 30), 7, cv.LINE_AA) # Bottom-left corner\ncv.rectangle(overlay, (370,370), (250,250), (30, 255, 30), 7, cv.LINE_AA) # Bottom-right corner\ncv.rectangle(overlay, (30,30), (370,370), (96, 215, 30), cv.FILLED, cv.LINE_AA)\n#cv.rectangle(img, (30,30), (370,370), (96, 215, 30), cv.FILLED, cv.LINE_AA) # Green\n\n#cv.putText(img, (30,30), (370,370), cv.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 2, cv.LINE_AA)\nopacity = 0.75\ncv.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)\n\nwhile True: \n cv.imshow(\"test\", img)\n\n k = cv.waitKey(1)\n if k == 27: # Esc key to breakloop and shutdown\n break\n\ncv.destroyAllWindows()","repo_name":"y-janssens/HandControl","sub_path":"Keyboard/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2845144688","text":"\"\"\"\nGiven a string S, check if the letters can be rearranged so that two characters that are adjacent to each other are not the same.\n\nIf possible, output any possible result. 
If not possible, return the empty string.\n\nExample 1:\n\nInput: S = \"aab\"\nOutput: \"aba\"\nExample 2:\n\nInput: S = \"aaab\"\nOutput: \"\"\nNote:\n\nS will consist of lowercase letters and have length in range [1, 500].\n\"\"\"\n\nimport collections\nclass Solution(object):\n    def reorganizeString(self, S):\n        \"\"\"\n        :type S: str\n        :rtype: str\n        \"\"\"\n        counter = collections.Counter(S)\n        res = \"#\" # res must start with an initial character, otherwise res[-1] would raise an error on the first pass!\n        while counter:\n            stop = True\n            for c, times in counter.most_common(): # counter.most_common() returns (element, count) pairs\n                if res[-1] != c:\n                    res += c\n                    counter[c] -= 1\n                    if counter[c] == 0:\n                        del counter[c] # del counter[key] removes the key completely\n                    stop = False\n                    break\n            if stop: break\n        return res[1:] if len(res) == len(S)+1 else \"\"\n","repo_name":"lixuanhong/LeetCode","sub_path":"767. Reorganize String.py","file_name":"767. Reorganize String.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70802502597","text":"import discord\r\nfrom dotenv import load_dotenv\r\nimport os\r\nimport json\r\nimport random\r\nfrom discord.ext import commands, tasks\r\nfrom discord.ext.commands import cooldown, CommandOnCooldown\r\nfrom discord.ext.commands.cooldowns import BucketType\r\n\r\nclass Currency(commands.Cog):\r\n    def __init__(self, client):\r\n        self.client = client\r\n\r\n\r\n\r\n    @commands.command()\r\n    @commands.cooldown(1, 120, commands.BucketType.user)\r\n    async def find(self,ctx):\r\n        try:\r\n            global r\r\n            r = int(float(random.randint(1,100))/2)\r\n            if r == 0:\r\n                return await ctx.send(embed=discord.Embed(title=\"You suck, you didn't find any nuts.\", color=0xe0ad53))\r\n            if r == 1:\r\n                return await ctx.send(embed=discord.Embed(title=\"You found a nut!\", color=0xe0ad53))\r\n            if r == 50:\r\n                return await ctx.send(embed=discord.Embed(title=\"You found 50 nuts! Woah...\", color=0xe0ad53))\r\n            else:\r\n                return await ctx.send(embed=discord.Embed(title=\"You found \" + str(r) + \" nuts!\", color=0xe0ad53))\r\n        except Exception as error: \r\n            raise error\r\n\r\n    @find.error\r\n    async def find_error(self, ctx, error):\r\n        if isinstance(error, commands.CommandOnCooldown):\r\n            await ctx.send(embed=discord.Embed(title=\"Woah slow down, buddy.\", description = f\"Your puny body is still recovering from the last expedition. You should rest for {int(error.retry_after)} more seconds.\", color=0xd60000))\r\n\r\n    @commands.command()\r\n    async def search(self, ctx):\r\n        await ctx.invoke(self.find)\r\n\r\ndef setup(client):\r\n    client.add_cog(Currency(client))","repo_name":"Mouseeeee/NutBot","sub_path":"Currency.py","file_name":"Currency.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6208954723","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\n\n# Python uses def to define a function; myurl is the function's parameter\ndef get_url_name(myurl):\n    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36'\n    header = {} # the dict must be defined before use; using it directly would raise an error.\n    header['user-agent'] = user_agent\n\n    # url = \"https://book.douban.com/top250\"\n\n    # response = requests.get(url,headers=header)\n    response = requests.get(myurl,headers=header)\n    # print(response.text)\n    bs_info = bs(response.text, 'html.parser') # the lxml parser is more powerful and more tolerant\n    # print(bs_info.find_all('div',attrs={'class':'pl2'})[0]) # finds the wanted content without regex: every div matching attrs\n    for tags in bs_info.find_all('div', attrs={'class':'pl2'}):\n        # get the a tag\n        # a_tag = tags.contents[1]\n        # print(a_tag)\n        for atag in tags.find_all('a',):\n            # get all links\n            print(atag.get('href'))\n            # get the book title\n            print(atag.get('title'))\nurls = tuple(f'https://book.douban.com/top250?start={ page * 25}' for page in range(10)) # f-strings are supported since Python 3.6\nbase_url = 'https://book.douban.com/top250?start='\n# urls = [base_url + str(i * 25) for i in range(10)]\nprint(urls)\n\nfrom time import sleep # sleep to avoid being banned from crawling\nif __name__ == '__main__':\n    for page in urls:\n        get_url_name(page)\n        sleep(5)","repo_name":"tuxnotes/python_gktrain","sub_path":"douban1.py","file_name":"douban1.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29662034519","text":"import heapq\n\n\ndef solution(scoville, K):\n    heapq.heapify(scoville)\n    print(scoville)\n    cnt=0\n    while len(scoville)>=1:\n        first = heapq.heappop(scoville)\n        print(first)\n        if first >= K:\n            print(cnt)\n            return cnt\n        if len(scoville)==0: return -1\n        second = heapq.heappop(scoville)\n        heapq.heappush(scoville, first+(second*2))\n        cnt+=1\n    return -1\n\n\nif __name__ == '__main__':\n    scoville = [1, 2, 3]\n    K = 11\n    print(solution(scoville, K))","repo_name":"airaider/python_algo_study","sub_path":"level 2/더 맵게.py","file_name":"더 맵게.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"32671870065","text":"import asyncio\nimport telepot\nimport telepot.aio\nimport pprint\nfrom telepot.aio.loop import MessageLoop\nfrom telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove\n\nTOKEN = '5335921389:AAFz6bQA7N2hGejNTOv5KMHr4OZiMe1YSns'\nbot = telepot.Bot(TOKEN)\n\nasync def handle(msg):\n    content_type, chat_type, chat_id = telepot.glance(msg)\n    pprint.pprint(msg)\n    if content_type == 'text':\n        message=msg['text']\n        await bot.sendMessage(chat_id,'Hi!')\n        return\n\nbot = telepot.aio.Bot(TOKEN)\nloop = asyncio.get_event_loop()\n\nloop.create_task(MessageLoop(bot, handle).run_forever())\nprint('Listening 
...')\n\nloop.run_forever()\n","repo_name":"taheralnoori/telebot","sub_path":"inline_sample.py","file_name":"inline_sample.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37554238900","text":"\nimport mdtraj as md\nimport numpy as np\nimport pandas as pd\n#import seaborn as sns\nimport matplotlib\nimport matplotlib.font_manager as font_manager\nimport matplotlib.pyplot as plt\n\n\n###############################################################################\n# ROLAVGPLOT\n###############################################################################\n\ndef rol_avgplot2(data, window_size, no_of_std, ylims, ylabel, figure_name, save):\n    keys = list(data.keys())\n    if num_pep == 9:\n        g, axs = plt.subplots(3, 3, figsize=(30, 15))\n        # reorder = [6,1,5,4,0,2,8,3,7]\n        reorder = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n    elif num_pep == 12:\n        g, axs = plt.subplots(3, 4, figsize=(30, 15))\n        reorder = [11, 7, 3, 4, 10, 1, 0, 2, 8, 5, 6, 9]\n    axs = axs.ravel()\n    # x = np.linspace(0,500000.0,num=25001)\n    x = np.linspace(0,1000000.0,num=10001)\n    colours=['xkcd:red','xkcd:orange','xkcd:vibrant purple','xkcd:maroon',\n             'xkcd:cerulean','xkcd:deep magenta','xkcd:teal','xkcd:green',\n             'xkcd:purple','xkcd:grapefruit','xkcd:forest green','xkcd:indigo']\n    shade = 0.4\n    c=0\n    for i in range(len(keys)):\n        nm = keys[i]\n        for p in data[nm]:\n            b = reorder[p]\n            df = pd.DataFrame(data[nm][b])\n            rolling_mean = df[:].rolling(window_size, center=True).mean()\n            rolling_std = df[:].rolling(window_size, center=True).std()\n            num = np.shape(x)[0]\n            mean = np.reshape(rolling_mean.values,num)\n            minim = np.reshape((rolling_mean + (rolling_std * no_of_std)).values,num)\n            maxim = np.reshape((rolling_mean - (rolling_std * no_of_std)).values, num)\n            axs[p].fill_between([a/1000 for a in x],minim, maxim, alpha=(shade/3), facecolor='{}'.format(colours[c]))\n            axs[p].plot([a/1000 for a in x],mean, alpha = shade, linewidth=2.0, color='{}'.format(colours[c]), zorder = 20)\n            axs[p].set_title(\"Peptide \" + str(b+1), fontsize=22)\n            # axs[p].set_xlabel(\"Simulation Time / $\\mu$ s\")\n            axs[p].set_xlabel(\"Simulation Time (ns)\", fontsize=16)\n            axs[p].set_ylabel(ylabel, fontsize=16)\n            axs[p].set_ylim(ylims)\n            axs[p].set_xlim(0.0, 500)\n            axs[p].set_xlim(0.0, 1000)\n            axs[p].tick_params(labelsize=12)\n            # axs[p].legend(legend, loc=1) \n            c+=1\n        c=0\n        shade = 1\n    plt.subplots_adjust(hspace=0.4)\n    if save == True:\n        g.savefig(FIGDIR + mol + '/' + figure_name + \".png\")\n\n\ndef plot_brush_csv(csv, ylims, ylabel):\n    data = pd.read_csv(csv)\n\n    g, axs = plt.subplots(4, 4, figsize=(30, 25))\n    axs = axs.ravel()\n    x = np.linspace(0,1000000.0,num=10001)\n    colours = ['xkcd:red','xkcd:orange','xkcd:vibrant purple','xkcd:maroon',\n               'xkcd:cerulean','xkcd:deep magenta','xkcd:teal','xkcd:green',\n               'xkcd:purple','xkcd:grapefruit','xkcd:forest green','xkcd:indigo',\n               'xkcd:navy','xkcd:gray','xkcd:yellow','xkcd:pink']\n    shade = 0.4\n    c = 0\n    window_size = 200\n    x = np.arange(50000)\n    for i in range(16):\n        pep = 'pep'+str(i+1)\n        datum = pd.DataFrame(data[pep], columns=[pep])\n        if \"hel\" in csv or \"strand\" in csv:\n            print('USING HELICITY ADJUSTMENT')\n            datum[pep] = (datum[pep]/(pep_length-2))*100\n            print(datum.head())\n        datum[pep+'_rm'] = datum[pep].rolling(window_size, center=True).mean()\n        datum[pep+'_std'] = datum[pep].rolling(window_size, center=True).std()\n        datum[pep+'_min'] = datum[pep+'_rm'] + datum[pep+'_std']\n        datum[pep+'_max'] = datum[pep+'_rm'] - datum[pep+'_std']\n\n        axs[i].plot(datum[pep+'_rm'], linewidth=2.0, color='{}'.format(colours[c]), zorder=20)\n        axs[i].fill_between(x, datum[pep+'_min'], datum[pep+'_max'], alpha=(shade/3), facecolor='{}'.format(colours[c]))\n\n        axs[i].set_title(\"Peptide \" + str(i+1), fontsize=22)\n        axs[i].set_xlabel(\"Simulation Time (ns)\", fontsize=16)\n        axs[i].set_ylabel(ylabel, fontsize=16)\n        if \"hel\" in csv or \"strand\" in csv:\n            axs[i].set_ylim(0.0, 100.0)\n        else:\n            axs[i].set_ylim(ylims)\n        axs[i].set_xlim(0.0, 50000.0)\n        ns = np.linspace(0, 1000, 11, dtype='int')\n        ts = np.linspace(0, 50000, 11)\n        axs[i].set_xticks(ticks=ts)\n        axs[i].set_xticklabels(labels=ns, fontsize=12)\n        # axs[p].legend(legend, loc=1)\n        c += 1\n    plt.subplots_adjust(hspace=0.4)\n    g.savefig(FIGDIR + csv + \".png\", bbox_inches='tight', transparent=True, dpi=300)\n\ndef plot_indcomb(name, pep_length, ylims, ylabels, window_size):\n    \"\"\" plot the other things \"\"\"\n\n    col = {'capd':\"#d08770\", 'uncapd': \"#b48ead\"}\n    font_sizes = [32, 24]\n    x = np.arange(100000)\n\n    uncapped_name = name[0]+name[1]\n    #capped_name = name[0]+\"Ncapped_\"+name[1]\n\n    #capped_data = pd.read_csv(capped_name, usecols=['tot_hel'])\n    uncapped_data = pd.read_csv(uncapped_name, usecols=['tot_hel'])\n    if \"hel\" in name[1] or \"strand\" in name[1]:\n        print('USING HELICITY ADJUSTMENT')\n        # capped_data['tot_hel'] = (capped_data['tot_hel'] / (num_pep*((pep_length+1)-2)))*100\n        uncapped_data['tot_hel'] = (uncapped_data['tot_hel'] / (num_pep*(pep_length-2)))*100\n    #capped_data.rename(columns={'tot_hel': 'capd'}, inplace=True)\n    uncapped_data.rename(columns={'tot_hel': 'uncapd'}, inplace=True)\n\n    #data = capped_data.merge(uncapped_data, left_index=True, right_index=True)\n    data = uncapped_data\n\n    #for d in ['capd', 'uncapd']:\n    for d in ['uncapd']:\n        data[d+'_rm'] = data[d].rolling(window_size, center=True).mean()\n        data[d+'_std'] = data[d].rolling(window_size, center=True).std()\n        data[d+'_min'] = data[d+'_rm'] + data[d+'_std']\n        data[d+'_max'] = data[d+'_rm'] - data[d+'_std']\n\n        fig, ax1 = plt.subplots(figsize=(20, 8))\n        ax1.plot(data[d+'_rm'], color=col[d], linewidth=4.0, zorder=21)\n        ax1.plot(data[d], color=col[d], alpha=.5, linewidth=2.0, zorder=21)\n        #ax1.fill_between(x, data[d+'_min'], data[d+'_max'],\n        #alpha=.3, facecolor=col[d], zorder=20)\n        ax1.set_ylabel(ylabels, fontweight='medium',\n                       fontsize=font_sizes[0])\n        ax1.set_ylim(ylims)\n        ax1.set_xlabel('Time (ns)', fontweight='medium', fontsize=font_sizes[0])\n        ax1.set_xlim(0.0, 50000.0)\n        ns = np.linspace(0, 1000, 11, dtype='int')\n        ts = np.linspace(0, 50000, 11)\n        ax1.set_xticks(ticks=ts)\n        ax1.set_xticklabels(labels=ns, fontweight='medium', fontsize=font_sizes[1])\n        ax1.legend(['Rolling Mean', 'Raw Data'], fontsize=font_sizes[1])\n        plt.yticks(fontweight='medium', fontsize=font_sizes[1])\n        fig.savefig(FIGDIR+uncapped_name+\"_\"+d+\".png\", bbox_inches='tight', transparent=True, dpi=300)\n\n","repo_name":"ravenswing/phd_tools","sub_path":"python_scripts/phd_scripts_backup/plotBrush.py","file_name":"plotBrush.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"176062202","text":"def find_minimum_number_of_moves(rows, cols, start_row, start_col, end_row, end_col):\n    min_steps = -1\n\n    row_low = start_row if start_row < end_row else end_row\n    row_high = start_row if start_row > end_row else end_row\n    row_dist = row_high - row_low\n    col_low = start_col if start_col < end_col else end_col\n    col_high = start_col if start_col > end_col else end_col\n    
col_dist = col_high - col_low\n    import math\n    max_moves = int(math.sqrt(row_dist ** 2 + col_dist ** 2) * 0.7 + 0.5 + 2)\n\n    deltas = [(-2, -1), (-2, +1), (+2, -1), (+2, +1), (-1, -2), (-1, +2), (+1, -2), (+1, +2)]\n    def getAllValidMoves(y0, x0):\n        validPositions = []\n        for (x, y) in deltas:\n            xCandidate = x0 + x\n            yCandidate = y0 + y\n            if 0 <= xCandidate < cols and 0 <= yCandidate < rows:\n                validPositions.append([yCandidate, xCandidate])\n\n        return validPositions\n\n    def dfs(row, col, level):\n        nonlocal min_steps\n        if row == end_row and col == end_col:\n            if level < min_steps or min_steps == -1:\n                min_steps = level\n\n            return\n\n        if level > max_moves:\n            return\n\n        for move in getAllValidMoves(row, col):\n            if move[0] >= row_low and move[0] <= row_high or move[1] >= col_low and move[1] <= col_high:\n                dfs(move[0], move[1], level + 1)\n\n    dfs(start_row, start_col, 0)\n    return min_steps\n","repo_name":"MrCsabaToth/IK","sub_path":"2019Nov/practice4/knight_tour_dfs.py","file_name":"knight_tour_dfs.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18927586666","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom children_ui import PrefWindow\nfrom fileutils import resource_path\n\nclass MainWindow(QtWidgets.QMainWindow):\n    def __init__(self, version: str):\n        super().__init__()\n\n        self._job_active = False\n        self.apiRevision = version\n        self.setup_ui()\n\n        self.LightTheme = self.palette()\n\n        self.DarkTheme = QtGui.QPalette()\n        self.DarkTheme.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))\n        self.DarkTheme.setColor(QtGui.QPalette.WindowText, QtCore.Qt.white)\n        self.DarkTheme.setColor(QtGui.QPalette.Base, QtGui.QColor(25, 25, 25))\n        self.DarkTheme.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))\n        self.DarkTheme.setColor(QtGui.QPalette.ToolTipBase, QtCore.Qt.black)\n        self.DarkTheme.setColor(QtGui.QPalette.ToolTipText, QtCore.Qt.white)\n        self.DarkTheme.setColor(QtGui.QPalette.Text, QtCore.Qt.white)\n        self.DarkTheme.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))\n        self.DarkTheme.setColor(QtGui.QPalette.ButtonText, QtCore.Qt.white)\n        self.DarkTheme.setColor(QtGui.QPalette.BrightText, QtCore.Qt.red)\n        self.DarkTheme.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))\n        self.DarkTheme.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))\n        self.DarkTheme.setColor(QtGui.QPalette.HighlightedText, QtCore.Qt.black)\n\n    def set_job_activity(self, active: bool):\n        self._job_active = active\n\n    def closeEvent(self, event: QtGui.QCloseEvent):\n        if self._job_active:\n            reply = QtWidgets.QMessageBox(self)\n            reply.setWindowTitle(\"Active job\")\n            reply.setText(\"GeckoLoader is busy!\")\n            reply.setInformativeText(\"Exiting is disabled\")\n            reply.setIcon(QtWidgets.QMessageBox.Warning)\n            reply.setStandardButtons(QtWidgets.QMessageBox.Ok)\n            reply.setDefaultButton(QtWidgets.QMessageBox.Ok)\n            reply.exec_()\n            event.ignore()\n        else:\n            event.accept()\n\n    def setup_ui(self):\n        self.setObjectName(\"MainWindow\")\n        self.setWindowModality(QtCore.Qt.NonModal)\n        self.setEnabled(True)\n        self.setFixedSize(550, 680)\n        font = QtGui.QFont()\n        font.setFamily(\"Helvetica\")\n        font.setPointSize(10)\n        font.setWeight(42)\n        self.setFont(font)\n        icon = QtGui.QIcon()\n        icon.addPixmap(QtGui.QPixmap(str(resource_path(\"bin/icon.ico\"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        self.setWindowIcon(icon)\n\n        #Top level widget\n        self.centerWidget = QtWidgets.QWidget(self)\n        self.centerWidget.setObjectName(\"centerWidget\")\n\n        self.gridLayout = QtWidgets.QGridLayout(self.centerWidget)\n        self.gridLayout.setVerticalSpacing(0)\n        self.gridLayout.setObjectName(\"gridLayout\")\n\n        #Layout for file paths and open boxes\n        self.filesLayout = QtWidgets.QGridLayout()\n        self.filesLayout.setHorizontalSpacing(0)\n        self.filesLayout.setObjectName(\"filesLayout\")\n\n        self.dolLayout = QtWidgets.QGridLayout()\n        self.dolLayout.setHorizontalSpacing(0)\n        self.dolLayout.setObjectName(\"dolLayout\")\n\n        #Layout for folder path\n        self.gctLayout = QtWidgets.QGridLayout()\n        self.gctLayout.setHorizontalSpacing(0)\n        self.gctLayout.setVerticalSpacing(5)\n        self.gctLayout.setObjectName(\"gctLayout\")\n\n        self.destLayout = QtWidgets.QGridLayout()\n        self.destLayout.setHorizontalSpacing(0)\n        self.destLayout.setObjectName(\"destLayout\")\n\n        #Files label\n        self.filesLabel = QtWidgets.QLabel(self.centerWidget)\n        self.filesLabel.setEnabled(False)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.filesLabel.sizePolicy().hasHeightForWidth())\n        self.filesLabel.setSizePolicy(sizePolicy)\n        self.filesLabel.setMinimumSize(QtCore.QSize(80, 30))\n        self.filesLabel.setMaximumSize(QtCore.QSize(16777215, 30))\n        font = QtGui.QFont(\"Helvetica\")\n        font.setPointSize(21)\n        font.setWeight(82)\n        font.setBold(True)\n        self.filesLabel.setFont(font)\n        self.filesLabel.setTextFormat(QtCore.Qt.PlainText)\n        self.filesLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.filesLabel.setObjectName(\"filesLabel\")\n\n        #Dol button to open file\n        self.dolButton = QtWidgets.QPushButton(self.centerWidget)\n        self.dolButton.setMinimumSize(QtCore.QSize(100, 26))\n        self.dolButton.setMaximumSize(QtCore.QSize(100, 26))\n        font = QtGui.QFont(\"Helvetica\")\n        font.setPointSize(11)\n        self.dolButton.setFont(font)\n        self.dolButton.setCheckable(False)\n        self.dolButton.setChecked(False)\n        self.dolButton.setAutoDefault(True)\n        self.dolButton.setDefault(False)\n        self.dolButton.setFlat(False)\n        self.dolButton.setObjectName(\"dolButton\")\n        self.dolLayout.addWidget(self.dolButton, 1, 0, 1, 1)\n\n        #Dol path textbox\n        self.dolTextBox = QtWidgets.QLineEdit(self.centerWidget)\n        self.dolTextBox.setEnabled(False)\n        self.dolTextBox.setMinimumSize(QtCore.QSize(200, 24))\n        self.dolTextBox.setMaximumSize(QtCore.QSize(16777215, 24))\n        font = QtGui.QFont()\n        font.setFamily(\"Consolas\")\n        font.setPointSize(10)\n        font.setWeight(42)\n        self.dolTextBox.setFont(font)\n        self.dolTextBox.setText(\"\")\n        self.dolTextBox.setMaxLength(255)\n        self.dolTextBox.setFrame(True)\n        self.dolTextBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.dolTextBox.setObjectName(\"dolTextBox\")\n        self.dolLayout.addWidget(self.dolTextBox, 1, 1, 1, 1)\n\n        #horizontal separator codes\n        self.horiSepFiles = QtWidgets.QFrame(self.centerWidget)\n        self.horiSepFiles.setMinimumSize(QtCore.QSize(474, 30))\n        self.horiSepFiles.setContentsMargins(20, 0, 20, 0)\n        self.horiSepFiles.setFrameShape(QtWidgets.QFrame.HLine)\n        self.horiSepFiles.setFrameShadow(QtWidgets.QFrame.Sunken)\n        self.horiSepFiles.setObjectName(\"horiSepFiles\")\n\n        #gctFile button to open file\n        self.gctFileButton = QtWidgets.QPushButton(self.centerWidget)\n        self.gctFileButton.setMinimumSize(QtCore.QSize(100, 26))\n        self.gctFileButton.setMaximumSize(QtCore.QSize(100, 26))\n        font = QtGui.QFont(\"Helvetica\")\n        
font.setPointSize(10)\n self.gctFileButton.setFont(font)\n self.gctFileButton.setCheckable(False)\n self.gctFileButton.setChecked(False)\n self.gctFileButton.setAutoDefault(True)\n self.gctFileButton.setDefault(False)\n self.gctFileButton.setFlat(False)\n self.gctFileButton.setObjectName(\"gctFileButton\")\n self.gctLayout.addWidget(self.gctFileButton, 0, 0, 1, 1)\n\n #gctFile path textbox\n self.gctFileTextBox = QtWidgets.QLineEdit(self.centerWidget)\n self.gctFileTextBox.setEnabled(False)\n self.gctFileTextBox.setMinimumSize(QtCore.QSize(200, 24))\n self.gctFileTextBox.setMaximumSize(QtCore.QSize(16777215, 24))\n font = QtGui.QFont()\n font.setFamily(\"Consolas\")\n font.setPointSize(10)\n font.setWeight(42)\n self.gctFileTextBox.setFont(font)\n self.gctFileTextBox.setText(\"\")\n self.gctFileTextBox.setMaxLength(255)\n self.gctFileTextBox.setFrame(True)\n self.gctFileTextBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.gctFileTextBox.setObjectName(\"gctFileTextBox\")\n self.gctLayout.addWidget(self.gctFileTextBox, 0, 1, 1, 1)\n\n #--or-- Label\n self.orFolderLabel = QtWidgets.QLabel(self.centerWidget)\n self.orFolderLabel.setEnabled(False)\n self.orFolderLabel.setMinimumSize(QtCore.QSize(80, 8))\n self.orFolderLabel.setMaximumSize(QtCore.QSize(16777215, 8))\n font = QtGui.QFont(\"Helvetica\")\n font.setPointSize(8)\n font.setWeight(82)\n font.setBold(True)\n self.orFolderLabel.setFont(font)\n self.orFolderLabel.setTextFormat(QtCore.Qt.PlainText)\n self.orFolderLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.orFolderLabel.setObjectName(\"orFolderLabel\")\n self.gctLayout.addWidget(self.orFolderLabel, 1, 0, 1, 2)\n\n #gctFolder button to open file\n self.gctFolderButton = QtWidgets.QPushButton(self.centerWidget)\n self.gctFolderButton.setMinimumSize(QtCore.QSize(100, 26))\n self.gctFolderButton.setMaximumSize(QtCore.QSize(100, 26))\n font = QtGui.QFont(\"Helvetica\")\n font.setPointSize(10)\n self.gctFolderButton.setFont(font)\n self.gctFolderButton.setCheckable(False)\n self.gctFolderButton.setChecked(False)\n self.gctFolderButton.setAutoDefault(True)\n self.gctFolderButton.setDefault(False)\n self.gctFolderButton.setFlat(False)\n self.gctFolderButton.setObjectName(\"gctFolderButton\")\n self.gctLayout.addWidget(self.gctFolderButton, 2, 0, 1, 1)\n\n #gctFolder path textbox\n self.gctFolderTextBox = QtWidgets.QLineEdit(self.centerWidget)\n self.gctFolderTextBox.setEnabled(False)\n self.gctFolderTextBox.setMinimumSize(QtCore.QSize(200, 24))\n self.gctFolderTextBox.setMaximumSize(QtCore.QSize(16777215, 24))\n font = QtGui.QFont()\n font.setFamily(\"Consolas\")\n font.setPointSize(10)\n font.setWeight(42)\n self.gctFolderTextBox.setFont(font)\n self.gctFolderTextBox.setText(\"\")\n self.gctFolderTextBox.setMaxLength(255)\n self.gctFolderTextBox.setFrame(True)\n self.gctFolderTextBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.gctFolderTextBox.setObjectName(\"gctFolderTextBox\")\n self.gctLayout.addWidget(self.gctFolderTextBox, 2, 1, 1, 1)\n\n #horizontal separater dest\n self.horiSepDest = QtWidgets.QFrame(self.centerWidget)\n self.horiSepDest.setMinimumSize(QtCore.QSize(474, 30))\n self.horiSepDest.setContentsMargins(20, 0, 20, 0)\n self.horiSepDest.setFrameShape(QtWidgets.QFrame.HLine)\n self.horiSepDest.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.horiSepDest.setObjectName(\"horiSepDest\")\n\n #Dest button to open file\n self.destButton = 
QtWidgets.QPushButton(self.centerWidget)\n self.destButton.setMinimumSize(QtCore.QSize(100, 26))\n self.destButton.setMaximumSize(QtCore.QSize(100, 26))\n font = QtGui.QFont(\"Helvetica\")\n font.setPointSize(11)\n self.destButton.setFont(font)\n self.destButton.setCheckable(False)\n self.destButton.setChecked(False)\n self.destButton.setAutoDefault(True)\n self.destButton.setDefault(False)\n self.destButton.setFlat(False)\n self.destButton.setObjectName(\"destButton\")\n self.destLayout.addWidget(self.destButton, 0, 0, 1, 1)\n\n #Dest path textbox\n self.destTextBox = QtWidgets.QLineEdit(self.centerWidget)\n self.destTextBox.setEnabled(False)\n self.destTextBox.setMinimumSize(QtCore.QSize(200, 24))\n self.destTextBox.setMaximumSize(QtCore.QSize(16777215, 24))\n font = QtGui.QFont()\n font.setFamily(\"Consolas\")\n font.setPointSize(10)\n font.setWeight(42)\n self.destTextBox.setFont(font)\n self.destTextBox.setText(\"\")\n self.destTextBox.setMaxLength(255)\n self.destTextBox.setFrame(True)\n self.destTextBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.destTextBox.setObjectName(\"destTextBox\")\n self.destLayout.addWidget(self.destTextBox, 0, 1, 1, 1)\n\n self.filesLayout.addLayout(self.dolLayout, 0, 0, 1, 1)\n self.filesLayout.addWidget(self.horiSepFiles, 1, 0, 1, 1)\n self.filesLayout.addLayout(self.gctLayout, 2, 0, 1, 1)\n self.filesLayout.addWidget(self.horiSepDest, 3, 0, 1, 1)\n self.filesLayout.addLayout(self.destLayout, 4, 0, 1, 1)\n\n #Options Layout\n self.optionsLayout = QtWidgets.QGridLayout()\n self.optionsLayout.setHorizontalSpacing(20)\n self.optionsLayout.setObjectName(\"optionsLayout\")\n\n #Options Label\n self.optionsLabel = QtWidgets.QLabel(self.centerWidget)\n self.optionsLabel.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.optionsLabel.sizePolicy().hasHeightForWidth())\n self.optionsLabel.setSizePolicy(sizePolicy)\n self.optionsLabel.setMinimumSize(QtCore.QSize(79, 23))\n self.optionsLabel.setMaximumSize(QtCore.QSize(16777215, 23))\n font = QtGui.QFont(\"Helvetica\")\n font.setPointSize(18)\n font.setWeight(82)\n font.setBold(True)\n self.optionsLabel.setFont(font)\n self.optionsLabel.setTextFormat(QtCore.Qt.PlainText)\n self.optionsLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.optionsLabel.setObjectName(\"optionsLabel\")\n self.optionsLayout.addWidget(self.optionsLabel, 0, 0, 1, 4)\n\n #Allocation Label\n self.allocLabel = QtWidgets.QLabel(self.centerWidget)\n self.allocLabel.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.allocLabel.sizePolicy().hasHeightForWidth())\n self.allocLabel.setSizePolicy(sizePolicy)\n self.allocLabel.setMinimumSize(QtCore.QSize(79, 23))\n self.allocLabel.setMaximumSize(QtCore.QSize(16777215, 23))\n self.allocLabel.setTextFormat(QtCore.Qt.PlainText)\n self.allocLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n self.allocLabel.setObjectName(\"allocLabel\")\n self.optionsLayout.addWidget(self.allocLabel, 1, 0, 1, 1)\n\n #Allocation Textbox\n self.allocLineEdit = QtWidgets.QLineEdit(self.centerWidget)\n self.allocLineEdit.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, 
QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.allocLineEdit.sizePolicy().hasHeightForWidth())\n        self.allocLineEdit.setSizePolicy(sizePolicy)\n        self.allocLineEdit.setMinimumSize(QtCore.QSize(79, 23))\n        self.allocLineEdit.setMaximumSize(QtCore.QSize(79, 23))\n        font = QtGui.QFont()\n        font.setFamily(\"Consolas\")\n        font.setPointSize(12)\n        font.setWeight(42)\n        self.allocLineEdit.setFont(font)\n        self.allocLineEdit.setText(\"\")\n        self.allocLineEdit.setMaxLength(6)\n        self.allocLineEdit.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.allocLineEdit.setObjectName(\"allocLineEdit\")\n        self.optionsLayout.addWidget(self.allocLineEdit, 2, 0, 1, 1)\n\n        #handlerType label\n        self.handlerTypeLabel = QtWidgets.QLabel(self.centerWidget)\n        self.handlerTypeLabel.setEnabled(False)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.handlerTypeLabel.sizePolicy().hasHeightForWidth())\n        self.handlerTypeLabel.setSizePolicy(sizePolicy)\n        self.handlerTypeLabel.setMinimumSize(QtCore.QSize(79, 23))\n        self.handlerTypeLabel.setMaximumSize(QtCore.QSize(16777215, 23))\n        self.handlerTypeLabel.setTextFormat(QtCore.Qt.PlainText)\n        self.handlerTypeLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.handlerTypeLabel.setObjectName(\"handlerTypeLabel\")\n        self.optionsLayout.addWidget(self.handlerTypeLabel, 1, 1, 1, 1)\n\n        #handlerType selection\n        self.handlerTypeSelect = QtWidgets.QComboBox(self.centerWidget)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.handlerTypeSelect.sizePolicy().hasHeightForWidth())\n        self.handlerTypeSelect.setSizePolicy(sizePolicy)\n        self.handlerTypeSelect.setMinimumSize(QtCore.QSize(79, 23))\n        self.handlerTypeSelect.setMaximumSize(QtCore.QSize(79, 23))\n        self.handlerTypeSelect.setObjectName(\"handlerTypeSelect\")\n        self.handlerTypeSelect.addItems([\"FULL\", \"MINI\"])\n        self.optionsLayout.addWidget(self.handlerTypeSelect, 2, 1, 1, 1)\n\n        #hookType label\n        self.hookTypeLabel = QtWidgets.QLabel(self.centerWidget)\n        self.hookTypeLabel.setEnabled(False)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.hookTypeLabel.sizePolicy().hasHeightForWidth())\n        self.hookTypeLabel.setSizePolicy(sizePolicy)\n        self.hookTypeLabel.setMinimumSize(QtCore.QSize(79, 23))\n        self.hookTypeLabel.setMaximumSize(QtCore.QSize(16777215, 23))\n        self.hookTypeLabel.setTextFormat(QtCore.Qt.PlainText)\n        self.hookTypeLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.hookTypeLabel.setObjectName(\"hookTypeLabel\")\n        self.optionsLayout.addWidget(self.hookTypeLabel, 1, 2, 1, 1)\n\n        #hookType selection\n        self.hookTypeSelect = QtWidgets.QComboBox(self.centerWidget)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.hookTypeSelect.sizePolicy().hasHeightForWidth())\n        self.hookTypeSelect.setSizePolicy(sizePolicy)\n        self.hookTypeSelect.setMinimumSize(QtCore.QSize(79, 23))\n        
self.hookTypeSelect.setMaximumSize(QtCore.QSize(79, 23))\n        self.hookTypeSelect.setObjectName(\"hookTypeSelect\")\n        self.hookTypeSelect.addItems([\"VI\", \"GX\", \"PAD\"])\n        self.optionsLayout.addWidget(self.hookTypeSelect, 2, 2, 1, 1)\n\n        #txtCodesInclude label\n        self.txtCodesIncludeLabel = QtWidgets.QLabel(self.centerWidget)\n        self.txtCodesIncludeLabel.setEnabled(False)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.txtCodesIncludeLabel.sizePolicy().hasHeightForWidth())\n        self.txtCodesIncludeLabel.setSizePolicy(sizePolicy)\n        self.txtCodesIncludeLabel.setMinimumSize(QtCore.QSize(79, 23))\n        self.txtCodesIncludeLabel.setMaximumSize(QtCore.QSize(16777215, 23))\n        self.txtCodesIncludeLabel.setTextFormat(QtCore.Qt.PlainText)\n        self.txtCodesIncludeLabel.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)\n        self.txtCodesIncludeLabel.setObjectName(\"txtCodesIncludeLabel\")\n        self.optionsLayout.addWidget(self.txtCodesIncludeLabel, 1, 3, 1, 1)\n\n        #txtCodesInclude selection\n        self.txtCodesIncludeSelect = QtWidgets.QComboBox(self.centerWidget)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(self.txtCodesIncludeSelect.sizePolicy().hasHeightForWidth())\n        self.txtCodesIncludeSelect.setSizePolicy(sizePolicy)\n        self.txtCodesIncludeSelect.setMinimumSize(QtCore.QSize(79, 23))\n        self.txtCodesIncludeSelect.setMaximumSize(QtCore.QSize(79, 23))\n        self.txtCodesIncludeSelect.setObjectName(\"txtCodesIncludeSelect\")\n        self.txtCodesIncludeSelect.addItems([\"ACTIVE\", \"ALL\"])\n        self.optionsLayout.addWidget(self.txtCodesIncludeSelect, 2, 3, 1, 1)\n\n        #horizontal separator options\n        self.horiSepOptions = QtWidgets.QFrame(self.centerWidget)\n        self.horiSepOptions.setMinimumSize(QtCore.QSize(300, 30))\n        self.horiSepOptions.setContentsMargins(20, 0, 20, 0)\n        self.horiSepOptions.setFrameShape(QtWidgets.QFrame.HLine)\n        self.horiSepOptions.setFrameShadow(QtWidgets.QFrame.Sunken)\n        self.horiSepOptions.setObjectName(\"horiSepOptions\")\n        self.optionsLayout.addWidget(self.horiSepOptions, 3, 0, 1, 4)\n\n        #Advanced options button\n        self.exOptionsButton = QtWidgets.QPushButton(self.centerWidget)\n        font = QtGui.QFont(\"Helvetica\")\n        font.setPointSize(13)\n        self.exOptionsButton.setFont(font)\n        self.exOptionsButton.setCheckable(False)\n        self.exOptionsButton.setChecked(False)\n        self.exOptionsButton.setAutoDefault(True)\n        self.exOptionsButton.setDefault(False)\n        self.exOptionsButton.setFlat(False)\n        self.exOptionsButton.setDisabled(True)\n        self.exOptionsButton.setObjectName(\"exOptionsButton\")\n        self.optionsLayout.addWidget(self.exOptionsButton, 4, 0, 1, 4)\n\n        #horizontal separator 1\n        self.horiSepA = QtWidgets.QFrame(self.centerWidget)\n        self.horiSepA.setMinimumSize(QtCore.QSize(470, 30))\n        self.horiSepA.setFrameShape(QtWidgets.QFrame.HLine)\n        self.horiSepA.setFrameShadow(QtWidgets.QFrame.Sunken)\n        self.horiSepA.setObjectName(\"horiSepA\")\n        \n        #horizontal separator 2\n        self.horiSepB = QtWidgets.QFrame(self.centerWidget)\n        self.horiSepB.setMinimumSize(QtCore.QSize(470, 30))\n        self.horiSepB.setFrameShape(QtWidgets.QFrame.HLine)\n        self.horiSepB.setFrameShadow(QtWidgets.QFrame.Sunken)\n        self.horiSepB.setObjectName(\"horiSepB\")\n\n        #response panel\n        self.responses = QtWidgets.QPlainTextEdit(self.centerWidget)\n        
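        #the statements below set this panel up as the read-only build log: fixed 180 px height, monospaced Consolas text, and a tab stop of four space-widths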
self.responses.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.responses.sizePolicy().hasHeightForWidth())\n self.responses.setSizePolicy(sizePolicy)\n self.responses.setMinimumSize(QtCore.QSize(474, 180))\n self.responses.setMaximumSize(QtCore.QSize(16777215, 180))\n font = QtGui.QFont()\n font.setFamily(\"Consolas\")\n font.setPointSize(8)\n font.setWeight(42)\n fontMetrics = QtGui.QFontMetricsF(font)\n spaceWidth = fontMetrics.width(' ')\n self.responses.setFont(font)\n self.responses.setPlainText(\"\")\n self.responses.setTabStopDistance(spaceWidth * 4)\n self.responses.setReadOnly(True)\n self.responses.setObjectName(\"responses\")\n\n #Compile button\n self.compileButton = QtWidgets.QPushButton(self.centerWidget)\n font = QtGui.QFont(\"Helvetica\")\n font.setPointSize(34)\n self.compileButton.setFont(font)\n self.compileButton.setCheckable(False)\n self.compileButton.setChecked(False)\n self.compileButton.setAutoDefault(True)\n self.compileButton.setDefault(False)\n self.compileButton.setFlat(False)\n self.compileButton.setDisabled(True)\n self.compileButton.setObjectName(\"compileButton\")\n\n self.gridLayout.addWidget(self.filesLabel, 0, 0, 1, 1)\n self.gridLayout.addLayout(self.filesLayout, 1, 0, 1, 1)\n self.gridLayout.addWidget(self.horiSepA, 2, 0, 1, 1)\n self.gridLayout.addLayout(self.optionsLayout, 3, 0, 1, 1)\n self.gridLayout.addWidget(self.horiSepB, 4, 0, 1, 1)\n self.gridLayout.addWidget(self.responses, 5, 0, 1, 1)\n self.gridLayout.addWidget(self.compileButton, 6, 0, 1, 1)\n\n self.setCentralWidget(self.centerWidget)\n\n #Toolbar\n self.menubar = QtWidgets.QMenuBar(self)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 470, 22))\n self.menubar.setObjectName(\"menubar\")\n\n self.menuFile = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.menuFile.setFont(font)\n self.menuFile.setObjectName(\"menuFile\")\n\n self.menuEdit = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.menuEdit.setFont(font)\n self.menuEdit.setObjectName(\"menuEdit\")\n\n self.menuHelp = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.menuHelp.setFont(font)\n self.menuHelp.setObjectName(\"menuHelp\")\n\n self.setMenuBar(self.menubar)\n\n self.actionOpen = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionOpen.setFont(font)\n self.actionOpen.setObjectName(\"actionOpen\")\n\n self.actionClose = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionClose.setFont(font)\n self.actionClose.setObjectName(\"actionClose\")\n\n self.actionSave = QtWidgets.QAction(self)\n self.actionSave.setEnabled(False)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionSave.setFont(font)\n self.actionSave.setObjectName(\"actionSave\")\n\n self.actionSave_As = QtWidgets.QAction(self)\n self.actionSave_As.setEnabled(False)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionSave_As.setFont(font)\n self.actionSave_As.setObjectName(\"actionSave_As\")\n\n self.actionUndo = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionUndo.setFont(font)\n self.actionUndo.setMenuRole(QtWidgets.QAction.TextHeuristicRole)\n self.actionUndo.setObjectName(\"actionUndo\")\n self.actionRedo = 
QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionRedo.setFont(font)\n self.actionRedo.setObjectName(\"actionRedo\")\n self.actionCut = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionCut.setFont(font)\n self.actionCut.setObjectName(\"actionCut\")\n self.actionCopy = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionCopy.setFont(font)\n self.actionCopy.setObjectName(\"actionCopy\")\n self.actionPaste = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionPaste.setFont(font)\n self.actionPaste.setObjectName(\"actionPaste\")\n self.actionDelete = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionDelete.setFont(font)\n self.actionDelete.setObjectName(\"actionDelete\")\n self.actionSelect_All = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionSelect_All.setFont(font)\n self.actionSelect_All.setObjectName(\"actionSelect_All\")\n self.actionPreferences = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionPreferences.setFont(font)\n self.actionPreferences.setMenuRole(QtWidgets.QAction.PreferencesRole)\n self.actionPreferences.setObjectName(\"actionPreferences\")\n\n self.actionAbout_GeckoLoader = QtWidgets.QAction(self)\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionAbout_GeckoLoader.setFont(font)\n self.actionAbout_GeckoLoader.setMenuRole(QtWidgets.QAction.AboutRole)\n self.actionAbout_GeckoLoader.setObjectName(\"actionAbout_GeckoLoader\")\n\n self.actionAbout_Qt = QtWidgets.QAction(self)\n self.actionAbout_Qt.setStatusTip(\"\")\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionAbout_Qt.setFont(font)\n self.actionAbout_Qt.setMenuRole(QtWidgets.QAction.AboutQtRole)\n self.actionAbout_Qt.setObjectName(\"actionAbout_Qt\")\n\n self.actionCheck_Update = QtWidgets.QAction(self)\n self.actionCheck_Update.setStatusTip(\"\")\n font = QtGui.QFont()\n font.setFamily(\"Helvetica\")\n self.actionCheck_Update.setFont(font)\n self.actionCheck_Update.setObjectName(\"actionCheck_Update\")\n\n self.menuFile.addAction(self.actionOpen)\n self.menuFile.addAction(self.actionClose)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSave)\n self.menuFile.addAction(self.actionSave_As)\n \n self.menuEdit.addAction(self.actionPreferences)\n\n self.menuHelp.addAction(self.actionAbout_GeckoLoader)\n self.menuHelp.addAction(self.actionAbout_Qt)\n self.menuHelp.addAction(self.actionCheck_Update)\n\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuEdit.menuAction())\n self.menubar.addAction(self.menuHelp.menuAction())\n \n #Statusbar\n self.statusbar = QtWidgets.QStatusBar(self)\n self.statusbar.setObjectName(\"statusbar\")\n self.setStatusBar(self.statusbar)\n\n self.retranslate_ui()\n self.set_edit_fields()\n\n QtCore.QMetaObject.connectSlotsByName(self)\n\n def _lstrip_textboxes(self):\n attributes = [item for item in vars(self) if not item.startswith('__')]\n\n for item in attributes:\n item = getattr(self, item)\n if isinstance(item, QtWidgets.QLineEdit):\n strlength = len(item.text())\n cursorPos = item.cursorPosition()\n item.setText(item.text().lstrip())\n item.setCursorPosition(cursorPos - (strlength - len(item.text())))\n elif isinstance(item, QtWidgets.QPlainTextEdit):\n sliderPos = item.verticalScrollBar().sliderPosition()\n 
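                # setPlainText() resets the vertical scrollbar, so the slider position captured above is restored right after the strip below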
item.setPlainText(item.toPlainText().lstrip())\n item.verticalScrollBar().setSliderPosition(sliderPos)\n\n def set_edit_fields(self):\n self.filesLabel.setEnabled(True)\n self.dolTextBox.setEnabled(True)\n self.destTextBox.setEnabled(True)\n self.optionsLabel.setEnabled(True)\n self.allocLabel.setEnabled(True)\n self.allocLineEdit.setEnabled(True)\n self.handlerTypeLabel.setEnabled(True)\n self.handlerTypeSelect.setEnabled(True)\n self.hookTypeLabel.setEnabled(True)\n self.hookTypeSelect.setEnabled(True)\n self.txtCodesIncludeLabel.setEnabled(True)\n self.txtCodesIncludeSelect.setEnabled(True)\n self.exOptionsButton.setEnabled(True)\n self.actionSave.setEnabled(True)\n self.actionSave_As.setEnabled(True)\n\n self._lstrip_textboxes()\n\n if self.gctFileTextBox.text() != \"\":\n self.gctFileTextBox.setEnabled(True)\n self.gctFolderTextBox.setDisabled(True)\n elif self.gctFolderTextBox.text() != \"\":\n self.gctFileTextBox.setDisabled(True)\n self.gctFolderTextBox.setEnabled(True)\n else:\n self.gctFileTextBox.setEnabled(True)\n self.gctFolderTextBox.setEnabled(True)\n\n if self.dolTextBox.text().lower().endswith(\".dol\") and len(self.dolTextBox.text()) > 4:\n self.compileButton.setEnabled(self.gctFileTextBox.text() != \"\" or self.gctFolderTextBox.text() != \"\")\n else:\n self.compileButton.setDisabled(True)\n\n def retranslate_ui(self):\n self.setWindowTitle(QtWidgets.QApplication.translate(\"MainWindow\", f\"GeckoLoader {self.apiRevision} - untitled\", None))\n self.menuFile.setTitle(QtWidgets.QApplication.translate(\"MainWindow\", \"&File\", None))\n self.menuEdit.setTitle(QtWidgets.QApplication.translate(\"MainWindow\", \"&Edit\", None))\n self.menuHelp.setTitle(QtWidgets.QApplication.translate(\"MainWindow\", \"&Help\", None))\n self.actionOpen.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Open Session...\", None))\n self.actionOpen.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Open a session\", None))\n self.actionOpen.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+O\", None))\n self.actionClose.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Close Session...\", None))\n self.actionClose.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Close the current session\", None))\n self.actionClose.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+Shift+C\", None))\n self.actionSave.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Save Session\", None))\n self.actionSave.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Save the current session\", None))\n self.actionSave.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+S\", None))\n self.actionSave_As.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Save Session As...\", None))\n self.actionSave_As.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Save the current session to the specified location\", None))\n self.actionSave_As.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+Shift+S\", None))\n self.actionUndo.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Undo\", None))\n self.actionUndo.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Undo the last action\", None))\n self.actionUndo.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+Z\", None))\n self.actionRedo.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Redo\", None))\n self.actionRedo.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", 
\"Redo the last action\", None))\n self.actionRedo.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+Shift+Z\", None))\n self.actionCut.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Cut\", None))\n self.actionCut.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Cuts the selected text and places it on the clipboard\", None))\n self.actionCut.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+X\", None))\n self.actionCopy.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Copy\", None))\n self.actionCopy.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Copies the selected text and places it on the clipboard\", None))\n self.actionCopy.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+C\", None))\n self.actionPaste.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Paste\", None))\n self.actionPaste.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Paste the contents of the clipboard\", None))\n self.actionPaste.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+V\", None))\n self.actionDelete.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Delete\", None))\n self.actionDelete.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Deletes the selected text\", None))\n self.actionSelect_All.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Select All\", None))\n self.actionSelect_All.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Select all of the text\", None))\n self.actionSelect_All.setShortcut(QtWidgets.QApplication.translate(\"MainWindow\", \"Ctrl+A\", None))\n self.actionPreferences.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Preferences...\", None))\n self.actionPreferences.setStatusTip(QtWidgets.QApplication.translate(\"MainWindow\", \"Open the application preferences dialog\", None))\n self.actionAbout_GeckoLoader.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"About &GeckoLoader...\", None))\n self.actionAbout_Qt.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"About &Qt...\", None))\n self.actionCheck_Update.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"&Check Update\", None))\n\n self.filesLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Files\", None))\n\n self.dolButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Open DOL\", None))\n self.gctFileButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Open Codes\", None))\n self.orFolderLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"-\"*40 + \"OR\" + \"-\"*40, None))\n self.gctFolderButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Open Folder\", None))\n self.destButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Destination\", None))\n\n self.optionsLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Options\", None))\n\n self.allocLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Allocation\", None))\n self.allocLineEdit.setPlaceholderText(QtWidgets.QApplication.translate(\"MainWindow\", \"AUTO\", None))\n\n self.handlerTypeLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Codehandler\", None))\n self.handlerTypeSelect.setItemText(0, QtWidgets.QApplication.translate(\"Dialog\", \"FULL\", None))\n self.handlerTypeSelect.setItemText(1, QtWidgets.QApplication.translate(\"Dialog\", \"MINI\", None))\n\n 
self.hookTypeLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Code Hook\", None))\n self.hookTypeSelect.setItemText(0, QtWidgets.QApplication.translate(\"Dialog\", \"VI\", None))\n self.hookTypeSelect.setItemText(1, QtWidgets.QApplication.translate(\"Dialog\", \"GX\", None))\n self.hookTypeSelect.setItemText(2, QtWidgets.QApplication.translate(\"Dialog\", \"PAD\", None))\n\n self.txtCodesIncludeLabel.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Include Codes\", None))\n self.txtCodesIncludeSelect.setItemText(0, QtWidgets.QApplication.translate(\"Dialog\", \"ACTIVE\", None))\n self.txtCodesIncludeSelect.setItemText(1, QtWidgets.QApplication.translate(\"Dialog\", \"ALL\", None))\n\n self.exOptionsButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Advanced Settings\", None))\n\n self.compileButton.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"RUN\", None))\n","repo_name":"JoshuaMKW/GeckoLoader","sub_path":"main_ui.py","file_name":"main_ui.py","file_ext":"py","file_size_in_byte":38699,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"62"} +{"seq_id":"12349345162","text":"# encoding: utf-8\r\n\"\"\"\r\n@author: Liwenhao\r\n@e-mail: wh.chnb@gmail.com\r\n@file: productionFollowUpService.py\r\n@time: 2019/6/5 17:58\r\n@desc:\r\n\"\"\"\r\nimport time\r\nimport json\r\nimport requests\r\nfrom urllib.parse import urlencode\r\nfrom alibaba.public import Public\r\n\r\n\r\nclass ProductionFollowUpService(Public):\r\n def __init__(self, account):\r\n self.account = account\r\n super(ProductionFollowUpService, self).__init__(self.account)\r\n\r\n def get_single_number(self, page=1):\r\n\r\n url = 'https://onetouch.alibaba.com/moSurvey/schedule/list2.json?'\r\n params = {\r\n 'json': json.dumps({\r\n \"currentPage\": page,\r\n \"pageSize\": 20,\r\n \"sort\": {},\r\n \"orderBy\": \"RANK\",\r\n \"secondRankName\": \"tracking_service_warning_order\",\r\n \"status\": \"tracking_service_wait_for_checking\",\r\n \"descSort\": True})\r\n }\r\n url += urlencode(params)\r\n response = requests.get(url, headers=self.headers)\r\n responseDatas = json.loads(response.text)['data']\r\n totalPage = responseDatas['totalPage']\r\n print(responseDatas)\r\n print(totalPage)\r\n if page > totalPage:\r\n return\r\n else:\r\n postUrl = 'http://py1.jakcom.it:5000/alibaba/post/order/update_process_info'\r\n datas = responseDatas['dataList']\r\n templateItemNameKey_dict = {\"placeholder.expectFinishedDate\": \"请选择计划完成日期\", \"placeholder.remark\": \"请输入备注\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.packageCompletion\": \"包装完成\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.detectionCompletion\": \"检测完成\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.materialPreparing\": \"备料入仓\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.productionCompletion\": \"生产完成\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.shipment\": \"出货\",\r\n \"orderSchedule.task.templateName.taScheduleCloth.productionStart\": \"生产开始\",\r\n \"orderSchedule.task.templateName.amzs.yangpin\": \"样品照片上传\",\r\n \"orderSchedule.task.templateName.amzs.dabaotiewaibia\": \"打包贴外标\",\r\n \"orderSchedule.task.templateName.amzs.dianshangziyuanbao\": \"电商资源包分享\",\r\n \"orderSchedule.task.templateName.amzs.tieneibia\": \"帖内标\",\r\n \"orderSchedule.task.templateName.amzs.jiancha\": \"检测完成\",\r\n \"orderSchedule.task.templateName.amzs.fahuo\": \"发货信息采集\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.detectionCompletion\": 
\"检测完成\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.packageCompletion\": \"包装完成\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.productionCompletion\": \"生产完成\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.shipment\": \"发货信息采集\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.shipment.button\": \"去发货\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.materialPreparing\": \"备料入仓\",\r\n \"orderSchedule.task.templateName.taScheduleGeneral.productionStart\": \"开始生产\",\r\n \"orderSchedule.task.templateName.loaded\": \"发货\",\r\n \"orderSchedule.task.templateName.knitting.zhuanghuo\": \"装货\",\r\n \"orderSchedule.task.templateName.knitting.xishui\": \"洗水\",\r\n \"orderSchedule.task.templateName.knitting.beiliao\": \"备料\",\r\n \"orderSchedule.task.templateName.knitting.fenghe\": \"套口缝合/手缝\",\r\n \"orderSchedule.task.templateName.knitting.baozhuang\": \"包装\",\r\n \"orderSchedule.task.templateName.knitting.bianzhi\": \"横机编织\",\r\n \"orderSchedule.task.templateName.knitting.houdao\": \"后道\",\r\n \"orderSchedule.task.templateName.3c.zhusu\": \"注塑\",\r\n \"orderSchedule.task.templateName.3c.baozhuang\": \"包装\",\r\n \"orderSchedule.task.templateName.3c.zhuanghuo\": \"装货\",\r\n \"orderSchedule.task.templateName.3c.tiepian\": \"贴片\",\r\n \"orderSchedule.task.templateName.3c.beiliao\": \"备料\",\r\n \"orderSchedule.task.templateName.3c.test\": \"老化测试\",\r\n \"orderSchedule.task.templateName.3c.zuzhuang\": \"组装\",\r\n \"orderSchedule.task.templateName.start30\": \"成品制定完成\",\r\n \"orderSchedule.task.templateName.start10\": \"布匹裁剪完成\",\r\n \"orderSchedule.task.templateName.clothing.zhuanghuo\": \"装货\",\r\n \"orderSchedule.task.templateName.clothing.houdao\": \"后道\",\r\n \"orderSchedule.task.templateName.clothing.baozhuang\": \"包装\",\r\n \"orderSchedule.task.templateName.clothing.beiliao\": \"备料\",\r\n \"orderSchedule.task.templateName.clothing.caifeng\": \"剪裁\",\r\n \"orderSchedule.task.templateName.clothing.fengren\": \"缝纫\",\r\n \"orderSchedule.task.templateName.start50\": \"烫染完成\",\r\n \"orderSchedule.task.templateName.start\": \"开始\",\r\n \"orderSchedule.task.templateName.default.packing\": \"包装\",\r\n \"orderSchedule.task.templateName.default.progressA\": \"生产A\",\r\n \"orderSchedule.task.templateName.default.progressB\": \"生产B\",\r\n \"orderSchedule.task.templateName.default.start\": \"备料\",\r\n \"orderSchedule.task.templateName.default.loading\": \"装货\",\r\n \"orderSchedule.task.templateName.finish\": \"打包装箱完成\",\r\n \"orderSchedule.task.buyer.view\": \"此环节买家查看次数\",\r\n \"orderSchedule.taOrderList.tracking_service_finished\": \"已完成\",\r\n \"orderSchedule.taOrderList.paymentStatus.ADVANCE\": \"预付款\",\r\n \"orderSchedule.taOrderList.paymentStatus.FULL\": \"全款\",\r\n \"orderSchedule.taOrderList.paymentStatus.BALANCE\": \"未付款\",\r\n \"orderSchedule.taOrderList.paymentStatus.NOT_PAY\": \"未付款\",\r\n \"orderSchedule.taOrderList.tracking_service_wait_for_checking\": \"进行中\",\r\n \"orderSchedule.firstLevel.desc.Agriculture\": \"农业\",\r\n \"orderSchedule.firstLevel.desc.Machinery\": \"机械\",\r\n \"orderSchedule.firstLevel.desc.Electrical_Equipment_Supplies\": \"电气设备及用品\",\r\n \"orderSchedule.firstLevel.desc.Lights_Lighting\": \"灯光和照明\",\r\n \"orderSchedule.firstLevel.desc.Office_School_Supplies\": \"办公文教用品\",\r\n \"orderSchedule.firstLevel.desc.Fashion_Accessories\": \"时尚饰品\",\r\n \"orderSchedule.firstLevel.desc.Chemicals\": \"化学物质\",\r\n \"orderSchedule.firstLevel.desc.Apparel\": \"服装\",\r\n \"orderSchedule.firstLevel.desc.Minerals_Metallurgy\": \"矿产和冶金\",\r\n 
\"orderSchedule.firstLevel.desc.Health_Medical\": \"健康与医疗\",\r\n \"orderSchedule.firstLevel.desc.Business_Services\": \"商业服务\",\r\n \"orderSchedule.firstLevel.desc.Sports_Entertainment\": \"体育和娱乐\",\r\n \"orderSchedule.firstLevel.desc.Fabrication_Services\": \"制造服务\",\r\n \"orderSchedule.firstLevel.desc.Textiles_Leather_Products\": \"纺织及皮革制品\",\r\n \"orderSchedule.firstLevel.desc.Food_Beverage\": \"食品和饮料\",\r\n \"orderSchedule.firstLevel.desc.Rubber_Plastics\": \"橡塑原料及制品\",\r\n \"orderSchedule.firstLevel.desc.Beauty_Personal_Care\": \"美容及个人护理\",\r\n \"orderSchedule.firstLevel.desc.Service_Equipment\": \"维修设备\",\r\n \"orderSchedule.firstLevel.desc.Furniture\": \"家具\",\r\n \"orderSchedule.firstLevel.desc.Gifts_Crafts\": \"礼品和工艺品\",\r\n \"orderSchedule.firstLevel.desc.Timepieces_Jewelry_Eyewear\": \"钟表、珠宝、眼镜\",\r\n \"orderSchedule.firstLevel.desc.Construction_Real_Estate\": \"建筑与房地产\",\r\n \"orderSchedule.firstLevel.desc.Electronic_Components_Supplies\": \"电子元件及用品\",\r\n \"orderSchedule.firstLevel.desc.Home_Appliances\": \"家用电器\",\r\n \"orderSchedule.firstLevel.desc.Luggage_Bags_Cases\": \"行李,袋子和箱子\",\r\n \"orderSchedule.firstLevel.desc.Packaging_Printing\": \"包装与印刷\",\r\n \"orderSchedule.firstLevel.desc.Toys_Hobbies\": \"玩具\",\r\n \"orderSchedule.firstLevel.desc.Environment\": \"环境\",\r\n \"orderSchedule.firstLevel.desc.Vehicles_Accessories\": \"车辆及配件\",\r\n \"orderSchedule.firstLevel.desc.Home_Garden\": \"家居与园艺\",\r\n \"orderSchedule.firstLevel.desc.Telecommunications\": \"电信\",\r\n \"orderSchedule.firstLevel.desc.Energy\": \"能源\",\r\n \"orderSchedule.firstLevel.desc.Security_Protection\": \"安全防护\",\r\n \"orderSchedule.firstLevel.desc.Shoes_Accessories\": \"鞋子和配件\",\r\n \"orderSchedule.firstLevel.desc.Consumer_Electronics\": \"消费电子\",\r\n \"orderSchedule.firstLevel.desc.Tools_Hardware\": \"工具和硬件\",\r\n \"orderSchedule.taskStatus.task_finished\": \"完成\",\r\n \"orderSchedule.taskStatus.wait_for_checking\": \"等待中\",\r\n \"orderSchedule.taskStatus.task_fail\": \"无法办理\",\r\n \"orderSchedule.taskStatus.all\": \"全部\",\r\n \"orderSchedule.produceProgressStatus.shipping_finish\": \"-\",\r\n \"orderSchedule.produceProgressStatus.order_finish\": \"-\",\r\n \"orderSchedule.produceProgressStatus.warning\": \"预警\",\r\n \"orderSchedule.produceProgressStatus.delay\": \"超期\",\r\n \"orderSchedule.produceProgressStatus.normal\": \"正常\",\r\n \"orderSchedule.reviewTaskStatus.wait_for_feedback\": \"待反馈\",\r\n \"orderSchedule.reviewTaskStatus.finish_evaluate\": \"已评价\",\r\n \"orderSchedule.reviewTaskStatus.wait_for_evaluate\": \"未评价\",\r\n \"orderSchedule.reviewTaskStatus.finished\": \"已反馈\",\r\n \"orderSchedule.rejectReason.image_not_true\": \"虚假拍摄\",\r\n \"orderSchedule.rejectReason.other_reason\": \"其他原因\",\r\n \"orderSchedule.rejectReason.location_distance_terrible\": \"地址偏差过大\",\r\n \"orderSchedule.rejectReason.inventory_product\": \"产品是库存产品\",\r\n \"orderSchedule.rejectReason.not_suit_for_order\": \"拍摄内容与订单不符\",\r\n \"orderSchedule.rejectReason.over_produt_time\": \"实际生产进度已超过当前阶段\",\r\n \"orderSchedule.rejectReason.image_or_video_not_uploaded\": \"未上传图片或视频\",\r\n \"orderSchedule.rejectReason.uploaded_image_or_video_unqualified\": \"已上传图片或视频不合格\",\r\n \"orderSchedule.rejectReason.no_enough_photo_employee\": \"未能安排拍摄人员\",\r\n \"orderSchedule.rejectReason.schedule_dely\": \"进度延期\",\r\n \"orderSchedule.rejectReason.not_ship_to_specified_warehouse\": \"未发货至指定海外仓\",\r\n \"orderSchedule.fail.mustLogin\": \"必须先登录才能操作\",\r\n \"orderSchedule.fail.noPermision\": \"无权操作该信保单\",\r\n \"orderSchedule.fail.paramError\": 
\"参数错误\",\r\n \"orderSchedule.fail.sysException\": \"系统异常\",\r\n \"orderSchedule.feedbackStatus.finished\": \"已反馈\",\r\n \"orderSchedule.feedbackStatus.wait_for_feedback\": \"待反馈\",\r\n \"orderSchedule.detail.product.name\": \"产品名称\",\r\n \"orderSchedule.detail.product.quantity\": \"数量\",\r\n \"orderSchedule.detail.product.description\": \"描述\",\r\n \"ta.schedule.task.template.resolver.eCommerceService\": \"电商一站通\",\r\n \"ta.schedule.task.template.resolver.eCommerceService.desc\": \"从定制、检测、打包贴标到发货一站式服务\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral\": \"生产可视化\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral.simple\": \"生产可视化简易版\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral.desc\": \"生产型订单监控\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral.closed\": \"The order has been closed and order tracking service is not available. \\n\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral.simpleMode.desc\": \"生产可视化简易模版描述\",\r\n \"ta.schedule.task.template.resolver.taScheduleGeneral.defaultMode.desc\": \"生产可视化模版描述\",\r\n \"ta.schedule.task.template.resolver.1200000212\": \"生产可视化\",\r\n \"ta.schedule.task.template.resolver.default\": \"默认类型\",\r\n \"ta.schedule.task.template.resolver.clothing\": \"服装\",\r\n \"ta.schedule.task.template.resolver.3c\": \"消费电子\",\r\n \"ta.schedule.task.template.resolver.knitting\": \"针织工艺\",\r\n \"ta.schedule.task.template.type\": \"可视化类型\",\r\n \"ta.schedule.page.createOrder.buyer.deliver.content\": \"供应商已发货,服务无法开启,更多了解\",\r\n \"ta.schedule.page.createOrder.buyer.deliver.content.rax\": \"供应商已发货,服务无法开启。\",\r\n \"ta.schedule.page.createOrder.buyer.deliver.title\": \"提示\",\r\n \"ta.schedule.page.createOrder.seller.deliver.content\": \"货物已发出,服务不能被开启。请在发货前开启订单可视化服务。\",\r\n \"ta.schedule.ta.order.relative.prepayment.expect.shipping.time\": \"预付款收齐到账duration天后发货\",\r\n \"ta.schedule.ta.order.relative.prepayment.expect.deliveryTime\": \"计划发货时间\",\r\n \"ta.schedule.ta.order.relative.prepayment.expect.deliveryStatus\": \"发货状态\",\r\n \"ta.schedule.ta.order.relative.prepayment.deliveryTime\": \"实际发货时间\",\r\n \"ta.schedule.ta.order.relative.all.payment.expect.shipping.time\": \"全款到账收齐duration天后发货\",\r\n \"ta.schedule.ta.order.relative.balance.payment.expect.shipping.time\": \"全款收齐后duration天后发货\",\r\n \"seller.show.buyer.view.num\": \"买家访问次数\",\r\n \"rax.buyer.submitted\": \"供应商已敦促及时上传最新生产情况。\", \"rax.buyer.header.title\": \"生产服务跟进\",\r\n \"rax.buyer.header.content.second\": \"如果延期仍未收到状态更新,或有其他问题,请联系供应商。\",\r\n \"rax.buyer.header.content.first\": \"供应商将按约定生产线上传图片。\",\r\n \"rax.buyer.header.confirmed\": \"待卖家确认\", \"rax.buyer.leave.comment\": \"联系供应商\",\r\n \"rax.buyer.select.tracking.steps\": \"选择生产跟进服务类型(多选)\",\r\n \"rax.buyer.select.one.multiple\": \"请选择一个或多个选项\",\r\n \"rax.buyer.preview.tracking\": \"预览生产跟进步骤\",\r\n \"rax.buyer.service.enabled\": \"服务未开启,联系供应商或者去PC端开启服务。\",\r\n \"rax.buyer.estimated.time\": \"预计完成时间\", \"rax.buyer.review\": \"评价\",\r\n \"rax.buyer.reminderText\": \"Note that order tracking only begins once initial payment has been made. 
\",\r\n \"rax.buyer.remind.supplier\": \"提醒供应商\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.[ENUM].IN_SHIPPING\": \"发货中\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.[ENUM].WAIT_TO_SHIPPING\": \"等待发货\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.[ENUM].BUYER_SIGN\": \"买家签收\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.[ENUM].CLOSE\": \"订单关闭\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.[ENUM].FINISH\": \"发货完成\",\r\n \"deliveryCenter.deliveryHome.deliveryStatus.ENUM].SELLER_SIGN\": \"卖家签收\"}\r\n for data in datas:\r\n try:\r\n taOrderNo = data['taOrderNo'] # 信单保号\r\n if int(taOrderNo) == 14187703501026916:\r\n print(data)\r\n buyerName = data['buyerName'] # 买家名称\r\n gmtCreate = data['gmtCreate'] # 创建时间\r\n templateItemNameKey = templateItemNameKey_dict.get(data['templateItemNameKey'], '未获取到样品状态') # 样品状态\r\n localTime = data['expectShippingTime']\r\n expectShippingTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(\r\n int(str(localTime)[:10]))) if localTime is not None else '' # 计划发货时间\r\n status = templateItemNameKey_dict.get('orderSchedule.taOrderList.' + data['status'],\r\n '未获取到办理状态') # 办理状态\r\n currentOwner = data['currentOwner'] # 当前负责人\r\n buyerViewOrNot = '是' if data['buyerViewOrNot'] is True else '否' # 买家是否查看\r\n paymentStatusKey = templateItemNameKey_dict[data['paymentStatusKey']] # 付款状态\r\n produceProgressStatus = templateItemNameKey_dict.get(\r\n 'orderSchedule.produceProgressStatus.' + data['produceProgressStatus'], '未获取到进度提醒') # 进度提醒\r\n gmtTaskExpectFinished = data['gmtTaskExpectFinished'] if data[\r\n 'gmtTaskExpectFinished'] is not None else '' # 当前节点计划完成时间\r\n postData = {\r\n 'Account': self.account,\r\n 'TA_Order_ID': taOrderNo,\r\n 'Buyer_Name': buyerName,\r\n 'Create_Time': gmtCreate,\r\n 'Progress_Rate': templateItemNameKey,\r\n 'Sent_Time': expectShippingTime,\r\n 'Process_Status': status,\r\n 'Principal': currentOwner,\r\n 'View_Status': buyerViewOrNot,\r\n 'Remind_Status': produceProgressStatus,\r\n 'Step_Time': gmtTaskExpectFinished,\r\n 'Payment_Status': paymentStatusKey\r\n }\r\n print(postData)\r\n postResponse = requests.post(postUrl, postData)\r\n print(postResponse)\r\n print(postResponse.text)\r\n except Exception as e:\r\n print(e)\r\n continue\r\n page += 1\r\n return self.get_single_number(page=page)\r\n\r\n def main(self):\r\n self.get_single_number()\r\n\r\n\r\ndef main():\r\n account_list = [\r\n # 'fb1@jakcom.com',\r\n 'fb2@jakcom.com',\r\n 'fb3@jakcom.com',\r\n 'tx@jakcom.com',\r\n ]\r\n for account in account_list:\r\n productionFollowUpService = ProductionFollowUpService(account)\r\n productionFollowUpService.main()\r\n # url = 'https://onetouch.alibaba.com/moSurvey/schedule/list2.json?'\r\n # params = {\r\n # 'json': {\"taOrderNo\":\"14187703501026916\",\"currentPage\":1,\"pageSize\":10,\"sort\":{},\"orderBy\":\"RANK\",\"secondRankName\":\"tracking_service_warning_order\",\"descSort\":True}\r\n # }\r\n # url += urlencode(params)\r\n # response = requests.get(url, productionFollowUpService.headers)\r\n # print(response)\r\n # print(response.text)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"whchnb/spider","sub_path":"alibab/productionFollowUpService.py","file_name":"productionFollowUpService.py","file_ext":"py","file_size_in_byte":24207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"1234531398","text":"import sys; input = lambda : sys.stdin.readline().rstrip()\nT = int(input())\nwhile(T>0):\n N = int(input())\n data = 
sorted([list(map(int, input().split())) for i in range(N)], key = lambda x : x[0])\n    count = 1\n    min_rank = data[0][1]\n\n    for i in range(1, N):\n        if data[i][1] < min_rank:\n            min_rank = data[i][1]\n            count += 1\n    print(count)\n    T -= 1","repo_name":"swj9707/OnlineJudge","sub_path":"BOJ/Algorithms/Greedy/1946.py","file_name":"1946.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"26951891728","text":"import os\nimport socket\nimport string\n \ncurrent_dir = '/home/data'\nresult_dir = '/home/output'\n\n\nfiles = os.listdir(current_dir)\n\nlst_files = [] #to store all the text files in that path\n\nfor val in files:\n    if val.endswith('.txt'): lst_files.append(val)\n\n\npath_if_file = os.path.join(current_dir, 'IF.txt')\npath_limerick_file = os.path.join(current_dir, 'Limerick.txt')\npath_res = os.path.join(result_dir,'result.txt')\n\n\ncnt_words_if = 0\nwith open(path_if_file) as if_fp:\n    for row in if_fp:\n        cnt_words_if+=len(row.split()) # counting words from IF.txt\n\n\ncnt_words_lim = 0\nwith open(path_limerick_file) as lim_fp:\n    for row in lim_fp:\n        cnt_words_lim+=len(row.split()) # Counting words from limerick-1.txt\n\n\nall_words = {}\nwith open(path_if_file) as if_fp:\n    for f_str in if_fp:\n        for value in f_str.split():\n            value = value.translate(str.maketrans('', '', string.punctuation))\n            value = value.capitalize()\n            if value in all_words:\n                all_words[value]+=1\n            else:\n                all_words[value]=1\n\n\nres_words = sorted(all_words.items(), key=lambda x: x[1], reverse=True)[:3]\n\n\nhostname = socket.gethostname()\nIP_address = socket.gethostbyname(hostname)\n\nwith open(path_res,'w') as output_file:\n    output_file.write(f\"Listing all the files with .txt extension\\n\")\n    for val in lst_files:\n        output_file.write(f\"{val}\\n\")\n    output_file.write(f\"Total words count in Limerick.txt file:{cnt_words_lim}\\n\")\n    output_file.write(f\"Total words count in IF.txt file :{cnt_words_if}\\n\")\n    output_file.write(f\"Total No of words : {cnt_words_if+cnt_words_lim}\\n\")\n    output_file.write(f\"Top three words and their count in IF.txt file \\n\")\n    for wd,wc in res_words:\n        output_file.write(f\"{wd} -> count: {wc}\\n\") \n    output_file.write(f\"IP address of the machine: {IP_address}\\n\")\n\n\nwith open(path_res) as output_file:\n    for row in output_file:\n        print(row)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"hemanthmutyala/Docker-Project","sub_path":"docr.py","file_name":"docr.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23179494979","text":"from bs4 import BeautifulSoup as bs\nimport requests\n\n# defining the user-agent and language\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \" \\\n             \"(KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36\"\n\nLANGUAGE = \"en-US,en;q=0.5\"\n\n\ndef get_weather_data(url): # stores data in a dictionary given a url\n    # start a session\n    session = requests.Session()\n    session.headers['User-Agent'] = USER_AGENT\n    session.headers['Accept-Language'] = LANGUAGE\n    session.headers['Content-Language'] = LANGUAGE\n    html = session.get(url)\n\n    # creates soup object\n    soup = bs(html.text, \"html.parser\")\n\n    # stores results in dictionary\n    result = {'region': soup.find(\"div\", attrs={\"id\": \"wob_loc\"}).text,\n              'temp_now': soup.find(\"span\", attrs={\"id\": \"wob_tm\"}).text,\n              'dayhour': soup.find(\"div\", attrs={\"id\": 
\"wob_dts\"}).text,\n 'weather_now': soup.find(\"span\", attrs={\"id\": \"wob_dc\"}).text,\n 'precipitation': soup.find(\"span\", attrs={\"id\": \"wob_pp\"}).text,\n 'humidity': soup.find(\"span\", attrs={\"id\": \"wob_hm\"}).text,\n 'wind': soup.find(\"span\", attrs={\"id\": \"wob_ws\"}).text}\n\n # get next days weather\n next_days = []\n days = soup.find(\"div\", attrs={\"id\": \"wob_dp\"})\n for day in days.findAll(\"div\", attrs={\"class\": \"wob_df\"}):\n # gets day name\n day_name = day.findAll(\"div\")[0].attrs['aria-label']\n # gets weather for day\n weather = day.find(\"img\").attrs[\"alt\"]\n temp = day.findAll(\"span\", {\"class\": \"wob_t\"})\n # max temp\n max_temp = temp[0].text\n # min temp\n min_temp = temp[2].text\n next_days.append({\"name\": day_name, \"weather\": weather, \"max_temp\": max_temp, \"min_temp\": min_temp})\n # add to result\n result['next_days'] = next_days\n return result\n\n\nif __name__ == \"__main__\":\n # google search url\n URL = \"https://www.google.com/search?lr=lang_en&ie=UTF-8&q=weather\"\n # place we're searching\n place = \"brisbane\"\n URL += place\n # get the data\n data = get_weather_data(URL)\n\n print(\"Weather for:\", data[\"region\"])\n print(\"Now:\", data[\"dayhour\"])\n print(f\"Temperature now: {data['temp_now']}°C\")\n print(\"Description:\", data['weather_now'])\n print(\"Precipitation:\", data[\"precipitation\"])\n print(\"Humidity:\", data[\"humidity\"])\n print(\"Wind:\", data[\"wind\"])\n print(\"Next days:\")\n for dayweather in data[\"next_days\"]:\n print(\"=\"*40, dayweather[\"name\"], \"=\"*40)\n print(\"Description:\", dayweather[\"weather\"])\n print(f\"Max temperature: {dayweather['max_temp']}°C\")\n print(f\"Min temperature: {dayweather['min_temp']}°C\")\n","repo_name":"cheekibreeki2401/engineering_project","sub_path":"web-scrapingExample.py","file_name":"web-scrapingExample.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41104086641","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n def insert(self, head: 'Optional[Node]', insertVal: int) -> 'Node':\n \n if not head:\n node = Node(insertVal)\n node.next = node\n return node\n \n if id(head) == id(head.next):\n newnode = Node(insertVal, head.next)\n head.next = newnode\n return head\n \n node = head\n \n smallest_node = node\n smallest = node.val\n smallind = 0\n largest = node.val\n largestind = 0\n largest_node = node\n \n count = 1\n node = node.next\n \n while id(node) != id(head):\n if node.val < smallest:\n smallest = node.val\n smallind = count\n smallest_node = node\n \n elif node.val >= largest:\n largest = node.val\n largestind = count\n largest_node = node\n \n node = node.next\n count += 1\n \n if largest == smallest: # monotonic array\n newnode = Node(insertVal, head.next)\n head.next = newnode\n return head\n \n \n if insertVal >= largest or insertVal <= smallest:\n newnode = Node(insertVal, largest_node.next)\n largest_node.next = newnode\n else:\n node = smallest_node\n \n prev = node\n while node.val < insertVal:\n prev = node\n node = node.next\n \n newnode = Node(insertVal, node)\n prev.next = newnode\n \n \n \n return head\n\n 
\n","repo_name":"nvercillo/LeetcodeAlgorithms","sub_path":"medium/insert-into-a-sorted-circular-linked-list.py","file_name":"insert-into-a-sorted-circular-linked-list.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"70420633797","text":"import time\nnumber = [5,2,4,2,5]\n\nfor x_count in number:\n display = \"\"\n time.sleep(1)\n for i in range(x_count):\n display += \"x\"\n time.sleep(1)\n print(display)\n\n#if you did this correctly be proud about your self because this was slightly more difficult\n","repo_name":"Thevindu-Senanayake/Python_Cheat_Sheet","sub_path":"#14.1.py","file_name":"#14.1.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"24979532689","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.signal import butter, lfilter, freqz\r\n\r\n#time-frequency axis defination\r\nfs=1001\r\nT=1/fs\r\nL=5001 # Keep this odd for ease of Mathematics\r\nt=np.linspace(0,L-1,L)*T\r\n#------------------------Delta-Modulation-------------------------\r\n#INPUT Signal\r\nf0=1\r\nsig0=np.sin(2*np.pi*f0*t)\r\n\r\nx=sig0\r\nu=np.zeros(L)\r\ntx=np.zeros(L)#contains parallel output\r\n\r\nb=np.zeros(L)\r\ndelta=0.1\r\ncount=0 #for emulating a counter for sampling and holding\r\nfor n in range(L):\r\n if(count>0):\r\n u[n]=u[n-1]\r\n tx[n]=tx[n-1]\r\n \r\n \r\n else:\r\n count=50\r\n if x[n] > u[n-1]:\r\n b[n]=delta\r\n tx[n]=1\r\n elif x[n] < u[n-1]:\r\n b[n]=-delta\r\n tx[n]=0\r\n u[n]=u[n-1]+b[n]\r\n \r\n count-=1\r\n \r\nsig1=u\r\n#------------------------Demodulation-------------------------\r\n#xn=xn-1+b[n]\r\nsig2=np.zeros(L)\r\n\r\nfor n in range(L):\r\n if(tx[n]==1):\r\n sig2[n]=sig2[n-1]+delta\r\n else:\r\n sig2[n]=sig2[n-1]-delta\r\n\r\n#Low Pass Filter\r\ncutoff=f0\r\nfs=fs\r\norder=5\r\nnyq = 0.5 * fs\r\nnormal_cutoff = cutoff / nyq\r\n\r\nb, a = butter(order, normal_cutoff, btype='low', analog=False)\r\nsig3 = -lfilter(b, a, sig2)*2\r\n\r\nfig0,ax0=plt.subplots(4)\r\nfig0.show()\r\nax0[0].plot(t,sig0,label='Input Signal', color='y')\r\nax0[0].legend()\r\nax0[0].set_ylabel(\"Amplitude\")\r\nax0[1].plot(t,sig1,label='Delta Modulated Signal')\r\nax0[1].legend()\r\nax0[1].set_ylabel(\"Amplitude\")\r\n\r\nprint('Binary Output')\r\nprint(tx)\r\n\r\nax0[2].plot(t,sig2,label='Receiver Output',color='b')\r\nax0[2].legend()\r\nax0[2].set_ylabel(\"Amplitude\")\r\nax0[3].plot(t,sig3,label='LPF Output',color='r')\r\nax0[3].legend()\r\nax0[3].set_ylabel(\"Amplitude\")\r\n\r\nax0[3].set_xlabel(\"Time\")\r\n","repo_name":"RushilVerma/Digital-Communication-lab","sub_path":"digital_lab_delta_modem.py","file_name":"digital_lab_delta_modem.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"32372169010","text":"k = int(input())\nrn = list(map(int,input().split()))\n\ns = set(rn)\n\nsum_all = sum(rn)\n\nsum_groups = sum([k*i for i in s])\n\ncaptain_k = sum_groups - sum_all\n\ncaptain_room = captain_k/(k-1)\nprint(captain_room)\n\n# hash = {}\n# for i in rn:\n# hash[i] = hash.get(i, 0) + 1\n#\n# for key, val in hash.items():\n# if val == 1:\n# print(key)\n# 
break\n","repo_name":"pkdism/hackerrank","sub_path":"python/sets/captains-room.py","file_name":"captains-room.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15843770827","text":"__author__ = 'sunary'\n\n\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.spider import BaseSpider\nfrom utils.my_mongo import Mongodb\n\n\nclass AutoRobot(BaseSpider):\n name = \"football_club_crawling\"\n # allowed_domains = []\n start_urls = []\n mongodb_save = None\n download_delay = 3\n\n def init_urls(self):\n self.start_urls = []\n for i in range(1, 45):\n self.start_urls.append('http://footballdatabase.com/ranking/world/%s' % (i))\n\n def init_mongodb(self):\n self.mongodb_save = Mongodb(db='fc', col='fc')\n\n def __init__(self):\n self.init_urls()\n self.init_mongodb()\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n club_name = hxs.select(\"//div[@class='limittext']/text()\").extract()\n country = hxs.select(\"//a[@class='sm_logo-name']/text()\").extract()\n\n for i in range(len(club_name)):\n try:\n data = {'club': club_name[i],\n 'country': country[i]}\n self.mongodb_save.insert(data)\n except:\n pass\n","repo_name":"phongphung/Profile_team_small_portal","sub_path":"scrapy_crawl/spiders/football_club_crawling.py","file_name":"football_club_crawling.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22684846853","text":"import secrets\nimport csv\n\nsalt = secrets.token_hex(64)\n\nfields = [\"salt\"]\nwith open(\"salts/salts.csv\", \"w\") as newfile:\n writer = csv.writer(newfile)\n writer.writerow(fields)\n writer.writerow([salt])\nnewfile.close()\n","repo_name":"HDRUK/CaRROT-Pseudonymisation","sub_path":"examples/Python/salt_gen.py","file_name":"salt_gen.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18345922750","text":"from flask_socketio import SocketIO\nfrom application import create_app\nfrom application.chat import save_message\nimport config\n\n\napp = create_app()\nsocketio = SocketIO(app)\n\n\n@socketio.on('event')\ndef handle_my_custom_event(json, methods=['GET', 'POST']):\n \"\"\"\n Funkcja odpowiedzialna za zapisywanie wiadomości\n oraz za wysyłanie ich do innych klientów.\n :param json: json\n :param methods: POST GET\n :return: None\n \"\"\"\n data = dict(json)\n if \"user_id\" and \"room_id\" in data:\n save_message(json)\n\n socketio.emit('message response', json)\n\n\n# MAINLINE\nif __name__ == \"__main__\": # start the web server\n socketio.run(app, debug=True, host=str(config.Config.SERVER))\n","repo_name":"rwawraf/PZSI-I.S.1st.6-20-21L","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"10697529141","text":"from typing import Optional\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..builder import LOSSES\nimport warnings\nfrom mmdet.models.losses import weight_reduce_loss\n\n\ndef focal_loss(pred, label, weight, alpha=0.25, gamma=2.0, reduction='mean',\n avg_factor=None, class_weight=None, ignore_index=-100, avg_non_ignore=False,\n eps= 1e-8):\n \"\"\"\n 本质上是加权的cross_entropy loss, 权重与focal loss类似.\n Args:\n pred: (N, n_cls)\n label: (N, )\n weight: (N, )\n \"\"\"\n # 
The default value of ignore_index is the same as F.cross_entropy\n    ignore_index = -100 if ignore_index is None else ignore_index\n    # element-wise losses\n    loss = F.cross_entropy(\n        pred,\n        label,\n        weight=class_weight,\n        reduction='none',\n        ignore_index=ignore_index)  # (N, )\n\n    pred_soft = F.softmax(pred, dim=1) + eps  # (N, n_cls)\n    # (N, n_cls) --> (N, 1) --> (N, )\n    pred_soft = torch.gather(pred_soft, index=label.unsqueeze(dim=1), dim=1).squeeze(dim=1)\n    focal_weight = alpha * torch.pow(1 - pred_soft, gamma)  # (N, )\n    loss = focal_weight * loss\n\n    # average loss over non-ignored elements\n    # pytorch's official cross_entropy average loss over non-ignored elements\n    # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa\n    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':\n        avg_factor = label.numel() - (label == ignore_index).sum().item()\n\n    # apply weights and do the reduction\n    if weight is not None:\n        weight = weight.float()\n    loss = weight_reduce_loss(\n        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n    return loss\n\n\n@LOSSES.register_module()\nclass SoftmaxFocalloss(nn.Module):\n    def __init__(self,\n                 use_sigmoid=False,\n                 gamma=2.0,\n                 alpha=0.25,\n                 reduction='mean',\n                 class_weight=None,\n                 ignore_index=None,\n                 loss_weight=1.0,\n                 avg_non_ignore=False):\n        \"\"\"SoftmaxFocalloss.\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n                instead of softmax. Must be False here. Defaults to False.\n            gamma (float, optional): Focusing parameter of the focal weight.\n                Defaults to 2.0.\n            alpha (float, optional): Balancing factor of the focal weight.\n                Defaults to 0.25.\n            reduction (str, optional): The method used to reduce the loss.\n                Options are \"none\", \"mean\" and \"sum\". Defaults to 'mean'.\n            class_weight (list[float], optional): Weight of each class.\n                Defaults to None.\n            ignore_index (int | None): The label index to be ignored.\n                Defaults to None.\n            loss_weight (float, optional): Weight of the loss. Defaults to 1.0.\n            avg_non_ignore (bool): The flag decides whether the loss is\n                only averaged over non-ignored targets. Default: False.\n        \"\"\"\n        super(SoftmaxFocalloss, self).__init__()\n        assert (use_sigmoid is False)\n        self.use_sigmoid = use_sigmoid\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.class_weight = class_weight\n        self.ignore_index = ignore_index\n        self.avg_non_ignore = avg_non_ignore\n        if ((ignore_index is not None) and not self.avg_non_ignore\n                and self.reduction == 'mean'):\n            warnings.warn(\n                'Default ``avg_non_ignore`` is False, if you would like to '\n                'ignore the certain label and average loss over non-ignore '\n                'labels, which is the same with PyTorch official '\n                'cross_entropy, set ``avg_non_ignore=True``.')\n\n        self.gamma = gamma\n        self.alpha = alpha\n        self.cls_criterion = focal_loss\n\n    def extra_repr(self):\n        \"\"\"Extra repr.\"\"\"\n        s = f'avg_non_ignore={self.avg_non_ignore}'\n        return s\n\n    def forward(self,\n                cls_score,\n                label,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                ignore_index=None,\n                **kwargs):\n        \"\"\"Forward function.\n\n        Args:\n            cls_score (torch.Tensor): The prediction.\n            label (torch.Tensor): The learning label of the prediction.\n            weight (torch.Tensor, optional): Sample-wise loss weight.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The method used to reduce the\n                loss. Options are \"none\", \"mean\" and \"sum\".\n            ignore_index (int | None): The label index to be ignored.\n                If not None, it will override the default value. 
Default: None.\n        Returns:\n            torch.Tensor: The calculated loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if ignore_index is None:\n            ignore_index = self.ignore_index\n\n        if self.class_weight is not None:\n            class_weight = cls_score.new_tensor(\n                self.class_weight, device=cls_score.device)\n        else:\n            class_weight = None\n        loss_cls = self.loss_weight * self.cls_criterion(\n            cls_score,\n            label,\n            weight,\n            gamma=self.gamma,\n            alpha=self.alpha,\n            class_weight=class_weight,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            ignore_index=ignore_index,\n            avg_non_ignore=self.avg_non_ignore,\n            **kwargs)\n        return loss_cls","repo_name":"Yzichen/mmLaneDet","sub_path":"mmlane/models/losses/Softmax_Focalloss.py","file_name":"Softmax_Focalloss.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"62"} +{"seq_id":"33295059591","text":"\n\nimport cv2\nimport numpy as np\n\nimport sys\nsys.path.append(\"/home/jasonraiti/Documents/GitHub/USC_REU/Project_Files/Jasons_Functions/\")\n\nfrom drawline import * \nfrom skeleton_to_graph import * # graph = skeleton_to_graph(path)\nfrom open_or_show_image import *\nfrom find_line_segment_intersection import *\nfrom matplotlib.path import Path\n\ndef get_negative_image(image):\n    image = (255 - image)\n    return image\n\n# def find_nearest_white(img, target):\n#     nonzero = cv2.findNonZero(img)\n#     distances = np.sqrt((nonzero[:,:,0] - target[0]) ** 2 + (nonzero[:,:,1] - target[1]) ** 2) \n#     # distances = np.sqrt((nonzero[:,:,0] - target[1]) ** 2 + (nonzero[:,:,1] - target[0]) ** 2) # try flipping the coordinates \n#     nearest_index = np.argmin(distances)\n#     return nonzero[nearest_index]\n\n\n\ndef find_nearest_white(img, target):\n    nonzero = np.argwhere(img == 255)\n    distances = np.sqrt((nonzero[:,0] - target[0]) ** 2 + (nonzero[:,1] - target[1]) ** 2)\n    nearest_index = np.argmin(distances)\n    return nonzero[nearest_index]\n\n\n\ndef zig_zag2(start_point,end_point,zig_zag_image,boundary_image, i ): #zigzagsize will scale the size of the zig zags\n    #inputs: start_point,end_point,zig_zag_image,boundary_image, i\n    # i is just an integer, if even it will zig, if odd it will zag\n\n    \n    slope_vector = np.array([ end_point[0]- start_point[0] , end_point[1] - start_point[1] ]) # vector representation of slope here in format [y , x]\n    # step_vector = slope_vector / (num_turns - 1) # partition total change in slope into steps to zig zag across and -1 because the number of turns will be one less than the number of partitions (also in format [y , x])\n    normalized_slope_vector = slope_vector/np.linalg.norm(slope_vector) # here we are getting the normalized step_vector so we can use its direction and ignore its magnitude \n    # print(normalized_slope_vector)\n    \n    #--------------------- find closest obstacle in perpendicular polygon (boundary is start and end points and the 2 perpendicular image edge points )\n    #--------- get vector (in correct direction) perpendicular slope to calculate distance to nearest boundary in path of zig zag\n    if i%2 ==0: # set zig and zag values (with alternating directions): this is basically creating zig zag vector perpendicular to the skeleton chunk input\n        normalized_perpendicular_vector = [-normalized_slope_vector[1], normalized_slope_vector[0]]\n    else:\n        normalized_perpendicular_vector = [normalized_slope_vector[1], -normalized_slope_vector[0]]\n    #------------- find 
corresponding edge points\n    \n    scalar = boundary_image.shape[0]+boundary_image.shape[1]\n    out_of_bounds_scalar = np.multiply(normalized_perpendicular_vector, scalar) # this is to guarantee the start /endpoint linesegments extend past edges of images \n    \n    #define bounds of image\n    top_side = [(1,1) , (1,boundary_image.shape[1] -1 )]\n    bottom_side = [(boundary_image.shape[0] -1,boundary_image.shape[1] -1) , (boundary_image.shape[0]-1 ,1) ]\n    right_side = [(1 ,boundary_image.shape[1] - 1) , (boundary_image.shape[0]-1,boundary_image.shape[1]-1)]\n    left_side = [(boundary_image.shape[0]-1,1) , (1,1)]\n    \n    #define perpendicular start and endpoint lines \n    start_point_perpendicular_line = [ start_point, start_point + out_of_bounds_scalar ]\n    end_point_perpendicular_line = [ end_point, end_point + out_of_bounds_scalar ]\n    \n    #for checking\n    test_image = np.zeros((boundary_image.shape[0],boundary_image.shape[1])) # get black background \n    \n    #create for loop for finding corresponding edge intersection\n    sides_list = [ top_side, bottom_side, right_side, left_side]\n    for side in sides_list:\n        try:\n            start_point_intersection = find_line_segment_intersection(start_point_perpendicular_line,side)\n            end_point_intersection = find_line_segment_intersection(end_point_perpendicular_line,side)\n        except:\n            # print(\"no intersection\")\n            pass\n    \n    # ------------- create polygon of search area for finding the nearest white pixel\n\n    tupVerts=[ start_point, end_point , end_point_intersection,start_point_intersection ]\n\n    dim1 = boundary_image.shape[0]\n    dim2= boundary_image.shape[1]\n    if dim1 >= dim2:\n        grid_dim = dim1\n    else:\n        grid_dim = dim2\n    x, y = np.meshgrid(np.arange(grid_dim), np.arange(grid_dim)) # make a canvas with coordinates\n    x, y = x.flatten(), y.flatten()\n    points = np.vstack((x,y)).T \n\n    p = Path(tupVerts) # make a polygon\n    grid = p.contains_points(points)\n    mask = grid.reshape(grid_dim,grid_dim) # now you have a mask with points inside a polygon\n    show_image(mask)\n    \n    #------------------------ here i need to figure out how to know the zig zag size : sense the edge \n    \n    negative_image = get_negative_image( boundary_image)\n    \n    a = find_nearest_white( negative_image, end_point) #this might have confusion of coordinate point pairs \n    b = end_point\n    # print( a,b )\n    \n    dist = np.linalg.norm(a-b) #this calculates euclidean distance between two points \n    # print(\"distance from nearest boundary \", dist , '\\n')\n    zig_zag_size_vector = normalized_slope_vector * int(dist-.05) # make the zig zag only as far as the closest boundary \n\n    if dist == 0: \n        # show_image ( negative_image)\n        print(\"warning: point at \", b , \"distance from nearest boundary is \", dist , '\\n')\n    # print(zig_zag_size_vector)\n\n    # ----------------------- below the actual zig zags happen \n\n    if i%2 ==0: # set zig and zag values (with alternating directions): this is basically creating zig zag vector perpendicular to the skeleton chunk input\n        zigzag = [-zig_zag_size_vector[1], zig_zag_size_vector[0]]\n    else:\n        zigzag = [zig_zag_size_vector[1], -zig_zag_size_vector[0]]\n\n    pt1 = start_point\n    pt2 = end_point + zigzag\n    pt3 = end_point\n    try : zig_zag_image = drawline(pt1,pt2,zig_zag_image)\n    except : print(\"error zigging\")\n    try : zig_zag_image = drawline(pt2,pt3,zig_zag_image)\n    except : print(\"error zigging\")\n    \n    #image = drawline(start_point,end_point,image) #this is just for error checking, draw a line between start and end points \n    #show_image(image)\n    # show_image(get_negative_image( boundary_image))\n    
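    # (added summary comment, not in the original) recap of the construction above:
    # the perpendicular of the normalized slope [y, x] is [-x, y] on even calls and
    # [x, -y] on odd calls, so successive segments alternate sides of the skeleton,
    # and the zig length is capped by the distance to the nearest boundary pixel
    # that find_nearest_white() reports on the inverted boundary image.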
return(zig_zag_image)\n\n\n\n# future me : this is what happens --> make array of all pixels on perpedicular vector \n# make 1 directional --> only in direction of zig zag","repo_name":"jzraiti/Coverage_Algorithm_Enviornmental_Sampling_Autonomous_Surface_Vehicle","sub_path":"Project_Files/Jasons_old_scripts/zig_zag1.9.py","file_name":"zig_zag1.9.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39975650537","text":"import pandas as pd\n\nfrom psycop.common.model_evaluation.binary.utils import (\n auroc_by_group,\n sensitivity_by_group,\n)\nfrom psycop.common.test_utils.str_to_df import str_to_df\n\n\ndef test_auroc_by_group():\n input_df = str_to_df(\n \"\"\"id,y,y_hat_probs,\n 1,1,0.9, # Good prediction\n 1,1,0.8, # Good prediction\n 1,1,0.7, # Good prediction\n 1,0,0.6, # Good prediction\n 1,0,0.5, # Good prediction\n 2,1,0.7, # Good prediction\n 2,1,0.2, # Bad prediction\n 2,1,0.8, # Good prediction\n 2,0,0.9, # Bad prediction\n 2,0,0.1, # Good prediction\n 3,1,0.5, # Bad prediction\n 3,1,0.4, # Bad prediction\n 3,1,0.3, # Bad prediction\n 3,0,0.9, # Bad prediction\n 3,0,0.8, # Bad prediction\n \"\"\",\n )\n\n large_df = pd.concat([input_df for _ in range(10)])\n\n auroc_by_group_df = auroc_by_group(\n df=large_df,\n groupby_col_name=\"id\",\n confidence_interval=True,\n n_bootstraps=10,\n )\n\n assert auroc_by_group_df[\"auroc\"].is_monotonic_decreasing\n assert auroc_by_group_df[\"n_in_bin\"].to_list() == [50.0, 50.0, 50.0]\n assert auroc_by_group_df[\"ci_lower\"].is_monotonic_decreasing\n assert auroc_by_group_df[\"ci_upper\"].is_monotonic_decreasing\n\n\ndef test_sensitivity_by_group():\n input_df = str_to_df(\n \"\"\"id,y,y_hat,\n 1,1,1, # Good prediction\n 1,1,1, # Good prediction\n 1,1,1, # Good prediction\n 2,1,0, # Bad prediction\n 2,1,0, # Bad prediction\n 2,1,0, # Bad prediction\n \"\"\",\n )\n\n # Test using a categorical\n input_df[\"id\"] = pd.Categorical(input_df[\"id\"], ordered=True)\n\n large_df = pd.concat([input_df for _ in range(10)])\n\n output_df = sensitivity_by_group(\n df=large_df,\n groupby_col_name=\"id\",\n confidence_interval=True,\n n_bootstraps=10,\n )\n\n assert output_df[\"sensitivity\"].to_list() == [1.0, 0.0]\n assert output_df[\"n_in_bin\"].to_list() == [30.0, 30.0]\n assert output_df[\"ci_lower\"].to_list() == [1.0, 0.0]\n assert output_df[\"ci_upper\"].to_list() == [1.0, 0.0]\n","repo_name":"Aarhus-Psychiatry-Research/psycop-common","sub_path":"psycop/common/model_evaluation/binary/test_subgroup_data.py","file_name":"test_subgroup_data.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"24095073972","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 13 11:57:03 2021\n\n@author: leona\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\ncolumns = ['data_inversa', 'uf', 'mortos']\nAC = []\ndata = pd.DataFrame(columns=columns)\n\nfor i,ano in enumerate(np.arange(2007,2022)):\n \n \n AC.append(pd.read_csv(\"./Detram/datatran\" + str(ano) + '.csv', sep = ';'))\n \n df = AC[i].loc[:, columns]\n \n data = pd.concat([data,df], ignore_index=True)\n\ndata['ano'] = pd.DatetimeIndex(data['data_inversa']).year\n\n\ndataMortes = data.groupby(['ano', 'uf'])['mortos'].sum().reset_index(name='mortos')\ndataAcidentes = data.groupby(['ano', 'uf']).size().reset_index(name='acidentes')\n\ndataMortes['acidentes'] = 
dataAcidentes.acidentes\nfig = px.bar(dataAcidentes, x = 'ano', y = 'acidentes', color='uf', title=\"Quantidade de Acidente por Estado\")\nfig.show()\n\nfig = px.scatter(dataMortes, x=\"acidentes\", y=\"mortos\", animation_frame=\"ano\", animation_group=\"uf\",\n size=\"mortos\", color=\"uf\", hover_name=\"uf\",\n log_x=True)\n\nfig[\"layout\"].pop(\"updatemenus\") # optional, drop animation buttons\nfig.show()\n \n ","repo_name":"LeonardoVieiraGuimaraes/visualizacaoDados-doutorado","sub_path":"AT2.py","file_name":"AT2.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15283919796","text":"\"\"\"\r\nYou are given a string and two markers (the initial and final). You have to find a substring enclosed between these two markers.\r\nBut there are a few important conditions:\r\n\r\nThe initial and final markers are always different.\r\nIf there is no initial marker, then the first character should be considered the beginning of a string.\r\nIf there is no final marker, then the last character should be considered the ending of a string.\r\nIf the initial and final markers are missing then simply return the whole string.\r\nIf the final marker comes before the initial marker, then return an empty string.\r\n\r\nInput: Three arguments. All of them are strings. The second and third arguments are the initial and final markers.\r\nOutput: A string.\r\n\"\"\"\r\n\r\ntxt = \"heiko\"\r\nprint(txt.index(\"ko\"))\r\nprint(txt.find(\"ki\"))\r\nprint(txt[txt.find(\"he\"):])\r\ndef between_markers(text: str, begin: str, end: str) -> str:\r\n \"\"\"\r\n returns substring between two given markers\r\n \"\"\"\r\n s,e = text.find(begin), text.find(end)\r\n print(s, e)\r\n if e == -1:\r\n if e == s:\r\n return text\r\n else: return text[s + len(begin):]\r\n elif s == -1:\r\n return text[:e]\r\n elif s < e:\r\n return text[s + len(begin):e]\r\n else:\r\n return \"\"\r\n\r\nprint(between_markers('No [/b]b]hi', '[b]', '[/b]'))","repo_name":"greatcrock/First","sub_path":"ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74720229316","text":"import asyncio\nimport json\n\nfrom async_swgoh_help import async_swgoh_help, settings\n\n\n# Change the settings below\n#creds = settings('deesnow', 'R594HXam8guw')\ncreds = settings('deesnow', 'R594HXam8guw')\nclient = async_swgoh_help(creds)\n\nallycodes = [376764962]\n\n\nasync def guild():\n print(\"getGuild called\")\n # zetas = await client.fetchZetas()\n # return zetas['zetas'] --- EZ OK\n guild_allycodes = await client.fetchGuilds(allycodes)\n\n print(\"getGuild return\")\n\n return guild_allycodes\n\nasync def player():\n print(\"getPLayer called\")\n player = await client.fetchPlayers(allycodes)\n print(\"getPLayer return\")\n return player\n\n\nasync def main():\n t1 = loop.create_task(guild())\n t2 = loop.create_task(player())\n await t1\n await t2\n return t1._result, t2._result\n\n\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n t1, t2 = loop.run_until_complete(main())\n print(t1, t2)\n \n ","repo_name":"deesnow/async_swgoh_help","sub_path":"async_example.py","file_name":"async_example.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36759308929","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# 
In[25]:\n\n\nget_ipython().system(' pip install sklearn')\n\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport sklearn\nimport pickle\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn import model_selection\nfrom sklearn import linear_model\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold\nfrom numpy import save\n\n\n# In[2]:\n\n\ndata = pd.read_csv(\"cleaned_credit_data.csv\")\n\n\n# In[3]:\n\n\ndata.head()\n\n\n# In[4]:\n\n\npredict = \"response\"\n\nX = data[[\"sex\", \"housing\", \"saving_acct\", \"chk_acct\", \"age\", \"duration\", \"amount\"]]\ny = np.array(data[predict])\n\n\n# In[5]:\n\n\nX.head()\n\n\n# In[6]:\n\n\n#ONE HOT ENCODING THE CATEGORICAL DATA AND NORMALIZE THE NUMERICAL DATA\n\n\n# In[7]:\n\n\n#One hot encoding\nohe = make_column_transformer((OneHotEncoder(), [\"sex\", \"housing\", \"saving_acct\", \"chk_acct\"]), remainder=\"passthrough\")\nx = ohe.fit_transform(X)\n\n#Normalize to 0 to 1\nnormalize = MinMaxScaler()\nnormalize.fit(x)\nx = normalize.transform(x)\n\n\n# In[8]:\n\n\nprint(x)\n\n\n# In[9]:\n\n\n#DIVIDING DATA INTO TRAIN, VALIDATION, AND TES 0.8-0.1-0.1\n\n\n# In[10]:\n\n\nx_train, x_val, y_train, y_val = model_selection.train_test_split(x, y, test_size=0.1)\n\nx_train, x_test, y_train, y_test = model_selection.train_test_split(x_train, y_train, test_size=(0.1/0.9))\n\n\n# In[11]:\n\n\n#LOGISTIC REGRESSION\n\n\n# In[12]:\n\n\nlogisticReg = linear_model.LogisticRegression().fit(x_train, y_train)\nlogisticReg.score(x_test, y_test)\n\n\n# In[13]:\n\n\ny_predicted = logisticReg.predict(x_test)\nconfusion_matrix(y_test, y_predicted)\n\n\n# In[14]:\n\n\n#The number of true positive is 64; false positive is 5; false negative is 18; and true negative is 13 \n\n\n# In[15]:\n\n\n#Logistic Regression, SVC, Random Forest\n\n\n# In[16]:\n\n\nkf = KFold(n_splits=10)\n\nacc_logisticReg = 0\nacc_svc = 0\nacc_ranForest = 0\n\nfor train_index, test_index in kf.split(x):\n x_train, x_test = x[train_index], x[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n logisticReg = linear_model.LogisticRegression()\n logisticReg.fit(x_train, y_train)\n acc1 = logisticReg.score(x_test, y_test)\n\n svc = svm.SVC()\n svc.fit(x_train, y_train)\n acc2 = svc.score(x_test, y_test)\n \n ranForest = RandomForestClassifier(max_depth=20)\n ranForest.fit(x_train, y_train)\n acc3 = ranForest.score(x_test, y_test)\n \n if acc1 > acc_logisticReg:\n acc_logisticReg = acc1\n with open(\"logisticReg.pickle\", \"wb\") as f:\n pickle.dump(logisticReg, f)\n \n if acc2 > acc_svc:\n acc_svc = acc2\n with open(\"SVC.pickle\", \"wb\") as f:\n pickle.dump(svc, f)\n \n if acc3 > acc_ranForest:\n acc_ranForest = acc3\n with open(\"ranForest.pickle\", \"wb\") as f:\n pickle.dump(ranForest, f)\n\n\n# In[18]:\n\n\nprint(\"Accuracy of Logistic Regression: \", acc_logisticReg)\nprint(\"Accuracy of SVC: \", acc_svc)\nprint(\"Accuracy of Random Forest: \", acc_ranForest)\n\n\n# In[19]:\n\n\n#Save numpy data to a file\nsave(\"x_data.npy\", x)\nsave(\"y_data.npy\", y)\n\n\n# In[20]:\n\n\n#Load models from pickle\n\n\n# In[21]:\n\n\npickle_in = open(\"logisticReg.pickle\", \"rb\")\nlogisticReg = pickle.load(pickle_in)\n\npickle_in = open(\"SVC.pickle\", \"rb\")\nsvc = pickle.load(pickle_in)\n\npickle_in = open(\"ranForest.pickle\", 
\"rb\")\nranForest = pickle.load(pickle_in)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"pradanadan/credit-risk-scoring","sub_path":"sklearn_machine_learning.py","file_name":"sklearn_machine_learning.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39924976252","text":"import pytest\nfrom pathlib import Path\nfrom sqlparse import split, parse\nfrom sqlparse.tokens import Keyword\n\ntopdir = Path(__file__).resolve().parent.parent\n\nsql_files = list(sorted(topdir.glob(\"schema/**/*.sql\")))\n\n\n@pytest.mark.parametrize(\"path\", sql_files, ids = lambda path: str(path))\ndef test_sql_script(path):\n sql = path.read_text(encoding = \"utf-8\")\n\n statements = [parse(s)[0] for s in split(sql)]\n\n if verify_script(path):\n final_type = \"rollback\"\n else:\n final_type = \"commit\"\n\n is_final_type = lambda statement: statement.get_type() == final_type.upper()\n\n has_begin = statements[0].token_first(skip_cm = True).match(Keyword, [\"begin\"])\n has_final = is_final_type(statements[-1])\n has_premature = any(map(is_final_type, statements[:-1]))\n\n assert has_begin, f\"{path.relative_to(topdir)}: first statement is not begin\"\n assert has_final, f\"{path.relative_to(topdir)}: final statement is not a {final_type}\"\n assert not has_premature, f\"{path.relative_to(topdir)}: premature {final_type}(s) found\"\n\n\ndef verify_script(path) -> bool:\n return path.relative_to(topdir).parts[:2] == (\"schema\", \"verify\")\n","repo_name":"seattleflu/id3c","sub_path":"tests/sqitch-transactions.py","file_name":"sqitch-transactions.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"62"} +{"seq_id":"25047242520","text":"from fastapi import APIRouter, status, Depends, Query\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom app.feedback import views\nfrom app.feedback.schemas import CreateFeedback, GetFeedback, PaginateFeedbacks, UpdateFeedback\nfrom app.permission import is_active, is_superuser\nfrom app.schemas import Message\nfrom db import get_db\n\nfeedbacks_router = APIRouter()\n\n\n@feedbacks_router.post(\n '/',\n name='Create feedback',\n description='Create feedback',\n response_description='Message',\n response_model=Message,\n status_code=status.HTTP_201_CREATED,\n tags=['feedbacks'],\n)\nasync def create_feedback(\n schema: CreateFeedback,\n user_id: int = Depends(is_active),\n db: AsyncSession = Depends(get_db),\n):\n return await views.create_feedback(db, user_id, schema)\n\n\n@feedbacks_router.get(\n '/',\n name='Get all feedbacks',\n description='Get all feedbacks',\n response_description='Feedbacks',\n response_model=PaginateFeedbacks,\n status_code=status.HTTP_200_OK,\n tags=['feedbacks'],\n dependencies=[Depends(is_superuser)],\n)\nasync def get_all_feedbacks(\n page: int = Query(default=1, gt=0),\n page_size: int = Query(default=1, gt=0),\n db: AsyncSession = Depends(get_db),\n):\n return await views.get_all_feedbacks(db=db, page=page, page_size=page_size)\n\n\n@feedbacks_router.get(\n '/sort',\n name='Sort feedbacks',\n description='Sort feedbacks',\n response_description='Feedbacks',\n response_model=PaginateFeedbacks,\n status_code=status.HTTP_200_OK,\n tags=['feedbacks'],\n dependencies=[Depends(is_superuser)],\n)\nasync def sort_feedbacks(\n desc: bool,\n page: int = Query(default=1, gt=0),\n page_size: int = Query(default=1, gt=0),\n db: AsyncSession = Depends(get_db),\n):\n return 
await views.sort_feedbacks(db=db, page=page, page_size=page_size, desc=desc)\n\n\n@feedbacks_router.get(\n '/{pk}',\n name='Get feedback',\n description='Get feedback',\n response_description='Feedback',\n response_model=GetFeedback,\n status_code=status.HTTP_200_OK,\n tags=['feedbacks'],\n dependencies=[Depends(is_superuser)],\n)\nasync def get_feedback(pk: int, db: AsyncSession = Depends(get_db)):\n return await views.get_feedback(db, pk)\n\n\n@feedbacks_router.put(\n '/{pk}',\n name='Update feedback',\n description='Update feedback',\n response_description='Feedback',\n response_model=GetFeedback,\n status_code=status.HTTP_200_OK,\n tags=['feedbacks'],\n dependencies=[Depends(is_superuser)],\n)\nasync def update_feedback(pk: int, schema: UpdateFeedback, db: AsyncSession = Depends(get_db)):\n return await views.update_feedback(db, pk, schema)\n\n\n@feedbacks_router.delete(\n '/{pk}',\n name='Delete feedback',\n description='Delete feedback',\n response_description='Message',\n response_model=Message,\n status_code=status.HTTP_200_OK,\n tags=['feedbacks'],\n dependencies=[Depends(is_superuser)],\n)\nasync def delete_feedback(pk: int, db: AsyncSession = Depends(get_db)):\n return await views.delete_feedback(db, pk)\n","repo_name":"DiSkills/Anti-Freelancer","sub_path":"services/other/app/feedback/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19681916468","text":"# -*- coding: utf-8 -*-\n# need this if you want to save tfidf_matr\n# explorar usar tf-id\n\"\"\"\nTF-idf\n\"\"\"\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport sara.core.cloud as cloud\nimport sara.core.database as bd\nfrom sara.core.pre_processamento import pre_processamento\n\n\ndef carrega_tweet_mongo(nome_base, colecao):\n \"\"\"Carrega e realiza a chamada de limpeza dos tweets\"\"\"\n cliente = bd.inicia_conexao()\n colecao = bd.carregar_banco(cliente, nome_base, colecao)\n tweets = tweets = colecao.find({})\n lista_tweets = []\n for tweet in tweets:\n try:\n full_tweet = tweet[\"extended_tweet\"][\"full_text\"]\n if len(full_tweet) > 1:\n lista_tweets.append(\n pre_processamento(full_tweet)\n )\n except Exception:\n pass\n return lista_tweets\n\n\n# for i in lista_tweets:\n# print(i)\n# #limiar minimo para selecionar o topico\nlimiar = 0.001\ntopicos_validos = []\n\n\ndef completo(lista):\n \"\"\"\n Combina as strings\n \"\"\"\n string = \"\"\n for i in lista:\n string += i + \" \"\n return [string]\n\n\ndef getkey(item):\n return item[1]\n\n\ndef main(banco, colecao):\n lista_tweets = carrega_tweet_mongo(banco, colecao)\n # Encontra as palavras mais relevantes usando abordagem de tf-idf\n doc_completo = completo(lista_tweets)\n tf = TfidfVectorizer(lista_tweets)\n tfidf_matrix = tf.fit_transform(doc_completo)\n # corpus\n feature_names = tf.get_feature_names()\n doc = 0\n feature_index = tfidf_matrix[doc, :].nonzero()[1]\n tfidf_scores = zip(\n feature_index, [tfidf_matrix[doc, x] for x in feature_index]\n )\n\n final = []\n l_limiar = []\n for w, s in [(feature_names[i], s) for (i, s) in tfidf_scores]:\n final.append((w, s))\n\n print(len(l_limiar))\n last = sorted(final, key=getkey, reverse=True)\n\n for i in last:\n if float(i[1]) > limiar:\n retorno = pre_processamento.pre_processamento(i[0])\n if len(retorno) > 1:\n if float(i[1]) > 0.01:\n # print(i,\"Cloud:\",i)\n topicos_validos.append(i)\n\n print(len(topicos_validos))\n 
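# Assumption (cloud_tf lives in sara.core.cloud and is not shown here): it is taken to render a word cloud from the (term, tf-idf score) pairs collected above, capped at 100 words; only terms whose score cleared the 0.01 cutoff were kept in topicos_validos.\n    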
cloud.cloud_tf(topicos_validos, 100)\n return topicos_validos\n","repo_name":"cmagnobarbosa/sara_public","sub_path":"sara/core/bagwords.py","file_name":"bagwords.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"18922967933","text":"# -*- coding:utf-8 -*-\nimport torch.nn as nn\n\n\nclass BasicCNN(nn.Module):\n def __init__(self, in_channels, output_channels, kernel_size, stride, padding=0):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=output_channels,\n kernel_size=kernel_size, stride=stride, padding=padding)\n self.bn = nn.BatchNorm2d(num_features=output_channels)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, inputs):\n x = self.conv(inputs)\n x = self.bn(x)\n x = self.relu(x)\n return x\n","repo_name":"dlcjfgmlnasa/Image-Classification","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"18988681946","text":"\"\"\" AlgoRithm for Charge Transfer Inefficiency Correction\r\n\r\n Add or remove image trailing due to charge transfer inefficiency (CTI) in CCD detectors.\r\n \r\n https://github.com/jkeger/arcticpy\r\n\r\n Jacob Kegerreis (2020) jacob.kegerreis@durham.ac.uk\r\n James Nightingale\r\n Richard Massey\r\n\r\n WIP...\r\n\"\"\"\r\nimport numpy as np\r\nimport os\r\nfrom copy import deepcopy\r\n\r\nfrom arctic.roe import (\r\n ROE,\r\n ROEChargeInjection,\r\n ROETrapPumping,\r\n)\r\nfrom arctic.ccd import CCD, CCDPhase\r\nfrom arctic.traps import (\r\n Trap,\r\n TrapLifetimeContinuum,\r\n TrapLogNormalLifetimeContinuum,\r\n TrapInstantCapture,\r\n)\r\nfrom arctic.trap_managers import (\r\n AllTrapManager,\r\n TrapManager,\r\n TrapManagerTrackTime,\r\n TrapManagerInstantCapture,\r\n)\r\nfrom arctic import util\r\n\r\n\r\ndef _clock_charge_in_one_direction(\r\n image, roe, ccd, traps, express, offset, window_row, window_column,\r\n):\r\n \"\"\"\r\n Add CTI trails to an image by trapping, releasing, and moving electrons \r\n along their independent columns.\r\n\r\n Parameters\r\n ----------\r\n image : np.ndarray\r\n The input array of pixel values.\r\n roe : ROE\r\n The object describing the clocking of electrons with read-out \r\n electronics.\r\n ccd : CCD\r\n The object describing the CCD volume. \r\n traps : [Trap] or [[Trap]]\r\n A list of one or more trap objects. To use different types of traps \r\n that will require different watermark levels, pass a 2D list of \r\n lists, i.e. a list containing lists of one or more traps for each \r\n type. 
\r\n express : int\r\n The factor by which pixel-to-pixel transfers are combined for \r\n efficiency.\r\n\r\n Returns\r\n -------\r\n image : np.ndarray\r\n The output array of pixel values.\r\n \"\"\"\r\n\r\n # Parse inputs\r\n n_rows_in_image, n_columns_in_image = image.shape\r\n if window_row is None:\r\n window_row = range(n_rows_in_image)\r\n elif isinstance(window_row, int):\r\n window_row = [window_row]\r\n if window_column is None:\r\n window_column = range(n_columns_in_image)\r\n\r\n # Calculate the number of times that the effect of each pixel-to-pixel transfer can be replicated\r\n express_matrix, when_to_store_traps = roe.express_matrix_from_pixels_and_express(\r\n window_row, express=express, offset=offset,\r\n )\r\n (n_express, n_rows_to_process) = express_matrix.shape\r\n\r\n # Decide in advance which steps need to be evaluated, and which can be skipped\r\n phases_with_traps = [i for i, x in enumerate(ccd.fraction_of_traps) if x > 0]\r\n n_phases_with_traps = len(phases_with_traps)\r\n steps_with_nonzero_dwell_time = [i for i, x in enumerate(roe.dwell_times) if x > 0]\r\n n_steps_with_nonzero_dwell_time = len(steps_with_nonzero_dwell_time)\r\n\r\n # Set up an array of trap managers able to monitor the occupancy of (all types of) traps\r\n max_n_transfers = n_rows_to_process * n_steps_with_nonzero_dwell_time\r\n trap_managers = AllTrapManager(traps, max_n_transfers + 1, ccd)\r\n\r\n # Temporarily expand image, if charge released from traps ever migrates to\r\n # a different charge packet, at any time during the clocking sequence\r\n n_rows_zero_padding = max(roe.pixels_accessed_during_clocking) - min(\r\n roe.pixels_accessed_during_clocking\r\n )\r\n if n_rows_zero_padding > 0:\r\n image = np.concatenate(\r\n (\r\n image,\r\n np.zeros((n_rows_zero_padding, n_columns_in_image), dtype=image.dtype),\r\n ),\r\n axis=0,\r\n )\r\n\r\n # Read out one column of pixels through one (column of) traps\r\n for column_index in range(len(window_column)):\r\n\r\n # Monitor the traps in every pixel, or just one (express=1) or a few\r\n # (express=a few) then replicate their effect\r\n for express_index in range(n_express):\r\n\r\n # Reset trap occupancy levels\r\n trap_managers.restore()\r\n checksum = np.sum(image[:, window_column[column_index]])\r\n\r\n # Each pixel\r\n for row_index in range(len(window_row)):\r\n\r\n express_multiplier = express_matrix[express_index, row_index]\r\n if express_multiplier == 0:\r\n continue\r\n\r\n for clocking_step in steps_with_nonzero_dwell_time:\r\n n_electrons_trapped = 0\r\n\r\n for phase in phases_with_traps:\r\n\r\n # Extract initial number of electrons from the relevant charge cloud\r\n potential = roe.clock_sequence[clocking_step][phase]\r\n row_read = (\r\n window_row[row_index]\r\n + potential[\"capture_from_which_pixel\"]\r\n )\r\n n_free_electrons = (\r\n image[row_read, window_column[column_index]]\r\n ) * potential[\"high\"]\r\n\r\n # Allow electrons to be released from and captured by charge traps\r\n n_electrons_released_and_captured = 0\r\n for trap_manager in trap_managers[phase]:\r\n n_electrons_released_and_captured += trap_manager.n_electrons_released_and_captured(\r\n n_free_electrons=n_free_electrons,\r\n dwell_time=roe.dwell_times[clocking_step],\r\n ccd_filling_function=ccd.cloud_fractional_volume_from_n_electrons_in_phase(\r\n phase\r\n ),\r\n express_multiplier=express_multiplier,\r\n )\r\n\r\n # Return the released electrons back to the relevant charge cloud\r\n row_write = (\r\n window_row[row_index] + 
potential[\"release_to_which_pixel\"]\r\n )\r\n image[row_write, window_column[column_index]] += (\r\n n_electrons_released_and_captured\r\n * potential[\"release_fraction_to_pixel\"]\r\n * express_multiplier\r\n )\r\n\r\n # Save trap occupancy at the end of one express\r\n if when_to_store_traps[express_index, row_index]:\r\n trap_managers.save()\r\n\r\n # Reset watermarks, effectively setting trap occupancy to zero\r\n # input(\"Press Enter to continue...\")\r\n if roe.empty_traps_between_columns:\r\n trap_managers.empty_all_traps()\r\n else:\r\n trap_managers.save()\r\n\r\n # Recombine the image for multi-phase clocking\r\n # if n_simple_phases > 1:\r\n # image = image.reshape((int(rows / n_simple_phases), n_simple_phases, columns)).sum(axis=1)\r\n # Unexpand image\r\n if n_rows_zero_padding > 0:\r\n image = image[0:-n_rows_zero_padding, :]\r\n\r\n return image\r\n\r\n\r\ndef add_cti(\r\n image,\r\n parallel_express=0,\r\n parallel_roe=None,\r\n parallel_ccd=None,\r\n parallel_traps=None,\r\n parallel_offset=0,\r\n parallel_window=None,\r\n serial_express=0,\r\n serial_roe=None,\r\n serial_ccd=None,\r\n serial_traps=None,\r\n serial_offset=0,\r\n serial_window=None,\r\n):\r\n \"\"\"\r\n Add CTI trails to an image by trapping, releasing, and moving electrons \r\n along their independent columns, for parallel and/or serial clocking.\r\n\r\n Parameters\r\n ----------\r\n image : np.ndarray\r\n The input array of pixel values, assumed to be in units of electrons.\r\n parallel_express : int\r\n The factor by which pixel-to-pixel transfers are combined for \r\n efficiency for parallel clocking.\r\n parallel_roe : ROE\r\n The object describing the clocking read-out electronics for parallel \r\n clocking.\r\n parallel_ccd : CCD\r\n The object describing the CCD volume for parallel clocking. For \r\n multi-phase clocking optionally use a list of different CCD volumes\r\n for each phase, in the same size list as parallel_roe.dwell_times.\r\n parallel_traps : [Trap] or [[Trap]]\r\n A list of one or more trap objects for parallel clocking. To use \r\n different types of traps that will require different watermark \r\n levels, pass a 2D list of lists, i.e. a list containing lists of \r\n one or more traps for each type.\r\n parallel_offset : int\r\n The supplied image array is a postage stamp offset this number of \r\n pixels from the readout register. This increases the number of\r\n pixel-to-pixel transfers assumed if readout is normal (and has no\r\n effect for other types of clocking).\r\n parallel_window : range() or list\r\n For speed, calculate only the effect on this subset of pixels. 
\r\n Note that, because of edge effects, you should start the range several \r\n pixels before the actual region of interest.\r\n serial_* : *\r\n The same as the parallel_* objects described above but for serial \r\n clocking instead.\r\n\r\n Returns\r\n -------\r\n image : np.ndarray\r\n The output array of pixel values.\r\n \"\"\"\r\n\r\n # If ROE not provided then assume simple, single-phase clocking\r\n if parallel_roe is None:\r\n parallel_roe = ROE()\r\n if serial_roe is None:\r\n serial_roe = ROE()\r\n\r\n # Don't modify the external array passed to this function\r\n image = deepcopy(image)\r\n\r\n if parallel_traps is not None:\r\n\r\n image = _clock_charge_in_one_direction(\r\n image=image,\r\n roe=parallel_roe,\r\n traps=parallel_traps,\r\n ccd=parallel_ccd,\r\n express=parallel_express,\r\n offset=parallel_offset,\r\n window_row=parallel_window,\r\n window_column=serial_window,\r\n )\r\n\r\n if serial_traps is not None:\r\n\r\n image = image.T.copy()\r\n\r\n image = _clock_charge_in_one_direction(\r\n image=image,\r\n roe=serial_roe,\r\n traps=serial_traps,\r\n ccd=serial_ccd,\r\n express=serial_express,\r\n offset=serial_offset,\r\n window_row=serial_window,\r\n window_column=parallel_window,\r\n )\r\n\r\n image = image.T\r\n\r\n return image\r\n\r\n\r\ndef remove_cti(\r\n image,\r\n iterations,\r\n parallel_express=0,\r\n parallel_roe=None,\r\n parallel_ccd=None,\r\n parallel_traps=None,\r\n parallel_offset=0,\r\n parallel_window=None,\r\n serial_express=0,\r\n serial_roe=None,\r\n serial_ccd=None,\r\n serial_traps=None,\r\n serial_offset=0,\r\n serial_window=None,\r\n):\r\n \"\"\"\r\n Add CTI trails to an image by trapping, releasing, and moving electrons \r\n along their independent columns, for parallel and/or serial clocking.\r\n\r\n Parameters\r\n ----------\r\n image : np.ndarray\r\n The input array of pixel values.\r\n iterations : int\r\n If CTI is being corrected, iterations determines the number of times \r\n clocking is run to perform the correction via forward modeling. For \r\n adding CTI only one run is required and iterations is ignored.\r\n parallel_express : int\r\n The factor by which pixel-to-pixel transfers are combined for \r\n efficiency for parallel clocking.\r\n parallel_roe : ROE\r\n The object describing the clocking read-out electronics for parallel \r\n clocking.\r\n parallel_ccd : CCD\r\n The object describing the CCD volume for parallel clocking. For \r\n multi-phase clocking optionally use a list of different CCD volumes\r\n for each phase, in the same size list as parallel_roe.dwell_times.\r\n parallel_traps : [Trap] or [[Trap]]\r\n A list of one or more trap objects for parallel clocking. To use \r\n different types of traps that will require different watermark \r\n levels, pass a 2D list of lists, i.e. a list containing lists of \r\n one or more traps for each type.\r\n parallel_offset : int\r\n The supplied image array is a postage stamp offset this number of \r\n pixels from the readout register. This increases the number of\r\n pixel-to-pixel transfers assumed if readout is normal (and has no\r\n effect for other types of clocking).\r\n parallel_window : range() or list\r\n For speed, calculate only the effect on this subset of pixels. 
\r\n Note that, because of edge effects, you should start the range several \r\n pixels before the actual region of interest.\r\n serial_* : *\r\n The same as the parallel_* objects described above but for serial \r\n clocking instead.\r\n\r\n Returns\r\n -------\r\n image : np.ndarray\r\n The output array of pixel values with CTI removed.\r\n \"\"\"\r\n\r\n # Initialise the iterative estimate of removed CTI\r\n image_remove_cti = deepcopy(image)\r\n\r\n # Estimate the image with removed CTI more precisely each iteration\r\n for iteration in range(iterations):\r\n\r\n image_add_cti = add_cti(\r\n image=image_remove_cti,\r\n parallel_express=parallel_express,\r\n parallel_roe=parallel_roe,\r\n parallel_ccd=parallel_ccd,\r\n parallel_traps=parallel_traps,\r\n parallel_offset=parallel_offset,\r\n parallel_window=parallel_window,\r\n serial_express=serial_express,\r\n serial_roe=serial_roe,\r\n serial_ccd=serial_ccd,\r\n serial_traps=serial_traps,\r\n serial_offset=serial_offset,\r\n serial_window=serial_window,\r\n )\r\n\r\n # Improved estimate of removed CTI\r\n image_remove_cti += image - image_add_cti\r\n\r\n return image_remove_cti\r\n","repo_name":"rjmassey/pyarctic","sub_path":"arctic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34176879411","text":"from include import *\n\nclass MLP(torch.nn.Module):\n # This is the MLP template for the Initialization, Vertex, Edge networks (see Table 2 in the appendix)\n def __init__(self, Din, Dhid, Dout):\n '''\n Din: input dimension\n Dhid: a list of hidden layer size\n Dout: output dimension\n '''\n super(MLP, self).__init__()\n\n self.layerIn = torch.nn.Linear(Din, Dhid[0])\n self.hidden = torch.nn.ModuleList()\n for ii in range(len(Dhid)-1):\n self.hidden.append(torch.nn.Linear(Dhid[ii], Dhid[ii+1]))\n self.layerOut = torch.nn.Linear(Dhid[-1], Dout)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.layerIn(x)\n x = self.relu(x)\n for ii in range(len(self.hidden)):\n x = self.hidden[ii](x)\n x = self.relu(x)\n x = self.layerOut(x)\n return x\n\nclass SubdNet(torch.nn.Module):\n # Subdivision network\n # This network consist of three MLPs (net_init, net_edge, net_vertex), and the forward pass is describe in the Section 5 of the paper \n def __init__(self, params):\n super(SubdNet, self).__init__()\n Din = params['Din'] # input dimension\n Dout = params['Dout'] # output dimension\n\n # initialize three MLPs \n self.net_init = MLP(4*Din -3, params['h_initNet'], Dout)\n self.net_edge = MLP(4*Dout-3, params['h_edgeNet'], Dout)\n self.net_vertex = MLP(4*Dout-3, params['h_vertexNet'], Dout)\n\n self.pool = torch.nn.AvgPool2d((2,1)) # half-edge pool\n self.numSubd = params[\"numSubd\"] # number of subdivisions\n\n def flapNormalization(self, hf, normalizeFeature = False):\n \"\"\"\n FLAPNORMALIZATION normalize the features of a half flap so that it is orientation and translation invariant (see Section 5)\n\n inputs:\n hf: 2*nE x 4 x Dim tensor of half flap features (in world coordinates)\n normalizeFeature: True/False whether to normalize the feature vectors \n\n output: \n hf_normalize: 2*nE x 4 x Dim tensor of half flap features (in local coordinates)\n localFrames a 3-by-3 matrix [b1; b2; b3] with frames b1, b2, b3\n\n Note: \n we only set \"normalizeFeature\" to True in the initialization network to make the differential coordinate features invariant to rigid motions, see figure 18 (top)\n 
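Construction (as implemented below): b1 is the unit vector along the flap's shared edge, b3 is the normalized average of the two face normals, and b2 = b3 x b1 completes the frame; positions are shifted by -V[:,0,:] and rotated into (b1, b2, b3), which gives the stated invariance.\n        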
\"\"\"\n\n V = hf[:,:,:3] # half flap vertex positison\n F = torch.tensor([[0,1,2],[1,0,3]]) # half flap face list\n\n # 1st frame: edge vector\n b1 = (V[:,1,:] - V[:,0,:]) / torch.norm(V[:,1,:] - V[:,0,:],dim = 1).unsqueeze(1)\n\n # 3rd frame: edge normal (avg of face normals)\n vec1 = V[:,F[:,1],:] - V[:,F[:,0],:]\n vec2 = V[:,F[:,2],:] - V[:,F[:,0],:]\n FN = torch.cross(vec1, vec2) # nF x 2 x 3\n FNnorm = torch.norm(FN, dim = 2)\n FN = FN / FNnorm.unsqueeze(2)\n eN = FN[:,0,:] + FN[:,1,:]\n b3 = eN / torch.norm(eN, dim = 1).unsqueeze(1)\n\n # 2nd frame: their cross product\n b2 = torch.cross(b3, b1)\n\n # concatenage all local frames\n b1 = b1.unsqueeze(1)\n b2 = b2.unsqueeze(1)\n b3 = b3.unsqueeze(1)\n localFrames = torch.cat((b1,b2,b3), dim = 1)\n\n # normalize features\n hf_pos = hf[:,:,:3] # half flap vertex position\n hf_feature = hf[:,:,3:] # half flap features\n hf_pos = hf_pos - V[:,0,:].unsqueeze(1) # translate\n hf_pos = torch.bmm(hf_pos, torch.transpose(localFrames,1,2))\n if normalizeFeature: # if also normalize the feature using local frames\n assert(hf_feature.size(2) == 3)\n hf_feature = torch.bmm(hf_feature, torch.transpose(localFrames,1,2))\n hf_normalize = torch.cat((hf_pos, hf_feature), dim = 2)\n return hf_normalize, localFrames\n\n def v2hf(self, fv, hfIdx):\n '''\n V2HF re-index the vertex feature (fv) to half flaps features (hf), given half flap index list (hfIdx)\n '''\n # get half flap indices\n fv0 = fv[hfIdx[:,0],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv1 = fv[hfIdx[:,1],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv2 = fv[hfIdx[:,2],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv3 = fv[hfIdx[:,3],:].unsqueeze(1) # 2*nE x 1 x Dout\n hf = torch.cat((fv0,fv1,fv2,fv3), dim = 1) # 2*nE x 4 x Dout\n\n # normalize the half flap features\n hf_normalize, localFrames = self.flapNormalization(hf) \n hf_normalize = hf_normalize.view(hf_normalize.size(0), -1) \n hf_normalize = hf_normalize[:,3:] # remove the first 3 components as they are always (0,0,0)\n return hf_normalize, localFrames\n \n def v2hf_initNet(self, fv, hfIdx):\n '''\n V2HF_INITNET re-index the vertex feature (fv) to half flaps features (hf), given half flap index list (hfIdx). 
This is for the initialization network only\n '''\n # get half flap indices\n fv0 = fv[hfIdx[:,0],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv1 = fv[hfIdx[:,1],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv2 = fv[hfIdx[:,2],:].unsqueeze(1) # 2*nE x 1 x Dout\n fv3 = fv[hfIdx[:,3],:].unsqueeze(1) # 2*nE x 1 x Dout\n hf = torch.cat((fv0,fv1,fv2,fv3), dim = 1) # 2*nE x 4 x Dout\n\n # normalize the half flap features (including the vector of differential coordinates see figure 18)\n hf_normalize, localFrames = self.flapNormalization(hf, True) \n hf_normalize = hf_normalize.view(hf_normalize.size(0), -1) \n hf_normalize = hf_normalize[:,3:] # remove the first 3 components as they are always (0,0,0)\n return hf_normalize, localFrames\n\n def local2Global(self, hf_local, LFs):\n '''\n LOCAL2GLOBAL turns position features (the first three elements) described in the local frame of an half-flap to world coordinates \n '''\n hf_local_pos = hf_local[:,:3] # get the vertex position features\n hf_feature = hf_local[:,3:] # get the high-dim features\n c0 = hf_local_pos[:,0].unsqueeze(1)\n c1 = hf_local_pos[:,1].unsqueeze(1)\n c2 = hf_local_pos[:,2].unsqueeze(1)\n hf_global_pos = c0*LFs[:,0,:] + c1*LFs[:,1,:] + c2*LFs[:,2,:]\n hf_global = torch.cat((hf_global_pos, hf_feature), dim = 1)\n return hf_global\n\n def halfEdgePool(self, fhe):\n '''\n average pooling of half edge features, see figure 17 (right)\n '''\n fhe = fhe.unsqueeze(0).unsqueeze(0)\n fe = self.pool(fhe)\n fe = fe.squeeze(0).squeeze(0)\n return fe\n\n def oneRingPool(self, fhe, poolMat, dof):\n '''\n average pooling over vertex one rings, see figure 17 (left, middle))\n '''\n fv = torch.spmm(poolMat, fhe)\n fv /= dof.unsqueeze(1) # average pooling\n return fv\n\n def edgeMidPoint(self, fv, hfIdx):\n '''\n get the mid point position of each edge\n '''\n Ve0 = fv[hfIdx[:,0],:3] \n Ve1 = fv[hfIdx[:,1],:3] \n Ve = (Ve0 + Ve1) / 2.0\n Ve = self.halfEdgePool(Ve)\n return Ve\n\n def forward(self, fv, mIdx, HFs, poolMats, DOFs):\n outputs = []\n\n # initialization step (figure 17 left)\n fv_input_pos = fv[:,:3]\n fhf, LFs = self.v2hf_initNet(fv, HFs[mIdx][0]) \n fhf = self.net_init(fhf)\n fhf = self.local2Global(fhf, LFs)\n fv = self.oneRingPool(fhf, poolMats[mIdx][0], DOFs[mIdx][0])\n fv[:,:3] += fv_input_pos\n\n outputs.append(fv[:,:3]) \n\n # subdivision starts\n for ii in range(self.numSubd):\n\n # vertex step (figure 17 middle)\n prevPos = fv[:,:3]\n fhf, LFs = self.v2hf(fv,HFs[mIdx][ii]) # 2*nE x 4*Dout\n fhf = self.net_vertex(fhf)\n fhf = self.local2Global(fhf, LFs)\n fv = self.oneRingPool(fhf, poolMats[mIdx][ii], DOFs[mIdx][ii])\n fv[:,:3] += prevPos\n fv_even = fv\n\n # edge step (figure 17 right)\n Ve = self.edgeMidPoint(fv, HFs[mIdx][ii]) # compute mid point\n fhf, LFs = self.v2hf(fv,HFs[mIdx][ii]) # 2*nE x 4*Dout\n fv_odd = self.net_edge(fhf) # 2*nE x Dout\n fv_odd = self.local2Global(fv_odd, LFs)\n fv_odd = self.halfEdgePool(fv_odd) # nE x Dout\n fv_odd[:,:3] += Ve\n\n # concatenate results\n fv = torch.cat((fv_even, fv_odd), dim = 0) # nV_next x Dout\n outputs.append(fv[:,:3])\n\n return outputs","repo_name":"HTDerekLiu/neuralSubdiv","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"62"} +{"seq_id":"72593854917","text":"import fileprocessing\nimport logging\nimport os\nimport glob\nimport time\nimport threading\nimport pandas as pd\nimport dask.dataframe as dd\nimport configparser\nimport urllib.parse\nimport 
json\nimport queue\n\n# This function will be called in thread to process all files in a folder. So each folder will be processed in a different thread. Note here that each folder also maps to a single table.\ndef process_folder_files(thread, monitor_folder, dir_path, supported_delimiters, server, database, schema_name, connectiontype, rdms_name, usr, pwd, objectexists): \n logging.warning(\"Thread %s: starting\", thread)\n connection = fileprocessing.getdbconnection(server\n , database\n , connectiontype\n , rdms_name\n , usr\n , pwd)\n engine = connection[0]\n # get list of files to process\n files = glob.glob(os.path.join(dir_path, '*'))\n\n # process each file into target table\n for file in files:\n if os.path.isfile(file):\n\n file_name = os.path.basename(file)\n archive_folder = monitor_folder + '/archive/'+schema_name+'/'+str(os.path.basename(dir_path))+'/'\n error_folder = monitor_folder + '/error/'+schema_name+'/'+str(os.path.basename(dir_path))+'/'\n target_table = str(os.path.basename(dir_path)).lower()\n\n # Check that file is not being used (in flight...)\n if not fileprocessing.check_file_status(file):\n continue\n\n # Check that file is not already loaded\n if fileprocessing.is_file_loaded(file_name\n , target_table\n , server\n , database\n , connectiontype\n , rdms_name\n , usr\n , pwd):\n fileprocessing.archive_file(file, archive_folder) # Archive the file\n continue\n\n # Attemp to create panda from file and figure out the delimiter used in file\n data_object = fileprocessing.prep_file(file, supported_delimiters)\n df = data_object[0] # process file i.e. read into dataframe\n meta_data = data_object[1]\n profile_hk = meta_data['profile_hk']\n \n if isinstance(df, pd.DataFrame) or isinstance(df, dd.DataFrame): # if a dataframe was returned\n status = fileprocessing.write_profile_data(df\n , meta_data\n , file\n , target_table\n , schema_name\n , engine) # write date profile\n\n if status[0] == 1: # if profile was not written skip this file\n continue\n status = fileprocessing.load_data(df\n , file\n , target_table\n , schema_name\n , connection) # load data to target database\n if status[0] == 1: # if not able to load data, move file to error folder\n status = fileprocessing.generate_error_log_entry(profile_hk\n , target_table\n , str(status[1])\n , engine)\n fileprocessing.error_file(file, error_folder)\n continue\n fileprocessing.archive_file(file, archive_folder) # Archive the file\n fileprocessing.set_file_processed_status(profile_hk, engine)\n else: # data was not processed into dataframe\n message = 'Error reading file into a dataframe...Make sure format is supported.'\n status = fileprocessing.generate_error_log_entry(file_name\n , target_table\n , message\n , engine)\n fileprocessing.error_file(file, error_folder)\n\n time.sleep(120) # wait 2 minutes before dispose of connection, give some time for archiving\n engine.dispose()\n\n # if this a new table, free up new table queue for next new table\n if not objectexists:\n newtablequeue.get()\n \n logging.warning(\"Thread %s: Ending\", thread)\n\n\nif __name__ == \"__main__\":\n # get information from configuration file.\n config = configparser.ConfigParser()\n config.read('setting.cfg')\n targetserver = urllib.parse.quote(config['DATABASE_SERVER']['SERVER'])\n targetdatabase = config['DATABASE_SERVER']['DATABASE']\n connectiontype = config['DATABASE_SERVER']['CONNECTIONTYPE'] \n rdms = config['DATABASE_SERVER']['RDMS']\n user = config['DATABASE_SERVER']['USER']\n password = 
urllib.parse.quote(config['DATABASE_SERVER']['PASSWORD'])\n watched_folder = config['FILE_PATH']['ROOTDROPFOLDER']\n delimiters = urllib.parse.quote(config['SUPPORTED_DELIMITERS']['DELIMITERS'].strip()).split('~')\n\n # This will be token used to determine which new table is in creation\n newtablequeue = queue.Queue()\n\n\n while True:\n # Check all active threads\n active_threads = []\n for active_thread in threading.enumerate():\n active_threads.append(active_thread.name)\n \n # If a drop folder does not exist exixt the program\n drop_folder = watched_folder + '/drop/'\n if not os.path.isdir(drop_folder):\n break\n\n # Scan drop folder's first level folders for files\n for (dir_root, dir_name, file_list) in os.walk(drop_folder):\n # and we are just one level deep from drop folder,\n # and folder is not empty,\n # and folder is not currently being processed, start a thread to process files in the folder\n if ((drop_folder != dir_root)\n and (dir_root.count(os.path.sep) ==1)\n and (len(file_list) != 0)\n and str(os.path.basename(dir_root)) not in active_threads):\n \n schema_name = os.path.basename(os.path.dirname(dir_root))\n # If table does not exist, put it in new tables queue, we will only create one new table at a time\n tableexist = fileprocessing.check_table_exists(dir_root\n , targetserver\n , targetdatabase\n , schema_name\n , connectiontype\n , rdms\n , user\n , password)\n\n # We are naming the thread with folder name (So we should have only one thread per folder)\n threadname = os.path.basename(dir_root)\n\n # If it is new table, and we are currently not processing a new table, put in queue\n if not tableexist and newtablequeue.empty():\n newtablequeue.put(threadname)\n # if it is new table, and we are currently processing a new table, then skip and continue\n elif not newtablequeue.empty() and not tableexist:\n continue\n \n folderthread = threading.Thread(target=process_folder_files,\n name=threadname,\n args=(threadname,\n watched_folder,\n dir_root,\n delimiters,\n targetserver,\n targetdatabase,\n schema_name,\n connectiontype,\n rdms,\n user,\n password,\n tableexist,))\n folderthread.start()\n\n time.sleep(5) # Wait for 5 minutes before checking for new files\n","repo_name":"tbofia/DataStagerPlus","sub_path":"datastagerplus.py","file_name":"datastagerplus.py","file_ext":"py","file_size_in_byte":9225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13322985121","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\nfrom nose.tools import eq_, raises\nfrom mock import Mock, patch\n\nfrom lamvery.actions.events import EventsAction\n\n\ndef default_args():\n args = Mock()\n args.conf_file = '.lamvery.yml'\n args.keep_empty_events = False\n return args\n\n\nclass EventsActionTestCase(TestCase):\n\n @raises(Exception)\n def test_action_function_not_exists(self):\n with patch('lamvery.actions.base.LambdaClient') as c:\n c.get_function_conf = Mock(return_value={})\n action = EventsAction(default_args())\n action._get_client = Mock(return_value=c)\n action.action()\n\n def test_action(self):\n with patch('lamvery.actions.base.LambdaClient') as c:\n c.get_function_conf = Mock(return_value={'FunctionArn': 'foo'})\n action = EventsAction(default_args())\n action._put_rules = Mock()\n action._put_target = Mock()\n action._clean = Mock()\n action._get_client = Mock(return_value=c)\n action.action()\n\n def test_put_rules(self):\n with patch('lamvery.actions.base.LambdaClient') as c:\n c.get_function_conf = 
Mock(return_value={'FunctionArn': 'foo'})\n action = EventsAction(default_args())\n action._get_client = Mock(return_value=c)\n action._put_rules(\n remote=[{'Name': 'bar'}],\n local=[{'name': 'foo'}, {'name': 'bar'}],\n function='baz',\n alias=None)\n\n def test_convert_state(self):\n action = EventsAction(default_args())\n eq_(action._convert_state(True), 'DISABLED')\n eq_(action._convert_state(False), 'ENABLED')\n\n def test_search_rule(self):\n action = EventsAction(default_args())\n eq_(action._search_rule([{'Name': 'foo'}, {'name': 'bar'}], 'bar'), {'name': 'bar'})\n eq_(action._search_rule([{'Name': 'foo'}, {'name': 'bar'}], 'baz'), {})\n\n def test_exist_rule(self):\n action = EventsAction(default_args())\n eq_(action._exist_rule([{'Name': 'foo'}, {'name': 'bar'}], 'bar'), True)\n eq_(action._exist_rule([{'Name': 'foo'}, {'name': 'bar'}], 'baz'), False)\n\n def test_exist_target(self):\n action = EventsAction(default_args())\n eq_(action._exist_target([{'Id': 'foo'}, {'id': 'bar'}], 'bar'), True)\n eq_(action._exist_target([{'Id': 'foo'}, {'id': 'bar'}], 'baz'), False)\n\n def test_put_targets(self):\n with patch('lamvery.actions.base.EventsClient') as c:\n c.get_targets_by_rule = Mock(return_value=[{'Id': 'baz'}])\n action = EventsAction(default_args())\n local = [\n {'name': 'foo', 'targets': [{'id': 'baz'}]},\n {'name': 'bar', 'targets': [{'id': 'qux'}]}\n ]\n action._get_client = Mock(return_value=c)\n action._put_targets(local=local, arn='baz')\n\n def test_clean(self):\n with patch('lamvery.actions.base.EventsClient') as c:\n c.get_targets_by_rule = Mock(return_value=[{'Id': 'foo', 'Arn': 'baz'}])\n action = EventsAction(default_args())\n action._get_client = Mock(return_value=c)\n action._clean(\n remote=[{'Name': 'bar'}],\n local=[{'name': 'foo', 'targets': []}, {'name': 'bar', 'targets': []}],\n arn='baz',\n function='qux',\n alias=None)\n action._clean(\n remote=[{'Name': 'bar'}],\n local=[{'name': 'foo', 'targets': []}],\n arn='baz',\n function='qux',\n alias='foobar')\n","repo_name":"marcy-terui/lamvery","sub_path":"tests/lamvery/actions/events_test.py","file_name":"events_test.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"62"} +{"seq_id":"16272777337","text":"import os\nimport sys\nsys.path.append(os.getcwd())\nfrom utils.claude import *\n\n\ndef load_paper():\n with open(r\"paper/ask_paper/paper.txt\", \"r\", encoding=\"utf-8\") as f:\n paper = f.read()\n return paper\n\ndef ask_paper_run():\n paper = \"\"\n \n while True:\n print(f\"Please input a question:\")\n question = input()\n paper = load_paper()\n pmt = paper + \"\\n\" + question\n print(\"....\")\n ask_claude(pmt)\n\n\nif __name__ == \"__main__\":\n ask_paper_run()","repo_name":"xiahan4956/Paper_manager","sub_path":"paper/ask_paper/ask_paper.py","file_name":"ask_paper.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"43258257378","text":"import sys\nfrom typing import NoReturn\n\nfrom colorama import (\n Fore,\n Style\n)\nfrom ppln.runner import Runner\nfrom ppln.hooks.registry import HOOKS\nfrom ppln.hooks.logger.progress_bar import ProgressBarLoggerHook\nfrom ppln.hooks.logger.utils import get_lr\nfrom ppln.utils.misc import master_only\n\nfrom ..utils.progress_bar import ModifiedProgressBar\n\n\n\n@HOOKS.register_module\nclass ModifiedProgressBarHook(ProgressBarLoggerHook):\n\n def before_epoch(self, runner: 
Runner) -> NoReturn:\n self.bar = ModifiedProgressBar(task_num=len(runner.data_loader), bar_width=self.bar_width)\n\n @master_only\n def after_epoch(self, runner: Runner):\n self.log(runner, update_completed=False)\n sys.stdout.write(f\"\\n\")\n\n def after_iter(self, runner):\n self.log(runner, update_completed=True)\n\n @master_only\n def log(self, runner: Runner, **kwargs):\n epoch_color = Fore.YELLOW\n mode_color = (Fore.RED, Fore.BLUE)[runner.train_mode]\n text_color = (Fore.CYAN, Fore.GREEN)[runner.train_mode]\n epoch_text = f\"{epoch_color}epoch:{Style.RESET_ALL} {runner.epoch + 1:<4}\"\n log_items = [(\" \" * 11, epoch_text)[runner.train_mode], f\"{mode_color}{runner.mode:<5}{Style.RESET_ALL}\"]\n log_items.append(f\"{text_color}iter:{Style.RESET_ALL} {runner.iter + 1}\")\n\n for name, lrs in get_lr(runner.optimizers).items():\n log_items.append(f\"{text_color}{name}_lr:{Style.RESET_ALL} {', '.join([f'{lr:.3e}' for lr in lrs])}\")\n\n for name, value in runner.log_buffer.output.items():\n if isinstance(value, float):\n value = f\"{value:.2f}\" if name in [\"data_time\", \"time\"] else f\"{value:.4f}\"\n log_items.append(f'{text_color}{name}:{Style.RESET_ALL} {value}')\n self.bar.update(f\"{' | '.join(log_items)}\", kwargs[\"update_completed\"])\n","repo_name":"Qovaxx/kaggle-prostate-cancer-grade-assessment","sub_path":"src/psga/train/contrib/hooks/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} {"seq_id":"70946192837","text":"from multiprocessing import Pool\nimport os, time, random\n\ndef worker(msg):\n\ttime_start = time.time()\n\n\tprint(\"%s started, pid %d\" %(msg, os.getpid()))\n\ttime.sleep(random.random()*2)\n\ttime_end = time.time()\n\tprint(msg, \"finished, took %.4f\" %(time_end - time_start))\n\n\ndef main():\n\tpo = Pool(3)\n\tfor i in range(10):\n\t\tpo.apply_async(worker, (i,))\n\tprint(\"start------------\")\n\tpo.close()\n\tpo.join()\n\tprint(\"end--------------\")\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"IvanDavais/NET","sub_path":"wf_14_使用进程池进行多任务.py","file_name":"wf_14_使用进程池进行多任务.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"71970284676","text":"import sys, subprocess\nfrom pip._internal import main as pip_main\n\ndef install_pip(package):\n pip_main(['install', package])\n\ndef install_conda(package):\n # do conda install -y $pkg; done\n # cmd = f\"conda install {package}\"\n cmd = f\"conda create --name test_env --file {package}\"\n try:\n subprocess.check_output(cmd)\n except:\n print(f\"{package} can't be run\")\n\nif __name__ == '__main__':\n '''i am lazy today so you get bad code here future me'''\n\n # sys.argv[1] is 'pip' or 'conda'; sys.argv[2] is the requirements file path\n with open(sys.argv[2]) as f:\n if sys.argv[1] == 'pip':\n for line in f:\n install_pip(line)\n #=====================\n #==unfinished work\n #=====================\n\n #--------fail to run with python code as well as directly type in anaconda prompt\n # > I used the following command cmd\n # f\"conda create --name test_env --file C:\Users\awannaphasch2016\PycharmProjects\my_utility\config\packages requirement\conda_requirements.txt\"\n if sys.argv[1] == 'conda':\n\n # for line in f:\n 
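# NOTE: install_conda below receives sys.argv[1] (the literal 'pip'/'conda' selector), not the requirements path in sys.argv[2]; given the "unfinished work" markers above, this looks like a leftover bug -- passing sys.argv[2] would mirror the pip branch.\n            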
install_conda(sys.argv[1])\n","repo_name":"Anak2016/my_utility","sub_path":"configuration/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13389067818","text":"from tutor_init import *\nimport itertools\n# import sys\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\nMARKERS = ('o', 'x', '+', '.', '<', '>', '^', 'v')\n\n\ndef plot_varying_symbols(x, y, color='red', size=5):\n \"\"\"\n Create a plot with varying symbols\n Parameters\n ----------\n x : numpy array with x data of the points\n y : numpy array with y data of the points\n color : color of the symbols\n\n Returns\n -------\n\n \"\"\"\n markers = itertools.cycle(MARKERS)\n for q, p in zip(x, y):\n plt.plot(q, p, marker=markers.next(), linestyle='', color=color,\n markersize=size)\n\n\ndef damage_vs_S(S, beta, K):\n \"\"\"\n Calculate the damage 1/N for a given stress S\n\n Parameters\n ----------\n S : Stress [Pa]\n beta : coefficient, typically 3\n K : constant\n\n Returns\n -------\n\n \"\"\"\n return K * np.power(S, beta)\n\n# Section 4.3.1 Crossing intensity\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimport wafo.data as wd\nimport wafo.objects as wo\nimport wafo.misc as wm\n\nxx_sea = wd.sea()\n\nTlength = xx_sea[-1, 0] - xx_sea[0, 0]\nbeta = 3\nK1 = 6.5e-31\nNp = 200\nTp = Tlength / Np\nA = 100e6\nlog.info(\"setting sin wave with Tp={} and T={}\".format(Tp, Tlength))\nNc = 1.0 / damage_vs_S(A, beta, K1)\ndamage = float(Np) / float(Nc)\nlog.info(\"budget at S={} N={}: damage = {} \".format(A, Nc, damage))\n#xx_sea[:, 1] = A * np.cos(2 * np.pi * xx_sea[:, 0]/Tp)\nxx_sea[:, 1] *= 500e6\n\nlog.info(\"loaded sea time series {}\".format(xx_sea.shape))\nts = wo.mat2timeseries(xx_sea)\n\ntp = ts.turning_points()\nmM = tp.cycle_pairs(kind='min2max')\nMm = tp.cycle_pairs(kind='max2min')\nlc = mM.level_crossings(intensity=True)\nT_sea = ts.args[-1] - ts.args[0]\n\n# for i in dir(mM):\n# print(i)\n\n\nts1 = wo.mat2timeseries(xx_sea[:, :])\ntp1 = ts1.turning_points()\nsig_tp = ts.turning_points(h=0, wavetype='astm')\ntry:\n sig_cp = sig_tp.cycle_astm()\n log.info(\"Successfully used cycle_astm\")\nexcept AttributeError:\n log.warning(\"Could use cycle_astm\")\n sig_cp = None\ntp1 = ts1.turning_points()\ntp2 = ts1.turning_points(wavetype='Mw')\nmM1 = tp1.cycle_pairs(kind='min2max')\nMm1 = tp1.cycle_pairs(kind='max2min')\n\ntp_rfc = tp1.rainflow_filter(h=100e6)\nmM_rfc = tp_rfc.cycle_pairs()\ntry:\n mM_rfc_a = tp1.cycle_astm()\nexcept AttributeError:\n mM_rfc_a = None\ntc1 = ts1.trough_crest()\nmin_to_max = True\nrfc_plot = True\nif min_to_max:\n m1, M1 = mM1.get_minima_and_maxima()\n i_min_start = 0\nelse:\n m1, M1 = Mm1.get_minima_and_maxima()\n i_min_start = 2\n\nm_rfc, M_rfc = mM_rfc.get_minima_and_maxima()\n# m_rfc_a, M_rfc_a = mM_rfc_a.get_minima_and_maxima()\nts1.plot('b-')\nif rfc_plot:\n plot_varying_symbols(tp_rfc.args[0::2], m_rfc, color='red', size=10)\n plot_varying_symbols(tp_rfc.args[1::2], M_rfc, color='green', size=10)\nelse:\n plot_varying_symbols(tp.args[i_min_start::2], m1, color='red', size=10)\n plot_varying_symbols(tp.args[1::2], M1, color='green', size=10)\n\nset_windows_title(\"Sea time series\", log)\n\nplt.figure()\nplt.subplot(122),\nmM.plot()\nplt.title('min-max cycle pairs')\nplt.subplot(121),\nmM_rfc.plot()\n\ntitle = 'Rainflow filtered cycles'\nplt.title(title)\nset_windows_title(title)\n\n\n# Min-max and rainflow cycle 
distributions\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# import wafo.misc as wm\nampmM_sea = mM.amplitudes()\nampRFC_sea = mM_rfc.amplitudes()\nplt.figure()\ntitle = \"s_n_curve\"\nset_windows_title(title)\nS = np.linspace(1e6, 1000e6)\nplt.loglog(S, damage_vs_S(S, beta, K1))\nplt.figure()\nplt.subplot(121)\nstress_range = (1, 1e9)\nn_bins = 100\nwm.plot_histgrm(ampmM_sea, bins=n_bins, range=stress_range)\nplt.xlim(stress_range)\nylim = plt.gca().get_ylim()\nplt.title('min-max amplitude distribution')\nplt.subplot(122)\nif sig_cp is not None:\n wm.plot_histgrm(sig_cp[:, 0], bins=n_bins, range=stress_range)\n plt.gca().set_ylim(ylim)\n title = 'Rainflow amplitude distribution'\n plt.title(title)\n plt.semilogy\n set_windows_title(title)\n\n hist, bin_edges = np.histogram(\n sig_cp[\n :, 0], bins=n_bins, range=stress_range)\n\n plt.figure()\n title = \"my_bins\"\n plt.title(title)\n plt.title(title)\n set_windows_title(title)\n plt.semilogy\n plt.bar(bin_edges[:-1], hist, width=stress_range[1] / n_bins)\n\n print(\"damage min/max : {}\".format(mM_rfc.damage([beta], K1)))\n\n damage_rfc = K1 * np.sum(sig_cp[:, 0] ** beta)\n print(\"damage rfc : {}\".format(damage_rfc))\nplt.show('hold')\n","repo_name":"wafo-project/pywafo","sub_path":"src/wafo/doc/tutorial_scripts/rainflow_example.py","file_name":"rainflow_example.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"62"} +{"seq_id":"17087390037","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_theme()\nimport numpy as np\nfrom statapy.utils import save_fig_default, plotting_function\n\n\n@plotting_function\ndef line(dataset, X, y, order=1, save_fig=save_fig_default):\n \"\"\"\n Line plot of X vs y\n\n\n :param dataset: dataset object\n :param X: X column to plot\n :param y: y column to plot\n :param order:\n :param save_fig: True iff to save figure\n :return: None\n\n * function decorator changes output from function body\n \"\"\"\n sns.regplot(x=X, y=y, data=dataset.data, order=order, line_kws={\"color\": \"red\"})\n title = f\"Line Plot {X} vs {y} from {dataset.name} dataset\"\n plt.title(title)\n plt.xlabel(f\"{X}\")\n plt.ylabel(f\"{y}\")\n plt.legend()\n return plt, title\n\n\n@plotting_function\ndef scatter(dataset, X, y, line_param_results=None, save_fig=save_fig_default):\n \"\"\"\n Scatterplot of X vs y\n\n :param dataset: dataset object\n :param X: X column to plot\n :param y: y column to plot\n :param line_param_results: result of a regression\n :param save_fig: True iff to save figure\n :return: None\n\n * function decorator changes output from function body\n \"\"\"\n sns.scatterplot(x=X, y=y, data=dataset.data)\n if line_param_results is not None:\n x_min = min(dataset.data[X])\n x_max = max(dataset.data[X])\n xs = np.linspace(x_min, x_max, num=100)\n coef = line_param_results.params[X]\n intercept = line_param_results.params['const']\n preds = (xs * coef) + intercept\n plt.plot(xs, preds, color=\"r\", label=\"Regression Line\")\n title = f\"Scatter Plot {X} vs {y} from {dataset.name} dataset\"\n plt.title(title)\n plt.xlabel(f\"{X}\")\n plt.ylabel(f\"{y}\")\n plt.legend()\n return plt, title\n\n\n@plotting_function\ndef bar(dataset, X, y, groupby=None, save_fig=save_fig_default):\n \"\"\"\n Bar plot of X vs y\n\n :param dataset: dataset object\n :param X: X column to plot\n :param y: y column to plot\n :param groupby: Will group the X column observations by groupby column\n :param save_fig: True iff to save 
figure\n :return: None\n\n * function decorator changes output from function body\n \"\"\"\n sns.barplot(x=X, y=y, data=dataset.data, hue=groupby, ci=95)\n title_add = f\", grouped by {groupby}\" if groupby is not None else \"\"\n title = f\"Bar Plot {X} vs {y}{title_add} from {dataset.name} dataset\"\n plt.title(title)\n plt.xlabel(f\"{X}\")\n plt.ylabel(f\"{y}\")\n plt.legend()\n return plt, title\n\n\n@plotting_function\ndef dist(dataset, X, save_fig=save_fig_default):\n \"\"\"\n Dist plot of X vs y\n\n :param dataset: dataset object\n :param X: X column to plot\n :param save_fig: True iff to save figure\n :return: None\n\n * function decorator changes output from function body\n \"\"\"\n sns.displot(x=X, data=dataset.data)\n title = f\"Dist Plot {X} from {dataset.name} dataset\"\n plt.title(title)\n plt.xlabel(f\"{X}\")\n plt.legend()\n return plt, title\n\n\n","repo_name":"DhananjayAshok/PyStata","sub_path":"statapy/plotting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1673004086","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('group', '0001_initial'),\n ('name', '0001_initial'),\n ('dbtemplate', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='dbtemplate',\n name='group',\n field=models.ForeignKey(blank=True, to='group.Group', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='dbtemplate',\n name='type',\n field=models.ForeignKey(to='name.DBTemplateTypeName'),\n preserve_default=True,\n ),\n ]\n","repo_name":"wpjesus/codematch","sub_path":"ietf/dbtemplate/migrations/0002_auto_20141222_1749.py","file_name":"0002_auto_20141222_1749.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"1738403373","text":"import copy\nimport math\nfrom . import tem\nimport threading\nimport time\nimport json\nimport os\n\nimport itertools\n\ntry:\n\tfrom . 
import nidaq\nexcept:\n\tnidaq = None\n\nsimu_autofiller = False\nSTAGE_DEBUG = False\n\nclass SimTEM(tem.TEM):\n\tname = 'SimTEM'\n\tprojection_mode = 'imaging'\n\tdef __init__(self):\n\t\ttem.TEM.__init__(self)\n\n\t\tself.high_tension = 120000.0\n\t\tself.cfeg_flashing = 0\n\n\t\tself.magnifications = [\n\t\t\t50,\n\t\t\t100,\n\t\t\t500,\n\t\t\t1000,\n\t\t\t5000,\n\t\t\t25000,\n\t\t\t50000,\n\t\t]\n\t\tself.magnification_index = 0\n\n\t\tself.probe_modes = [\n\t\t\t'micro',\n\t\t\t'nano',\n\t\t]\n\t\tself.probe_mode_index = 0\n\n\t\tself.correctedstage = False\n\t\tself.corrected_alpha_stage = False\n\t\tself.alpha_backlash_delta = 3.0\n\t\tself.stage_axes = ['x', 'y', 'z', 'a']\n\t\tif nidaq is not None:\n\t\t\tself.stage_axes.append('b')\n\t\tself.stage_range = {\n\t\t\t'x': (-1e-3, 1e-3),\n\t\t\t'y': (-1e-3, 1e-3),\n\t\t\t'z': (-5e-4, 5e-4),\n\t\t\t'a':(math.radians(-70),math.radians(70)),\n\t\t\t'b':(math.radians(-90),math.radians(90)), # no limit\n\t\t}\n\t\tself.minimum_stage = {\n\t\t\t'x':5e-8,\n\t\t\t'y':5e-8,\n\t\t\t'z':5e-8,\n\t\t\t'a':math.radians(0.01),\n\t\t\t'b':1e-4,\n\t\t}\n\t\tself.stage_position = {}\n\t\tfor axis in self.stage_axes:\n\t\t\tself.stage_position[axis] = 0.0\n\t\tself.stage_top_speed = 29.78\n\t\tself.stage_speed_fraction = 1.0\n\n\t\tself.screen_current = 0.000001\n\t\tself.intensity_range = (0.0, 1.0)\n\t\tself.intensity = 0.0\n\n\t\tself.stigmators = {\n\t\t\t'condenser': {\n\t\t\t\t'x': 0.0,\n\t\t\t\t'y': 0.0,\n\t\t\t},\n\t\t\t'objective': {\n\t\t\t\t'x': 0.0,\n\t\t\t\t'y': 0.0,\n\t\t\t},\n\t\t\t'diffraction': {\n\t\t\t\t'x': 0.0,\n\t\t\t\t'y': 0.0,\n\t\t\t\t},\n\t\t}\n\n\t\tself.spot_sizes = list(range(1, 11))\n\t\tself.spot_size = self.spot_sizes[0]\n\n\t\tself.beam_tilt = {'x': 0.0, 'y': 0.0}\n\t\tself.beam_shift = {'x': 0.0, 'y': 0.0}\n\t\tself.diffraction_shift = {'x': 0.0, 'y': 0.0}\n\t\tself.image_shift = {'x': 0.0, 'y': 0.0}\n\t\tself.raw_image_shift = {'x': 0.0, 'y': 0.0}\n\n\t\tself.focus = 0.0\n\t\tself.zero_defocus = 0.0\n\n\t\tself.main_screen_scale = 1.0\n\n\t\tself.main_screen_positions = ['up', 'down']\n\t\tself.main_screen_position = self.main_screen_positions[0]\n\t\tself.columnvalveposition = 'open'\n\t\tself.emission = 'on'\n\t\tself.BeamBlank = 'off'\n\t\tself.buffer_pressure = 30.0\n\t\tself.beamstop_position = 'out'\n\n\t\tself.energy_filter = False\n\t\tself.energy_filter_width = 0.0\n\n\t\tself.resetRefrigerant()\n\t\tself.loaded_slot_number = None\n\t\tself.is_init = True\n\n\t\tself.aperture_selection = {'objective':'100','condenser_2':'70','selected_area':'open'}\n\t\tif 'simpar' in self.conf and self.conf['simpar'] and os.path.isdir(self.conf['simpar']):\n\t\t\tself.simpar_dir = self.conf['simpar']\n\t\t\tself.resetSimPar()\n\t\telse:\n\t\t\tself.simpar_dir = None\n\n\tdef resetSimPar(self):\n\t\tif self.simpar_dir:\n\t\t\t# reset to empty file\n\t\t\tf = open(os.path.join(self.simpar_dir,'simpar.json'),'w')\n\t\t\tf.close()\n\n\tdef saveSimPar(self,key,value):\n\t\tif self.simpar_dir:\n\t\t\t# open the file or both read and write and thus locked from others\n\t\t\tf = open(os.path.join(self.simpar_dir,'simpar.json'),'r+')\n\t\t\ttry:\n\t\t\t\tself.all_simpar = json.loads(f.read())\n\t\t\texcept ValueError:\n\t\t\t\tself.all_simpar = {}\n\t\t\tself.all_simpar[key] = value\n\t\t\t# move pointer back to the start\n\t\t\tf.seek(0)\n\t\t\tjstr = json.dumps(self.all_simpar, indent=2, separators=(',',':'))\n\t\t\tf.write(jstr)\n\t\t\t# truncate extra old stuff\n\t\t\tf.truncate()\n\t\t\tf.close()\n\n\tdef 
printStageDebug(self,msg):\n\t\tif STAGE_DEBUG:\n\t\t\tprint(msg)\n\n\tdef resetRefrigerant(self):\n\t\tself.autofiller_busy = False\n\t\tself.level0 = 100.0\n\t\tself.level1 = 100.0\n\t\tif simu_autofiller:\n\t\t\tt = threading.Thread(target=self.useRefrigerant)\n\t\t\tt.daemon = True\n\t\t\tt.start()\n\n\tdef getColumnValvePositions(self):\n\t\treturn ['open', 'closed']\n\n\tdef getColumnValvePosition(self):\n\t\treturn self.columnvalveposition\n\n\tdef setColumnValvePosition(self, state):\n\t\tif state in ('open','closed'):\n\t\t\tself.columnvalveposition = state\n\t\telse:\n\t\t\traise RuntimeError('invalid column valve position %s' % (state,))\n\n\tdef getHighTension(self):\n\t\treturn self.high_tension\n\n\tdef setHighTension(self, value):\n\t\tself.high_tension = value\n\n\tdef getColdFegFlashing(self):\n\t\tvalue = self.cfeg_flashing\n\t\tvalue_map = [('error', -1), ('off',0),('on',1)]\n\t\tif value == -1:\n\t\t\traise RuntimeError('CFEG Flashing in error state')\n\t\tvalues = list(map((lambda x: x[1]), value_map))\n\t\tstate = value_map[values.index(value)][0]\n\t\treturn state\n\n\tdef setColdFegFlashing(self, state):\n\t\t# On starts flashing, Off stops flashing\n\t\tif state == self.getColdFegFlashing():\n\t\t\t# do nothing\n\t\t\treturn\n\t\tvalue_map = [('off',0), ('on',1)]\n\t\tstates = list(map((lambda x: x[0]), value_map))\n\t\tvalue = value_map[states.index(state)][1]\n\t\tself.cfeg_flashing = value\n\t\t# TODO: how long before the state change shows up in get ?\n\t\ttime.sleep(5)\n\t\tif state == 'on':\n\t\t\twhile self.getColdFegFlashing() == 'on':\n\t\t\t\ttime.sleep(5)\n\t\t\t\tself.cfeg_flashing = 0\n\t\treturn\n\n\tdef getStagePosition(self):\n\t\ttry:\n\t\t\tbeta = nidaq.getBeta()\n\t\t\tself.stage_position.update({'b':beta})\n\t\texcept:\n\t\t\t# give values so it behaves like the real tem implementation\n\t\t\tself.stage_position.update({'b':0.0})\n\t\treturn copy.copy(self.stage_position)\n\n\tdef getStageLimits(self):\n\t\tlimits = self.stage_range\n\t\treturn limits\n\n\tdef _setStagePosition(self,value):\n\t\t# check limit here so that direct move will also be caught\n\t\tself.checkStageLimits(value)\n\t\tkeys = list(value.keys())\n\t\tkeys.sort()\n\t\tfor axis in keys:\n\t\t\tself.printStageDebug('%s: %s' % (axis, value[axis]))\n\t\t\ttry:\n\t\t\t\tself.stage_position[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tcontinue\n\t\tself.printStageDebug('----------')\n\n\tdef setDirectStagePosition(self,value):\n\t\tself._setStagePosition(value)\n\n\tdef checkStageLimits(self, position):\n\t\tself._checkStageXYZLimits(position)\n\t\tself._checkStageABLimits(position)\n\n\tdef _checkStageXYZLimits(self, position):\n\t\tlimit = self.getStageLimits()\n\t\tintersection = set(position.keys()).intersection(('x','y','z'))\n\t\tfor axis in intersection:\n\t\t\tself._validateStageAxisLimit(position[axis], axis)\n\n\tdef _checkStageABLimits(self, position):\n\t\tlimit = self.getStageLimits()\n\t\tintersection = set(position.keys()).intersection(('a','b'))\n\t\tfor axis in intersection:\n\t\t\tself._validateStageAxisLimit(position[axis], axis)\n\n\tdef _validateStageAxisLimit(self, p, axis):\n\t\tlimit = self.getStageLimits()\n\t\tif not (limit[axis][0] < p and limit[axis][1] > p):\n\t\t\tif axis in ('x','y','z'):\n\t\t\t\tum_p = p*1e6\n\t\t\t\traise ValueError('Requested %s axis position %.1f um out of range.' % (axis,um_p))\n\t\t\telse:\n\t\t\t\tdeg_p = math.degrees(p)\n\t\t\t\traise ValueError('Requested %s axis position %.1f degrees out of range.'
 % (axis,deg_p))\n\n\tdef checkStagePosition(self, position):\n\t\tself.checkStageLimits(position)\n\t\tcurrent = self.getStagePosition()\n\t\tbigenough = {}\n\t\tminimum_stage = self.minimum_stage\n\t\tfor axis in ('x', 'y', 'z', 'a', 'b'):\n\t\t\tif axis in position:\n\t\t\t\tdelta = abs(position[axis] - current[axis])\n\t\t\t\tif delta > minimum_stage[axis]:\n\t\t\t\t\tbigenough[axis] = position[axis]\n\t\treturn bigenough\n\n\tdef setStageSpeed(self, value):\n\t\tself.speed_deg_per_second = value\n\t\tself.stage_speed_fraction = min(value/self.stage_top_speed,1.0)\n\n\tdef getStageSpeed(self):\n\t\treturn self.stage_speed_fraction * self.stage_top_speed\n\n\tdef setStagePosition(self, value):\n\t\tself.printStageDebug(list(value.keys()))\n\t\tvalue = self.checkStagePosition(value)\n\n\t\tfor axis in list(value.keys()):\n\t\t\tif axis == 'b' and value['b'] is not None:\n\t\t\t\ttry:\n\t\t\t\t\tnidaq.setBeta(value['b'])\n\t\t\t\texcept:\n\t\t\t\t\tprint('exception, beta not set')\n\t\t# calculate pre-position\n\t\tprevalue = {}\n\t\tprevalue2 = {}\n\t\tstagenow = self.getStagePosition()\n\t\tif self.correctedstage:\n\t\t\tdelta = 2e-6\n\t\t\tfor axis in ('x','y','z'):\n\t\t\t\tif axis in value:\n\t\t\t\t\tprevalue[axis] = value[axis] - delta\n\t\trelax = 0\n\t\tif abs(relax) > 1e-9:\n\t\t\tfor axis in ('x','y'):\n\t\t\t\tif axis in value:\n\t\t\t\t\tprevalue2[axis] = value[axis] + relax\n\t\tif self.corrected_alpha_stage:\n\t\t\t# alpha tilt backlash only in one direction\n\t\t\talpha_delta_degrees = self.alpha_backlash_delta\n\t\t\tif 'a' in list(value.keys()):\n\t\t\t\taxis = 'a'\n\t\t\t\tprevalue[axis] = value[axis] - math.radians(alpha_delta_degrees)\n\t\tif prevalue:\n\t\t\t# set all axes in prevalue\n\t\t\tfor axis in list(value.keys()):\n\t\t\t\tif axis not in list(prevalue.keys()):\n\t\t\t\t\tprevalue[axis] = value[axis]\n\t\t\t\t\tdel value[axis]\n\t\t\tself._setStagePosition(prevalue)\n\t\t\ttime.sleep(0.2)\n\t\tif abs(relax) > 1e-9 and prevalue2:\n\t\t\tself._setStagePosition(prevalue2)\n\t\t\ttime.sleep(0.2)\n\t\tif self.stage_speed_fraction < 1.0:\n\t\t\tif 'a' in list(value.keys()):\n\t\t\t\talpha_delta = math.degrees(abs(value['a']-stagenow['a']))\n\t\t\t\tmove_time = alpha_delta / (self.stage_speed_fraction*self.stage_top_speed)\n\t\t\t\ttime.sleep(max(move_time,0.2))\n\t\treturn self._setStagePosition(value)\n\n\tdef normalizeLens(self, lens='all'):\n\t\tpass\n\n\tdef getScreenCurrent(self):\n\t\treturn self.screen_current\n\n\tdef getIntensity(self):\n\t\treturn self.intensity\n\n\tdef setIntensity(self, value):\n\t\tif value < self.intensity_range[0] or value > self.intensity_range[1]:\n\t\t\traise ValueError('invalid intensity')\n\t\tself.intensity = value\n\n\tdef getStigmator(self):\n\t\treturn copy.deepcopy(self.stigmators)\n\n\tdef setStigmator(self, value):\n\t\tfor key in list(self.stigmators.keys()):\n\t\t\tfor axis in list(self.stigmators[key].keys()):\n\t\t\t\ttry:\n\t\t\t\t\tself.stigmators[key][axis] = value[key][axis]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\n\tdef getSpotSize(self):\n\t\treturn self.spot_size\n\n\tdef setSpotSize(self, value):\n\t\tif value not in self.spot_sizes:\n\t\t\traise ValueError('invalid spot size')\n\t\tself.spot_size = value\n\n\tdef getBeamTilt(self):\n\t\treturn copy.copy(self.beam_tilt)\n\n\tdef setBeamTilt(self, value):\n\t\tfor axis in list(self.beam_tilt.keys()):\n\t\t\ttry:\n\t\t\t\tself.beam_tilt[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\tdef getBeamShift(self):\n\t\treturn
 copy.copy(self.beam_shift)\n\n\tdef setBeamShift(self, value):\n\t\tfor axis in list(self.beam_shift.keys()):\n\t\t\ttry:\n\t\t\t\tself.beam_shift[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\tdef getDiffractionShift(self):\n\t\treturn copy.copy(self.diffraction_shift)\n\n\tdef setDiffractionShift(self, value):\n\t\tfor axis in list(self.diffraction_shift.keys()):\n\t\t\ttry:\n\t\t\t\tself.diffraction_shift[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\tdef getImageShift(self):\n\t\treturn copy.copy(self.image_shift)\n\n\tdef setImageShift(self, value):\n\t\tfor axis in list(self.image_shift.keys()):\n\t\t\ttry:\n\t\t\t\tself.image_shift[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\tdef getRawImageShift(self):\n\t\treturn copy.copy(self.raw_image_shift)\n\n\tdef setRawImageShift(self, value):\n\t\tfor axis in list(self.raw_image_shift.keys()):\n\t\t\ttry:\n\t\t\t\tself.raw_image_shift[axis] = value[axis]\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\tdef getDefocus(self):\n\t\treturn self.focus - self.zero_defocus\n\n\tdef setDefocus(self, value):\n\t\tself.focus = value + self.zero_defocus\n\n\tdef resetDefocus(self):\n\t\tself.zero_defocus = self.focus\n\n\tdef getMagnification(self, index=None):\n\t\tif index is None:\n\t\t\tindex = self.magnification_index\n\t\ttry:\n\t\t\treturn self.magnifications[index]\n\t\texcept IndexError:\n\t\t\traise ValueError('invalid magnification')\n\n\tdef getMainScreenMagnification(self, index=None):\n\t\treturn self.main_screen_scale*self.getMagnification(index=index)\n\n\tdef getMainScreenScale(self):\n\t\treturn self.main_screen_scale\n\n\tdef setMainScreenScale(self, value):\n\t\tself.main_screen_scale = value\n\n\tdef setMagnification(self, value):\n\t\ttry:\n\t\t\tself.magnification_index = self.magnifications.index(value)\n\t\t\tself.saveSimPar('magnification', value)\n\t\texcept ValueError:\n\t\t\traise ValueError('invalid magnification')\n\n\tdef getMagnificationIndex(self, magnification=None):\n\t\tif magnification is not None:\n\t\t\treturn self.magnifications.index(magnification)\n\t\treturn self.magnification_index\n\n\tdef setMagnificationIndex(self, value):\n\t\tif value < 0 or value >= len(self.magnifications):\n\t\t\traise ValueError('invalid magnification index')\n\t\tself.magnification_index = value\n\n\tdef findMagnifications(self):\n\t\t# fake finding magnifications and set projection submod mappings\n\t\tself.setProjectionSubModeMap({})\n\t\tfor mag in self.magnifications:\n\t\t\tif mag < 5000:\n\t\t\t\tself.addProjectionSubModeMap(mag,'mode0',0)\n\t\t\telse:\n\t\t\t\tself.addProjectionSubModeMap(mag,'mode1',1)\n\n\tdef getMagnifications(self):\n\t\treturn list(self.magnifications)\n\n\tdef setMagnifications(self, magnifications):\n\t\tself.magnifications = magnifications\n\n\tdef getMagnificationsInitialized(self):\n\t\treturn True\n\n\tdef getProbeMode(self):\n\t\tindex = self.probe_mode_index\n\t\ttry:\n\t\t\treturn self.probe_modes[index]\n\t\texcept IndexError:\n\t\t\traise ValueError('invalid probe mode')\n\n\tdef setProbeMode(self, value):\n\t\ttry:\n\t\t\tself.probe_mode_index = self.probe_modes.index(str(value))\n\t\texcept ValueError:\n\t\t\traise ValueError('invalid probe mode')\n\n\tdef getProbeModes(self):\n\t\treturn list(self.probe_modes)\n\n\tdef setProjectionMode(self, value):\n\t\t# This is a fake value set. 
It forces the projection mode defined by\n\t\t# the class.\n\t\t#print('fake setting to projection mode %s' % (self.projection_mode,))\n\t\tpass\n\n\tdef getMainScreenPositions(self):\n\t\treturn list(self.main_screen_positions)\n\n\tdef getMainScreenPosition(self):\n\t\treturn self.main_screen_position\n\n\tdef setMainScreenPosition(self, value):\n\t\tif value not in self.main_screen_positions:\n\t\t\traise ValueError('invalid main screen position')\n\t\tself.main_screen_position = value\n\n\tdef getFocus(self):\n\t\treturn self.focus\n\n\tdef setFocus(self, value):\n\t\tself.focus = value\n\n\tdef getBufferTankPressure(self):\n\t\treturn self.buffer_pressure\n\n\tdef runBufferCycle(self):\n\t\ttime.sleep(5)\n\t\tself.buffer_pressure -= 5\n\n\tdef getTurboPump(self):\n\t\t\tif not hasattr(self, 'turbo'):\n\t\t\t\tself.turbo = 'off'\n\t\t\treturn self.turbo\n\n\tdef setTurboPump(self, value):\n\t\t\tself.turbo = value\n\n\tdef setEmission(self, value):\n\t\tself.emission = value\n\n\tdef getEmission(self):\n\t\treturn self.emission\n\n\tdef getBeamBlank(self):\n\t\treturn self.BeamBlank\n\t\t\n\tdef setBeamBlank(self, bb):\n\t\tself.BeamBlank = bb\n\n\tdef getEnergyFiltered(self):\n\t\treturn True\n\n\tdef getEnergyFilter(self):\n\t\treturn self.energy_filter\n\n\tdef setEnergyFilter(self, value):\n\t\t#print('TEM energy filter', value)\n\t\tself.energy_filter = bool(value)\n\n\tdef getEnergyFilterWidth(self):\n\t\treturn self.energy_filter_width\n\n\tdef setEnergyFilterWidth(self, value):\n\t\t#print('TEM energy filter width = ', value)\n\t\tself.energy_filter_width = float(value)\n\n\tdef getRefrigerantLevel(self,id=0):\n\t\tif id == 0:\n\t\t\tlevel = self.level0\n\t\telse:\n\t\t\tlevel = self.level1\n\t\tprint(id, level)\n\t\treturn level\n\n\tdef hasAutoFiller(self):\n\t\treturn True\n\n\tdef runAutoFiller(self):\n\t\tself.autofiller_busy = True\n\t\tself.ventRefrigerant()\n\t\tself.addRefrigerant(4)\n\t\tif self.level0 <=40 or self.level1 <=40:\n\t\t\tself.autofiller_busy = True\n\t\t\traise RuntimeError('Force fill failed')\n\t\tself.addRefrigerant(4)\n\t\tself.autofiller_busy = False\n\n\tdef resetAutoFillerError(self):\n\t\tself.autofiller_busy = False\n\t\tself.level0 = 100\n\t\tself.level1 = 100\n\n\tdef isAutoFillerBusy(self):\n\t\treturn self.autofiller_busy\n\n\tdef useRefrigerant(self):\n\t\twhile 1:\n\t\t\tself.level0 -= 11\n\t\t\tself.level1 -= 11\n\t\t\tif self.level1 <= 0:\n\t\t\t\tprint('empty col')\n\t\t\tself.level0 = max(self.level0,0.0)\n\t\t\tself.level1 = max(self.level1,0.0)\n\t\t\tprint('using', self.level0, self.level1)\n\t\t\ttime.sleep(4)\n\n\tdef ventRefrigerant(self):\n\t\tself.level0 -= 10\n\t\tself.level1 -= 10\n\t\tprint('venting', self.level0, self.level1)\n\t\ttime.sleep(2)\n\n\tdef addRefrigerant(self,cycle):\n\t\tfor i in range(cycle):\n\t\t\tself.level0 += 20\n\t\t\tself.level1 += 20\n\t\t\tprint('adding', self.level0, self.level1)\n\t\t\ttime.sleep(2)\n\n\tdef getAutoFillerRemainingTime(self):\n\t\tif simu_autofiller:\n\t\t\treturn min(self.level0, self.level1)\n\t\telse:\n\t\t\treturn -60\n\n\tdef exposeSpecimenNotCamera(self,seconds):\n\t\ttime.sleep(seconds)\n\n\tdef hasGridLoader(self):\n\t\treturn True\n\n\tdef getGridLoaderNumberOfSlots(self):\n\t\tif not self.hasGridLoader():\n\t\t\treturn 0\n\t\treturn 4\n\n\tdef getGridLoaderSlotState(self, number):\n\t\tif self.loaded_slot_number == number:\n\t\t\tstate = 'empty'\n\t\telif self.loaded_slot_number is None and number == 1 and self.is_init is True:\n\t\t\tself.is_init = False\n\t\t\tstate = 
'empty'\n\t\telse:\n\t\t\tstate = 'occupied'\n\t\treturn state\n\n\tdef _loadCartridge(self, number):\n\t\tself.loaded_slot_number = number\n\t\ttime.sleep(2)\n\n\tdef _unloadCartridge(self):\n\t\tself.loaded_slot_number = None\n\n\tdef getGridLoaderInventory(self):\n\t\treturn self.getAllGridSlotStates()\n\n\tdef getApertureMechanisms(self):\n\t\t'''\n\t\tNames of the available aperture mechanisms\n\t\t'''\n\t\treturn ['condenser_2', 'objective', 'selected_area']\n\n\tdef getApertureSelections(self, aperture_mechanism):\n\t\tif aperture_mechanism == 'objective':\n\t\t\treturn ['open','100']\n\t\tif aperture_mechanism == 'condenser_2' or aperture_mechanism == 'condenser':\n\t\t\treturn ['150','100','70']\n\t\treturn ['open']\n\n\tdef getApertureSelection(self, aperture_mechanism):\n\t\tif aperture_mechanism == 'condenser':\n\t\t\taperture_mechanism = 'condenser_2'\n\t\treturn self.aperture_selection[aperture_mechanism]\n\n\tdef setApertureSelection(self, aperture_mechanism, name):\n\t\tif aperture_mechanism == 'condenser':\n\t\t\taperture_mechanism = 'condenser_2'\n\t\tif name not in self.getApertureSelections(aperture_mechanism):\n\t\t\tself.aperture_selection[aperture_mechanism] = 'unknown'\n\t\t\treturn False\n\t\tself.aperture_selection[aperture_mechanism] = name\n\t\treturn True\n\n\tdef retractApertureMechanism(self, aperture_mechanism):\n\t\treturn self.setApertureSelection(aperture_mechanism, 'open')\n\n\tdef getBeamstopPosition(self):\n\t\treturn self.beamstop_position\n\n\tdef setBeamstopPosition(self, value):\n\t\tprint('beamstop set to %s' % (value,))\n\t\tself.beamstop_position = value\n\nclass SimTEM300(SimTEM):\n\tname = 'SimTEM300'\n\tdef __init__(self):\n\t\tSimTEM.__init__(self)\n\n\t\tself.high_tension = 300000.0\n\n\t\tself.magnifications = [\n\t\t\t1550,\n\t\t\t2250,\n\t\t\t3600,\n\t\t\t4800,\n\t\t\t130000\n\t\t]\n\t\tself.magnification_index = 0\n\n\t\tself.probe_modes = [\n\t\t\t'micro',\n\t\t\t'nano',\n\t\t]\n\n\tdef findMagnifications(self):\n\t\t# fake finding magnifications and set projection submod mappings\n\t\tself.setProjectionSubModeMap({})\n\t\tfor mag in self.magnifications:\n\t\t\tif mag < 2000:\n\t\t\t\tself.addProjectionSubModeMap(mag,'LM',0)\n\t\t\telse:\n\t\t\t\tself.addProjectionSubModeMap(mag,'SA',1)\n\nclass SimDiffrTEM(SimTEM):\n\tname = 'SimDiffrTEM'\n\tprojection_mode = 'diffraction'\n\tdef __init__(self):\n\t\tSimTEM.__init__(self)\n\n\t\tself.magnifications = [\n\t\t\t70,\n\t\t\t120,\n\t\t\t520,\n\t\t\t1200,\n\t\t\t5200,\n\t\t\t27000,\n\t\t\t52000,\n\t\t]\n\t\tself.high_tension = 120000.0\n\n\tdef getProjectionMode(self):\n\t\treturn self.projection_mode\n\nclass SimDiffrTEM300(SimDiffrTEM):\n\tname = 'SimDiffrTEM300'\n\tprojection_mode = 'diffraction'\n\tdef __init__(self):\n\t\tSimDiffrTEM.__init__(self)\n\t\t# to use with SimTEM300\n\t\tself.high_tension = 300000.0\n\nclass SimGlacios(SimTEM):\n\tname = 'SimGlacios'\n\tdef __init__(self):\n\t\tSimTEM.__init__(self)\n\n\t\tself.high_tension = 200000.0\n\n\t\tself.magnifications = [\n\t\t\t155,\n\t\t\t1250,\n\t\t\t8500,\n\t\t\t150000\n\t\t]\n\t\tself.magnification_index = 0\n\n\t\tself.probe_modes = [\n\t\t\t'micro',\n\t\t\t'nano',\n\t\t]\n\n\tdef findMagnifications(self):\n\t\t# fake finding magnifications and set projection submod mappings\n\t\tself.setProjectionSubModeMap({})\n\t\tfor mag in self.magnifications:\n\t\t\tif mag < 1000:\n\t\t\t\tself.addProjectionSubModeMap(mag,'LM',1)\n\t\t\telif mag < 
2600:\n\t\t\t\tself.addProjectionSubModeMap(mag,'Mi',2)\n\t\t\telse:\n\t\t\t\tself.addProjectionSubModeMap(mag,'SA',3)\n\nclass SimDiffrGlacios(SimDiffrTEM):\n\tname = 'SimDiffrGlacios'\n\tprojection_mode = 'diffraction'\n\tdef __init__(self):\n\t\tSimDiffrTEM.__init__(self)\n\t\tself.high_tension = 200000.0\n\t\tself.magnifications = [\n\t\t\t1100,\n\t\t\t2750,\n\t\t]\n","repo_name":"nysbc/leginon-py3","sub_path":"pyscope/simtem.py","file_name":"simtem.py","file_ext":"py","file_size_in_byte":19312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"74864137477","text":"# https://projecteuler.net/problem=56\n\ndef listnum(x):\n list1 = []\n for i in range(5,x):\n for i1 in range(5,x):\n c = str(i ** i1)\n m = 0\n for i2 in range(len(c)):\n m += int(c[i2])\n list1.append(m)\n return max(list1)\n\n\n","repo_name":"kacperp94/Project-Euler","sub_path":"56.py","file_name":"56.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"86527270768","text":"import os\r\nimport cv2\r\nimport time\r\nimport pickle\r\nimport numpy as np\r\nfrom time import sleep\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nfrom flask import request, redirect\r\nfrom keras.models import load_model\r\nfrom keras.preprocessing import image\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask import Flask, render_template\r\nfrom keras.preprocessing.image import img_to_array\r\n\r\napp = Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = './static/pictures'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\r\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\r\ndb = SQLAlchemy(app)\r\n\r\n\r\nclass Todo(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n content = db.Column(db.String(512), nullable=False)\r\n date = db.Column(db.DateTime, default=datetime.utcnow())\r\n\r\n def __repr__(self) -> str:\r\n return '' % self.id\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n return render_template('login.html')\r\n\r\n\r\n@app.route('/signup', methods=['GET', 'POST'])\r\ndef signup():\r\n return render_template('signup.html')\r\n\r\n\r\n@app.route('/houseprice', methods=['GET', 'POST'])\r\ndef houseprice():\r\n return render_template('houseprice.html')\r\n\r\n\r\n@app.route('/detectemotion', methods=['GET', 'POST'])\r\ndef detectemotion():\r\n return render_template('detectemotion.html')\r\n\r\n\r\n@app.route('/upcoming')\r\ndef upcoming():\r\n return render_template('upcoming.html')\r\n\r\n\r\n@app.route('/todo', methods=['GET', 'POST'])\r\ndef todo():\r\n if request.method == 'POST':\r\n task_content = request.form['content']\r\n new_task = Todo(content=task_content)\r\n\r\n try:\r\n db.session.add(new_task)\r\n db.session.commit()\r\n return redirect('/todo')\r\n except:\r\n return 'Unfortunately your operation was unsuccessful.'\r\n else:\r\n tasks = Todo.query.order_by(Todo.date).all()\r\n return render_template('/todo.html', tasks=tasks)\r\n\r\n\r\n@app.route('/delete/')\r\ndef delete(id):\r\n task_to_delete = Todo.query.get_or_404(id)\r\n\r\n try:\r\n db.session.delete(task_to_delete)\r\n db.session.commit()\r\n return redirect('/todo')\r\n except:\r\n return 'There was a problem deleting that task'\r\n\r\n\r\n@app.route('/update/', methods=['GET', 'POST'])\r\ndef update(id):\r\n task = 
Todo.query.get_or_404(id)\r\n\r\n if request.method == 'POST':\r\n task.content = request.form['content']\r\n\r\n try:\r\n db.session.commit()\r\n return redirect('/todo')\r\n except:\r\n return 'There was an issue updating your task'\r\n\r\n else:\r\n return render_template('update.html', task=task)\r\n\r\n\r\ndef get_processed_data(arr):\r\n lst = list()\r\n lst.append(int(arr[0]))\r\n lst.append(int(arr[1]))\r\n lst.append(int(arr[2]))\r\n lst.append(1) if arr[3] == \"C (all)\" else lst.append(0)\r\n lst.append(1) if arr[3] == \"FV\" else lst.append(0)\r\n lst.append(1) if arr[3] == \"RH\" else lst.append(0)\r\n lst.append(1) if arr[3] == \"RL\" else lst.append(0)\r\n lst.append(1) if arr[3] == \"RM\" else lst.append(0)\r\n lst.append(1) if arr[4] == \"Grvl\" else lst.append(0)\r\n lst.append(1) if arr[4] == \"Pave\" else lst.append(0)\r\n lst.append(1) if arr[5] == \"IR1\" else lst.append(0)\r\n lst.append(1) if arr[5] == \"IR2\" else lst.append(0)\r\n lst.append(1) if arr[5] == \"IR3\" else lst.append(0)\r\n lst.append(1) if arr[5] == \"Reg\" else lst.append(0)\r\n lst.append(1) if arr[6] == \"Bnk\" else lst.append(0)\r\n lst.append(1) if arr[6] == \"HLS\" else lst.append(0)\r\n lst.append(1) if arr[6] == \"Low\" else lst.append(0)\r\n lst.append(1) if arr[6] == \"Lvl\" else lst.append(0)\r\n lst.append(1) if arr[7] == \"AllPub\" else lst.append(0)\r\n lst.append(1) if arr[7] == \"NoSeWa\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"Abnorml\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"AdjLand\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"Alloca\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"Family\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"Normal\" else lst.append(0)\r\n lst.append(1) if arr[8] == \"Partial\" else lst.append(0)\r\n return lst\r\n\r\n\r\n@app.route('/predict_house_price', methods=['POST'])\r\ndef predict_house_price():\r\n model = pickle.load(open('./models/model1.pkl', 'rb'))\r\n scale = pickle.load(open('./models/scale.pkl', 'rb'))\r\n features = [x for x in request.form.values()]\r\n order = [2, 3, 0, 1, 4, 6, 7, 5, 8]\r\n features = [features[i] for i in order]\r\n x = features\r\n features = get_processed_data(features)\r\n final_features = np.array([features])\r\n scalled_X = final_features # scale.transform(final_features)\r\n print(scalled_X)\r\n prediction = model.predict(scalled_X)\r\n print(prediction)\r\n return render_template('price.html', Predicted_price=\"{:.2f} INR\".format(prediction[0][0]))\r\n\r\n\r\n@app.route('/detect_emtion', methods=['POST'])\r\ndef detect_emtion():\r\n # Save image from user\r\n file1 = request.files['img-up']\r\n path = os.path.join(app.config['UPLOAD_FOLDER'], 'img.jpg')\r\n file1.save(path)\r\n\r\n face_classifier = cv2.CascadeClassifier('./models/haarcascade.xml')\r\n classifier = load_model('./models/model.h5')\r\n class_labels = ['Angry', 'Disgust', 'Fear',\r\n 'Happy', 'Neutral', 'Sad', 'Surprise']\r\n cap = cv2.imread('./static/pictures/img.jpg')\r\n\r\n # Grab a single frame of video\r\n frame = cap\r\n labels = []\r\n gray = cv2.cvtColor(cap, cv2.COLOR_BGR2GRAY)\r\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\r\n label = None\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)\r\n\r\n if np.sum([roi_gray]) != 0:\r\n roi = roi_gray.astype('float')/255.0\r\n roi = img_to_array(roi)\r\n roi = np.expand_dims(roi, 
axis=0)\r\n\r\n # make a prediction on the ROI, then lookup the class\r\n\r\n preds = classifier.predict(roi)[0]\r\n label = class_labels[preds.argmax()]\r\n label_position = (x, y)\r\n cv2.putText(frame, label, label_position,\r\n cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\r\n print(label)\r\n else:\r\n cv2.putText(frame, 'No Face Found', (20, 60),\r\n cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\r\n\r\n milliseconds = int(round(time.time() * 1000))\r\n img_path = \"./static/pictures/img{}.jpg\".format(milliseconds)\r\n cv2.imwrite(img_path, cap)\r\n #\r\n image = cv2.imread(img_path)\r\n height, width = image.shape[:2]\r\n resized_image = cv2.resize(\r\n image, (3*width, 3*height), interpolation=cv2.INTER_CUBIC)\r\n print(label)\r\n return render_template('detected.html', image=img_path)\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(error):\r\n return render_template('404.html', title='404'), 404\r\n\r\n\r\n@app.after_request\r\ndef add_header(response):\r\n # response.cache_control.no_store = True\r\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\r\n response.headers['Pragma'] = 'no-cache'\r\n response.headers['Expires'] = '-1'\r\n return response\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=8080)\r\n","repo_name":"mistrysontu/cloud_project","sub_path":"main/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5312461106","text":"# 4012. [모의 SW 역량테스트] 요리사\nimport sys\nsys.stdin = open('4012input.txt', 'r')\ndef getSng(g):\n rtn = 0\n for i in range(N // 2 - 1):\n for j in range(i + 1, N // 2):\n rtn += (board[g[i]][g[j]] + board[g[j]][g[i]])\n return rtn\n\nfor tc in range(1, int(input()) + 1):\n N = int(input())\n board = [list(map(int, input().split())) for _ in range(N)]\n MIN = 0xffffff\n for i in range(1 << N):\n g1, g2 = [], []\n for j in range(N):\n if i & 1 << j: g1.append(j)\n else: g2.append(j)\n if len(g1) == N // 2:\n sng1 = getSng(g1)\n sng2 = getSng(g2)\n MIN = min(MIN, abs(sng1 - sng2))\n print('#{} {}'.format(tc, MIN))","repo_name":"banggeut01/algorithm","sub_path":"code/1114/4012.py","file_name":"4012.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18007883736","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 28 23:13:51 2015\n\n@author: alumno\n\"\"\"\n# Serie de Fibonacci\n'''\nserie = [1,1]\ncant_elementos = 20\nfor numero in range(cant_elementos-2):\n serie.append(serie[numero] + serie[numero+1])\nprint serie\n'''\n########\n'''\ncant_elementos = 20\nserie = ones(cant_elementos)\nfor numero in range(cant_elementos-2):\n serie[numero + 2] = serie[numero] + serie[numero + 1]\n \nprint serie\nprint \"Mide \" + str(len(serie))\n\ncocientes = [serie[k+1]/serie[k] for k in range(cant_elementos-1)]\nphi = (1+sqrt(5.0))/2 * ones(cant_elementos)\n\nfigure()\nplot(cocientes,'*')\nplot(phi)\nxlabel(\"Elemento\")\nylabel(\"Valor\")\n'''\n########\nimport numpy as np\nM = np.array([0,1,1,1]).reshape(2,2)\nphi1 = (1+np.sqrt(5))/2\nphi2 = (1-np.sqrt(5))/2\nC = np.array([1,1,phi1,phi2]).reshape(2,2)\nD = np.array([phi1,0,0,phi2]).reshape(2,2)\nvec0 = np.array([[1],[1]])\n\nn = int(input(\"Ingrese el número de elemento de la sucesión de Fibonacci que quiere conocer: \"))\ndef elementofibo(n):\n vecn = 
np.dot(np.dot(C,np.dot(D**(n-1),np.linalg.inv(C))),vec0)\n return vecn[0]\nnumn = elementofibo(n)\nprint(\"El %s-ésimo elemento de la sucesión de Fibonacci es %s.\"%(n,numn))\n \n","repo_name":"NickTrossa/Python","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72070236999","text":"alcool = 0\ngasolina = 0\ndiesel = 0\n\nwhile True:\n d = int(input(''))\n if d == 1:\n alcool = alcool + 1\n elif d == 2:\n gasolina = gasolina + 1\n elif d == 3:\n diesel = diesel + 1\n\n elif d == 4:\n break\n\nprint('MUITO OBRIGADO')\nprint('Alcool: {}'.format(alcool))\nprint('Gasolina: {}'.format(gasolina))\nprint('Diesel: {}'.format(diesel))","repo_name":"danielnascimentotomaz/PLATAFORMA-BEECROWD-PYTHON","sub_path":"LISTA DE PROBLEMA RESOLVIDO/134 TIPO DE COMBUSTIVEL.py","file_name":"134 TIPO DE COMBUSTIVEL.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34803619660","text":"import random\n\ndef start_game():\n\n plays = [\"R\", \"P\", \"S\"]\n\n user_play = input('Please choose a play, please enter, R, P, S: ').upper()\n\n while user_play:\n if user_play in plays:\n break\n else:\n print('Please enter a valid play option')\n user_play = input('Please choose a play, please enter, R, P, S: ').upper()\n\n cpu_play = random.choice(plays)\n\n if user_play == \"R\":\n user_play = \"Rock\"\n elif user_play == \"P\":\n user_play = \"Paper\"\n elif user_play == \"S\":\n user_play = \"Scissors\"\n\n if cpu_play == \"R\":\n cpu_play = \"Rock\"\n elif cpu_play == \"P\":\n cpu_play = \"Paper\"\n elif cpu_play == \"S\":\n cpu_play = \"Scissors\"\n\n print(f'Player ({user_play}) : CPU ({cpu_play})')\n\n if user_play == \"Rock\" and cpu_play == \"Scissors\":\n print('You win!!!')\n elif user_play == \"Scissors\" and cpu_play == \"Rock\":\n print('CPU wins!!!')\n elif user_play == \"Rock\" and cpu_play == \"Paper\":\n print('CPU wins!!!')\n elif user_play == \"Paper\" and cpu_play == \"Rock\":\n print('You win!!!')\n elif user_play == \"Scissors\" and cpu_play == \"Paper\":\n print('You win!!!')\n elif user_play == \"Paper\" and cpu_play == \"Scissors\":\n print('CPU wins!!!')\n elif user_play == cpu_play:\n print('Its a tie')\n start_game()\n\nstart_game()\n","repo_name":"grillzwitu/rock_paper_scissors","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5271963232","text":"from PIL import Image\nfrom .. 
import constants as cs\nfrom fastapi.responses import JSONResponse\nfrom fastapi import status\nimport os\n\n\ndef split_tiff_service(file):\n try:\n tiff_file = Image.open(file)\n tiff_file_name = os.path.basename(file).split(\".\")[0]\n for index in range(tiff_file.n_frames):\n tiff_file.seek(index)\n tiff_file_path = os.path.join(cs.IMG_DIR, f\"{tiff_file_name}-{index}.jpg\")\n tiff_file.save(tiff_file_path, \"JPEG\")\n except Exception as e:\n return JSONResponse(\n content={\n \"message\": str(e) \n },\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n )\n else:\n return JSONResponse(\n content={\n \"message\": \"success\"\n },\n status_code=status.HTTP_200_OK\n )\n","repo_name":"CKVB/Pdf-Tiff-Converter","sub_path":"App/services/split_tiff_service.py","file_name":"split_tiff_service.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"34275570131","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n\ndef preOrderTraversal(root):\n if(root is None):\n return\n\n print(root.data, end=\" \")\n preOrderTraversal(root.left)\n preOrderTraversal(root.right)\n\ndef inOrderTraversal(root):\n if (root is None):\n return\n\n inOrderTraversal(root.left)\n print(root.data, end=\" \")\n inOrderTraversal(root.right)\n\ndef postOrderTraversal(root):\n if (root is None):\n return\n\n postOrderTraversal(root.left)\n postOrderTraversal(root.right)\n print(root.data, end=\" \")\n\ndef sizeOfBT(root):\n if(root is None):\n return 0\n\n return sizeOfBT(root.left)+sizeOfBT(root.right)+1\n # lside = sizeOfBT(root.left)\n # rside = sizeOfBT(root.right)\n # return lside+rside+1\n\ndef main():\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n root.left.left = Node(4)\n root.left.right = Node(5)\n root.right.left = Node(6)\n root.left.left.left = Node(7)\n root.left.left.right = Node(8)\n root.left.left.right.left = Node(9)\n root.left.left.right.left.left = Node(10)\n print(sizeOfBT(root))\n\n\nif __name__=='__main__':\n main()\n\n","repo_name":"Simplysoumen/PrepBytes","sub_path":"Tree/SizeOfBinaryTree.py","file_name":"SizeOfBinaryTree.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"19898509889","text":"import numpy as np\ndef combined_mean_std(means: np.array, stds: np.array, counts:np.array)->tuple:\n \"\"\"[calculates combined statistics]\n Args:\n means (np.array): [mean array of all distributions]\n stds (np.array): [standart deviations array of all distributions]\n counts (np.array): [list of num of elements for all distributions ]\n Returns:\n [tuple]: [combined_means, combined_stds]\n \"\"\"\n N = np.sum(counts)\n # combined Mean\n Mc = np.sum([m*counts[i] for i, m in enumerate(means)]) / np.sum(counts)\n SumX = np.sum(means*counts)\n SumX2 = np.sum(counts*means**2 + (counts)*stds**2)\n SDc = np.sqrt(np.abs((SumX2 - SumX**2/N))/(N))\n return Mc, SDc\n\n","repo_name":"VachikKh/calculate_combined_statistics","sub_path":"calculate_combined_statistics.py","file_name":"calculate_combined_statistics.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24078630094","text":"n=int(input())\nsum=0\nwhile n!=0:\n x=n\n res=1\n while x<2*n+1:\n res=res*x # 1\n x=x+1\n sum+=res\n 
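# res now holds the product n*(n+1)*...*(2n); move on to the next term\n    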
n=n-1\nprint(sum)","repo_name":"Arshidin9856/python2","sub_path":"2 HW/5 var,for monday/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29297442683","text":"import mindspore.nn as nn\r\nimport mindspore.ops as ops\r\nimport mindspore\r\n# from mindspore.nn.transformer import OpParallelConfig\r\nfrom mindspore import context, Tensor, Parameter\r\nimport mindspore.common.dtype as mstype\r\nfrom mindspore.ops import operations as P\r\n\r\nimport numpy as np\r\n\r\ncontext.set_context(mode=context.PYNATIVE_MODE)\r\n\r\n\r\ndef softmax(x, axis=1):\r\n \"\"\" softmax function \"\"\"\r\n\r\n # assert(len(x.shape) > 1, \"dimension must be larger than 1\")\r\n # print(np.max(x, axis = 1, keepdims = True)) # axis = 1, 行\r\n\r\n x -= np.max(x, axis=axis, keepdims=True) # 为了稳定地计算softmax概率, 一般会减掉最大的那个元素\r\n\r\n # print(\"减去行最大值 :\\n\", x)\r\n\r\n x = np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)\r\n\r\n return x\r\n\r\n\r\nclass Softmax(nn.Cell):\r\n def __init__(self, axis=-1):\r\n super(Softmax, self).__init__()\r\n self.axis = axis\r\n self.max = P.ReduceMax(keep_dims=True)\r\n self.sum = P.ReduceSum(keep_dims=True)\r\n self.sub = P.Sub()\r\n self.exp = P.Exp()\r\n self.div = P.RealDiv()\r\n self.cast = P.Cast()\r\n\r\n def construct(self, x):\r\n x = self.cast(x, mstype.float32)\r\n x = self.sub(x, self.max(x, self.axis))\r\n x = self.div(self.exp(x), self.sum(self.exp(x), self.axis))\r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n x = np.random.randint(low=1, high=5, size=(2, 3)) # 生成一个2x3的矩阵,取值范围在1-5之间\r\n print(\"原始 :\\n\", x)\r\n\r\n x_ = softmax(x.copy())\r\n print(\"变换后 :\\n\", x_)\r\n softmax_ = Softmax(axis=1)\r\n x_tensor = Tensor(x, mstype.float32)\r\n print(x_tensor)\r\n x_out = softmax_(x_tensor)\r\n print(x_out)\r\n x_out1 = ops.Softmax(axis=1)(x_tensor)\r\n print(x_out1)\r\n","repo_name":"mindspore-lab/models","sub_path":"research/xidian/CLAN/utils/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"16520367630","text":"import pytest\n\nfrom catalog.Product import get_clean_name\n\nproduct_titles = [\n (\"Lifesavers Bananas | 160g\", \"Lifesavers Bananas\"),\n]\n\n\n@pytest.mark.parametrize(\"input_name, expected\", product_titles)\ndef test_clean_name(input_name, expected):\n name = input_name\n clean_name = get_clean_name(name)\n assert clean_name == expected\n","repo_name":"AlmightyKratos/catalog","sub_path":"tests/test_clean_name.py","file_name":"test_clean_name.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18221832883","text":"#Exemplo identação\n#def sacar(valor):\n# saldo = 500 \n# if saldo >= valor:\n# print('saque concluído') \n#sacar(100)\n\n#if ternario\nsaldo = 500\nsaque = 100\n\nstatus = \"Sucesso\" if saldo >= saque else \"Falha\"\nprint(f'{status} ao realizar saque')\n\n#estruturas de repetição\ntexto = input('Informe um texto')\nVOGAIS = 'AEIOU'\n\nfor letra in texto:\n if letra.upper() in VOGAIS:\n print (letra, end='')\n \nprint() #adiciona uma quebra de linha\n","repo_name":"ribeirorafaela/Projeto-Data-Science-100-dias","sub_path":"Notebooks e 
planilhas/dia_19.py","file_name":"dia_19.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73333290116","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nfrom hp_tools import get_houses, get_features\nfrom tools import preprocess\nfrom LogisticRegressionOVA import LogisticRegressionOVA\n\n\nif __name__ == \"__main__\":\n\tfeatures = get_features()\n\tfile = \"res/dataset_train.csv\"\n\tdf = preprocess(pd.read_csv(file, index_col=\"Index\"))\n\n\ty_label = \"Hogwarts House\"\n\tX = df.loc[:, features]\n\tX_test = X.sample(frac=0.3)\n\tX_train = X.loc[X.index.difference(X_test.index.values)]\n\ty_test, y_train = df.loc[X_test.index.values][y_label], df.loc[X_train.index.values][y_label]\n\n\tall = get_houses()\n\tlog_reg = LogisticRegressionOVA().fit(X_train, y_train, one_vs_all=all)\n\tlog_reg.save_classifier_into(\"classifier.npy\")\n\tscore = log_reg.score(X_test, y_test, one_vs_all=all)\n\tprint(\"My score = {}\".format(round(score, 2)))\n","repo_name":"f-huang/logistic_regression","sub_path":"logreg_train_test_split.py","file_name":"logreg_train_test_split.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30025345758","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns=[\n path(\"\",views.dashboard,name='dashboard'),\n path(\"list/\",views.list, name='list'),\n path(\"create-list\",views.createList, name='create-list'),\n path(\"delete-list/\",views.deleteList, name='delete-list'),\n path(\"share-list/\",views.shareList, name='share-list'),\n path(\"delete-task/\",views.deleteTask,name='delete-task'),\n]","repo_name":"JacekHordyj/sharedo","sub_path":"lists/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13689417085","text":"from django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom reservation.models import Reservation\nfrom .forms import PaymentForm\nfrom .models import Transaction\nfrom django.utils.decorators import method_decorator\nfrom registration.decorators import user_is_confirmed\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\n\n\n@method_decorator([login_required, user_is_confirmed], name='dispatch')\nclass PaymentView(View):\n template_name = 'bank.html'\n\n def get(self, request, *args, **kwargs):\n res_id = kwargs['resid']\n amount = get_object_or_404(Reservation, id=res_id)\n amount = amount.total_price\n return render(request, self.template_name, {'amount': amount, 'reservation_id': res_id})\n\n def post(self, request, *args, **kwargs):\n reservation_id = kwargs['resid']\n form = PaymentForm(request.POST)\n if form.is_valid():\n success = form.cleaned_data.get('success')\n if success == 'success':\n success = True\n else:\n success = False\n reservation = get_object_or_404(Reservation, pk=reservation_id)\n Transaction.objects.create(is_successful=success, reservation=reservation)\n acc_id = reservation.roominfo.first().room.accommodation.pk\n if success:\n messages.success(request, 'پرداخت شما با موفقیت انجام شد.')\n return redirect('user_reserve')\n else:\n messages.error(request, 'پرداخت شما با موفقیت انجام 
نشد. دوباره تلاش کنید.')\n return redirect(reverse('accommodation_detail', kwargs={'pk': acc_id}))\n","repo_name":"mahooly/Accommodation-Reservation-and-Management-Project","sub_path":"Code/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8937209121","text":"import logging\n\nfrom taskflow.patterns import linear_flow\n\nfrom pumphouse import task\nfrom pumphouse import events\nfrom pumphouse import exceptions\nfrom pumphouse.tasks import utils as task_utils\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass RetrieveFloatingIP(task.BaseCloudTask):\n def execute(self, address):\n floating_ip = self.cloud.nova.floating_ips_bulk.find(address=address)\n return floating_ip.to_dict()\n\n\nclass EnsureFloatingIPBulk(task.BaseCloudTask):\n def execute(self, floating_ip_info):\n address = floating_ip_info[\"address\"]\n pool = floating_ip_info[\"pool\"]\n try:\n floating_ip = self.cloud.nova.floating_ips_bulk.find(\n address=address)\n except exceptions.nova_excs.NotFound:\n self.cloud.nova.floating_ips_bulk.create(address,\n pool=pool)\n try:\n floating_ip = self.cloud.nova.floating_ips_bulk.find(\n address=address)\n except exceptions.nova_excs.NotFound:\n LOG.exception(\"Not added: %s\", address)\n self.not_added_event(address)\n raise\n else:\n LOG.info(\"Created: %s\", floating_ip.to_dict())\n self.created_event(floating_ip)\n else:\n LOG.warn(\"Already exists, %s\", floating_ip.to_dict())\n return floating_ip.to_dict()\n\n def created_event(self, floating_ip):\n events.emit(\"create\", {\n \"id\": floating_ip.address,\n \"type\": \"floating_ip\",\n \"cloud\": self.cloud.name,\n \"data\": dict(floating_ip.to_dict(),\n name=floating_ip.address),\n }, namespace=\"/events\")\n\n def not_added_event(self, address):\n events.emit(\"error\", {\n \"message\": \"FloatingIpsBulk {} was not created, next attempt\"\n .format(address),\n }, namespace=\"/events\")\n\n\nclass EnsureFloatingIP(task.BaseCloudTask):\n # TODO(ogelbukh): this task must be refactored in a way that replaces a\n # while loop with built-in retry mechanism of Taskflow lib\n def execute(self, server_info, floating_ip_info, fixed_ip_info):\n floating_ip_address = floating_ip_info[\"address\"]\n fixed_ip_address = fixed_ip_info[\"v4-fixed-ip\"]\n server_id = server_info[\"id\"]\n try:\n floating_ip = self.cloud.nova.floating_ips_bulk.find(\n address=floating_ip_address)\n except exceptions.nova_excs.NotFound:\n LOG.exception(\"No Floating IP: %s\",\n floating_ip_address)\n raise\n if floating_ip.instance_uuid is None:\n tries = []\n while len(tries) in range(30):\n try:\n # FIXME(ogelbukh): pass fixed ip address to bind to,\n # requires retention of network information for server\n self.cloud.nova.servers.add_floating_ip(\n server_id, floating_ip_address, None)\n except exceptions.nova_excs.BadRequest as exc:\n tries.append(exc)\n pass\n else:\n floating_ip = self.cloud.nova.floating_ips_bulk.find(\n address=floating_ip_address)\n LOG.info(\"Assigned floating ip: %s\",\n floating_ip.to_dict())\n self.assigned_event(floating_ip_address, server_id)\n return floating_ip.to_dict()\n else:\n LOG.exception(\"Unable to add floating ip: %s\",\n floating_ip.to_dict())\n self.assigning_error_event(floating_ip_address, server_id)\n raise exceptions.TimeoutException()\n elif floating_ip.instance_uuid == server_id:\n LOG.warn(\"Already associated: %s\", floating_ip)\n return floating_ip.to_dict()\n else:\n 
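# the address is already bound to a different instance\n            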
LOG.exception(\"Duplicate association: %s\", floating_ip)\n raise exceptions.Conflict()\n\n def assigned_event(self, address, server_id):\n events.emit(\"update\", {\n \"id\": address,\n \"type\": \"floating_ip\",\n \"cloud\": self.cloud.name,\n \"data\": {\n \"server_id\": server_id,\n }\n }, namespace=\"/events\")\n\n def assigning_error_event(self, address, server_id):\n events.emit(\"error\", {\n \"message\": \"Couldn't assign FloatingIp {} to Server {}\"\n .format(address, server_id),\n }, namespace=\"/events\")\n\n\ndef migrate_floating_ip(context, address):\n \"\"\"Replicate Floating IP from source cloud to destination cloud\"\"\"\n floating_ip_binding = \"floating-ip-{}\".format(address)\n floating_ip_retrieve = \"floating-ip-{}-retrieve\".format(address)\n floating_ip_bulk_ensure = \"floating-ip-bulk-{}-ensure\".format(address)\n flow = linear_flow.Flow(\"migrate-floating-ip-{}\".format(address))\n flow.add(RetrieveFloatingIP(context.src_cloud,\n name=floating_ip_binding,\n provides=floating_ip_binding,\n rebind=[floating_ip_retrieve]))\n flow.add(EnsureFloatingIPBulk(context.dst_cloud,\n name=floating_ip_bulk_ensure,\n provides=floating_ip_bulk_ensure,\n rebind=[floating_ip_binding]))\n context.store[floating_ip_retrieve] = address\n return flow\n\n\ndef associate_floating_ip_server(context, floating_ip_address,\n fixed_ip_info, server_id):\n \"\"\"Associates Floating IP to Nova instance\"\"\"\n floating_ip_bulk_ensure = \"floating-ip-bulk-{}-ensure\".format(\n floating_ip_address)\n floating_ip_sync = \"floating-ip-{}-{}-sync\".format(server_id,\n floating_ip_address)\n fixed_ip_address = fixed_ip_info[\"addr\"]\n fixed_ip_nic = \"fixed-ip-{}-nic\".format(fixed_ip_address)\n server_boot = \"server-{}-boot\".format(server_id)\n floating_ip_ensure = \"floating-ip-{}-ensure\".format(floating_ip_address)\n flow = linear_flow.Flow(\"associate-floating-ip-{}-server-{}\"\n .format(floating_ip_address, server_id))\n flow.add(task_utils.SyncPoint(name=floating_ip_sync,\n requires=[floating_ip_bulk_ensure,\n server_boot]))\n flow.add(EnsureFloatingIP(context.dst_cloud,\n name=floating_ip_ensure,\n provides=floating_ip_ensure,\n rebind=[server_boot,\n floating_ip_bulk_ensure,\n fixed_ip_nic]))\n return flow\n","repo_name":"UshF/pumphouse","sub_path":"pumphouse/tasks/network/nova/floating_ip.py","file_name":"floating_ip.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73966730118","text":"#!/usr/bin/env python3\n\n# https://leetcode-cn.com/problems/shortest-completing-word\n# 如果单词列表(words)中的一个单词包含牌照(licensePlate)中所有的字母,那么我们称之为完整词。\n# 在所有完整词中,最短的单词我们称之为最短完整词。\n# 单词在匹配牌照中的字母时不区分大小写,比如牌照中的 \"P\" 依然可以匹配单词中的 \"p\" 字母。\n# 我们保证一定存在一个最短完整词。当有多个单词都符合最短完整词的匹配条件时取单词列表中最靠前的一个。\n# 牌照中可能包含多个相同的字符,比如说:对于牌照 \"PP\",单词 \"pair\" 无法匹配,但是 \"supper\" 可以匹配。\n#\n# 示例 1:\n# 输入:licensePlate = \"1s3 PSt\", words = [\"step\", \"steps\", \"stripe\", \"stepple\"]\n# 输出:\"steps\"\n# 说明:最短完整词应该包括 \"s\"、\"p\"、\"s\" 以及 \"t\"。对于 \"step\" 它只包含一个 \"s\" 所以它不符合条件。同时在匹配过程中我们忽略牌照中的大小写。\n#  \n# 示例 2:\n# 输入:licensePlate = \"1s3 456\", words = [\"looks\", \"pest\", \"stew\", \"show\"]\n# 输出:\"pest\"\n# 说明:存在 3 个包含字母 \"s\" 且有着最短长度的完整词,但我们返回最先出现的完整词。\n#\n# 注意:\n# 牌照(licensePlate)的长度在区域[1, 7]中。\n# 牌照(licensePlate)将会包含数字、空格、或者字母(大写和小写)。\n# 单词列表(words)长度在区间 [10, 1000] 中。\n# 每一个单词 words[i] 都是小写,并且长度在区间 [1, 15] 中。\n\n\nclass Solution:\n def shortestCompletingWord(self, licensePlate: str, words: [str]) -> str:\n allChDic = dict()\n for 
ch in licensePlate.lower():\n if 'a' <= ch <= 'z':\n allChDic[ch] = allChDic.get(ch, 0) + 1\n res = list()\n for wd in words:\n temp = allChDic.copy()\n for ch in wd:\n if temp.get(ch) is not None:\n temp[ch] = temp[ch] - 1\n if temp[ch] == 0:\n del temp[ch]\n if len(temp) == 0:\n res.append(wd)\n return min(res, key=len)\n\n\nprint(Solution().shortestCompletingWord(\"1s3 PSt\", [\"step\", \"steps\", \"stripe\", \"stepple\"])) # steps\nprint(Solution().shortestCompletingWord(\"1s3 456\", [\"looks\", \"pest\", \"stew\", \"show\"])) # pest\n","repo_name":"HeDefine/LeetCodePractice","sub_path":"Q748.最短完整词.py","file_name":"Q748.最短完整词.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71266427719","text":"import numpy as np\nfrom numpy import pi, sin, cos\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n\ndef walk_one_dim() : \n \"\"\" gives a visual representation of the one dimensional random walk with N steps \"\"\"\n global N, prob\n walk = np.array([np.float64(x) for x in range(0)])\n s = 0\n for i in range(N) : # loops through the number of steps\n r = np.random.rand() # random number to decide whether the walker goes left or right\n s += 1*(rprob) # describes the walk\n walk = np.append(walk, [s])\n steps = np.linspace(0, N, N)\n plt.scatter(steps, walk, alpha=0.5, s=0.1, color='blue')\n plt.title(\"1D random walk with {} steps and {} probability to go up\".format(N, prob))\n plt.xlabel(\"steps\")\n plt.ylabel(\"position\")\n plt.show()\n #plt.savefig('random_walk_1D.png', dpi=1200)\n\ndef walk_two_dim() :\n \"\"\" gives a visual representation of the two dimensional random walk with N steps \"\"\"\n global N\n walk = np.zeros((N,2))\n x, y = 0, 0\n for i in range(1, N) :\n t = np.random.rand()\n x += cos(2*pi*t)\n y += sin(2*pi*t)\n walk[i][0] = x\n walk[i][1] = y\n plt.plot(walk[:,0], walk[:,1], linewidth=0.2, color='blue')\n plt.scatter(walk[:,0], walk[:,1], s=0.1, color='black')\n plt.title(\"2D random walk with {} steps\".format(N))\n #plt.savefig('random_walk_2D.png', dpi=1200)\n plt.show()\n\n\ndef walk_three_dim() :\n \"\"\" gives a visual representation of the three dimensional random walk with N steps \"\"\"\n global N\n walk = np.zeros((N,3))\n x, y, z = 0, 0, 0\n for i in range(1, N) :\n tht = np.random.rand()\n phi = np.random.rand()\n x += cos(2*pi*phi)*sin(pi*tht)\n y += sin(2*pi*phi)*sin(pi*tht)\n z += cos(pi*tht)\n walk[i][0] = x\n walk[i][1] = y\n walk[i][2] = z\n ax = plt.axes(projection='3d')\n ax.plot3D(walk[:,0], walk[:,1], walk[:,2], linewidth=0.2, color='blue')\n #ax.scatter3D(walk[:,0], walk[:,1], walk[:,2], s=0.1, color='black')\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n ax.set_title(\"3D random walk with {} steps\".format(N))\n plt.show()\n \n\nif __name__ == \"__main__\" :\n # parameters\n N = int(input(\"Enter the number of steps : \")) # (maximum) number of steps\n prob = 0.5 # float(input(\"Enter the probability to move to the right : \")) # probability of taking a step to the right\n dim = int(input(\"Enter the number of dimensions (possible values = 1, 2 or 3) : \"))\n if dim == 1 :\n walk_one_dim()\n if dim == 2 :\n walk_two_dim()\n if dim == 3 :\n 
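# note: tht is uniform in [0,1), so step directions are denser near the poles than a uniform spherical distribution\n        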
walk_three_dim()","repo_name":"physmath17/random_walk","sub_path":"visualizing_random_walks.py","file_name":"visualizing_random_walks.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43103224770","text":"# Read Number in Chinese (25)\n# https://www.nowcoder.com/pat/1/problem/4312\n# 时间限制 1000 ms 内存限制 65536 KB 代码长度限制 100 KB 判断程序 Standard (来自 小小)\n# 题目描述\n# Given an integer with no more than 9 digits, you are supposed to read it in the traditional Chinese way. Output \"Fu\" first if it is negative. For example, -123456789 is read as \"Fu yi Yi er Qian san Bai si Shi wu Wan liu Qian qi Bai ba Shi jiu\". Note: zero (\"ling\") must be handled correctly according to the Chinese tradition. For example, 100800 is \"yi Shi Wan ling ba Bai\".\n#\n# 输入描述:\n# Each input file contains one test case, which gives an integer with no more than 9 digits.\n#\n#\n# 输出描述:\n# For each test case, print in a line the Chinese way of reading the number. The characters are separated by a space and there must be no extra space at the end of the line.\n#\n# 输入例子:\n# -123456789\n#\n# 输出例子:\n# Fu yi Yi er Qian san Bai si Shi wu Wan liu Qian qi Bai ba Shi jiu\n\n\nname_value = {'0': 'ling', '1': 'yi', '2': 'er', '3': 'san', '4': 'si', '5': 'wu', '6': 'liu', '7': 'qi', '8': 'ba',\n '9': 'jiu'}\n\n\ndef four_number(num, header=False):\n unit_value = ['', ' Shi', ' Bai', ' Qian']\n num_value = int(num)\n num = str(num_value)\n if num_value == 0:\n return ['ling', ]\n to_return = []\n if num_value < 1000 and header:\n to_return.append('ling')\n first_flag = True\n for i in range(0, len(num)):\n # first occur non-zero number\n if num[-i - 1] != '0' and first_flag:\n first_flag = False\n if not first_flag:\n if num[-i - 1] == '0':\n to_insert = 'ling'\n else:\n to_insert = name_value[num[-i - 1]] + unit_value[i]\n if len(to_return) == 0 or not (to_insert == to_return[0] == 'ling'):\n to_return.insert(int(num_value < 1000 and header), to_insert)\n return to_return\n\n\nnumber = str(int(input()))\nif number == '0':\n print('ling')\nelse:\n negative = number[0] == '-'\n if negative:\n number = number[1:]\n unit_value_2 = ['', 'Wan', 'Yi']\n to_display = []\n count = 0\n while True:\n flag = True\n if len(number) < 4:\n flag = False\n if number == '':\n break\n t = number[-4:] if len(number) > 4 else number\n part = four_number(t, flag)\n if len(to_display) == 0 and part == ['ling', ]:\n pass\n elif not (len(to_display) > 0 and to_display[0] == part[0] == 'ling'):\n to_display = part + ([unit_value_2[count], ] if int(t) > 0 else []) + to_display\n if not flag:\n break\n number = number[:-4]\n count += 1\n\n print(('Fu ' if negative else '') + ' '.join(to_display).strip())\n","repo_name":"novioleo/Exercise","sub_path":"Read Number In Chinese.py","file_name":"Read Number In Chinese.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"35013485549","text":"from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\n\nclass SearchResults:\n\n def __init__(self, driver):\n self.driver = driver\n\n webview = self.driver.instance.contexts[1]\n self.driver.instance.switch_to.context(webview)\n self.img_button = WebDriverWait(self.driver.instance, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#hdtb-msb > 
div:nth-child(3) > a'))\n )\n\n def change_to_img(self):\n self.img_button.click()\n\n","repo_name":"MattV-Fayesg/test_chrome_automation","sub_path":"objects/search_results.py","file_name":"search_results.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33002298818","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass AODnet(nn.Module):\n def __init__(self):\n super(AODnet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1, stride=1, padding=0)\n self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=5, stride=1, padding=2)\n self.conv4 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=7, stride=1, padding=3)\n self.conv5 = nn.Conv2d(in_channels=12, out_channels=3, kernel_size=3, stride=1, padding=1)\n self.b = 1\n\n def forward(self, x):\n x1 = F.relu(self.conv1(x))\n x2 = F.relu(self.conv2(x1))\n cat1 = torch.cat((x1, x2), 1)\n x3 = F.relu(self.conv3(cat1))\n cat2 = torch.cat((x2, x3), 1)\n x4 = F.relu(self.conv4(cat2))\n cat3 = torch.cat((x1, x2, x3, x4), 1)\n k = F.relu(self.conv5(cat3))\n\n if k.size() != x.size():\n raise Exception(\"k, haze image are different size!\")\n\n output = k * x - k + self.b\n return F.relu(output)\n\nclass AOD_pono_net(nn.Module):\n def __init__(self):\n super(AOD_pono_net, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1, stride=1, padding=0)\n self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)\n self.conv3 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=5, stride=1, padding=2)\n self.conv4 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=7, stride=1, padding=3)\n self.conv5 = nn.Conv2d(in_channels=12, out_channels=3, kernel_size=3, stride=1, padding=1)\n self.b = 1\n\n self.pono = PONO(affine=False)\n self.ms = MS()\n\n def forward(self, x):\n x1 = F.relu(self.conv1(x))\n x2 = F.relu(self.conv2(x1))\n cat1 = torch.cat((x1, x2), 1)\n x1, mean1, std1 = self.pono(x1)\n x2, mean2, std2 = self.pono(x2)\n x3 = F.relu(self.conv3(cat1))\n cat2 = torch.cat((x2, x3), 1)\n x3 = self.ms(x3, mean1, std1)\n x4 = F.relu(self.conv4(cat2))\n x4 = self.ms(x4, mean2, std2)\n cat3 = torch.cat((x1, x2, x3, x4), 1)\n k = F.relu(self.conv5(cat3))\n\n if k.size() != x.size():\n raise Exception(\"k, haze image are different size!\")\n\n output = k * x - k + self.b\n return F.relu(output)\n\nclass PONO(nn.Module):\n def __init__(self, input_size=None, return_stats=False, affine=True, eps=1e-5):\n super(PONO, self).__init__()\n self.return_stats = return_stats\n self.input_size = input_size\n self.eps = eps\n self.affine = affine\n\n if affine:\n self.beta = nn.Parameter(torch.zeros(1, 1, *input_size))\n self.gamma = nn.Parameter(torch.ones(1, 1, *input_size))\n else:\n self.beta, self.gamma = None, None\n\n def forward(self, x):\n mean = x.mean(dim=1, keepdim=True)\n std = (x.var(dim=1, keepdim=True) + self.eps).sqrt()\n x = (x - mean) / std\n if self.affine:\n x = x * self.gamma + self.beta\n return x, mean, std\n\nclass MS(nn.Module):\n def __init__(self, beta=None, gamma=None):\n super(MS, self).__init__()\n self.gamma, self.beta = gamma, beta\n\n def forward(self, x, beta=None, gamma=None):\n beta = self.beta if beta is None else beta\n gamma = self.gamma if gamma is None else gamma\n if gamma is not 
None:\n x.mul_(gamma)\n if beta is not None:\n x.add_(beta)\n return x\n","repo_name":"Boyiliee/AOD-Net","sub_path":"AOD-Net with PONO/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"62"} {"seq_id":"32283816121","text":"from classes import Car\n\n# from classes import Person\n#\n# person1 = Person('John')\n# person1.print_info()\n#\n# person2 = Person('Katy')\n# # print(person2._Person__age)\n# # print(person2.get_age())\n# # person2.set_age(22)\n# print(person2.age)\n# person2.age = 36\n# person2.print_info()\n\n\nmustang = Car('Mustang')\nmustang.hp = 3\nmustang.car_info()\n\nporsche = Car('Porsche')\nporsche.hp = 200\nporsche.year = 1950\nporsche.color = 'Grey'\nporsche.car_info()\n\n\n\n\n\n","repo_name":"SanChoysGitHub/CourseHunter","sub_path":"folder/lesson37.py","file_name":"lesson37.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"71954137479","text":"'''\nImplements the checker module of the proxy pool.\nGoal: check proxy IP availability so that the proxies kept in the pool stay mostly usable.\nApproach:\n1. Create a ProxyTester class in proxy_test.py.\n2. Provide a run method that handles the core proxy-checking logic:\n fetch all proxy IPs from the database;\n iterate over the proxy list;\n check each proxy's availability;\n if a proxy is unavailable, decrease its score by 1; if the score reaches 0, delete the proxy from the database, otherwise update it;\n if the proxy is available, restore its score and update it in the database.\n3. To speed up checking, run the checks asynchronously:\n create the queue and the coroutine pool in __init__;\n put the proxies to be checked into the queue;\n i. extract the single-proxy check into its own method: take a proxy IP from the queue, check it, and call the queue's task_done method when done;\n ii. keep that method running continuously through an async callback;\n iii. start several async tasks to process the checks; the number can be set in the config file;\n call the queue's join method so the current thread waits for the queued tasks to finish.\n4. Use the schedule module to run the check task at a fixed interval:\n define a class method start that launches the checker module;\n in start:\n i. create an instance of this class;\n ii. call its run method;\n iii. re-run the run method at a fixed interval.\n'''\n\nfrom core.db.mongo_pool import MongoPool\nfrom core.proxy_validate.httpbin_validator import check_proxy\nfrom settings import MAX_SCORE, TEST_PROXIES_ASYNC_COUNT, RUN_TEST_PROXIES_INTERVAL\n\nfrom gevent import monkey\nmonkey.patch_all()\nfrom gevent.pool import Pool\nfrom queue import Queue\nimport schedule\nimport time\n\nclass ProxyTester(object):\n\n def __init__(self):\n # Create the MongoPool object used to access the database\n self.mongo_pool = MongoPool()\n # Create the queue and the coroutine pool\n self.coroutine_pool = Pool()\n self.queue = Queue()\n\n def __check_callback(self, temp):\n self.coroutine_pool.apply_async(self.__check_one_proxy, callback=self.__check_callback)\n\n def run(self):\n # Fetch all proxy IPs from the database\n proxies = self.mongo_pool.find_all()\n # Iterate over the proxy list\n for proxy in proxies:\n # Check proxy availability\n # self.__check_one_proxy(proxy)\n # Put the proxy to be checked into the queue\n self.queue.put(proxy)\n\n # Start several async tasks to process the proxy checks\n for i in range(TEST_PROXIES_ASYNC_COUNT):\n # Keep executing the check method in a loop through the async callback\n self.coroutine_pool.apply_async(self.__check_one_proxy, callback=self.__check_callback)\n\n self.queue.join()\n\n def __check_one_proxy(self):\n \"\"\"Check the availability of a single proxy IP\"\"\"\n\n # Take a proxy IP from the queue and check it\n proxy = self.queue.get()\n\n proxy = check_proxy(proxy)\n if proxy.speed == -1:\n # If the proxy is unavailable, decrease its score by 1\n proxy.score -= 1\n if proxy.score == 0:\n # If the score reaches 0, delete the proxy from the database\n self.mongo_pool.delete_one(proxy)\n else:\n # Otherwise update the proxy\n self.mongo_pool.update_one(proxy)\n else:\n # If the proxy is available, restore its score and update it in the database\n proxy.score = MAX_SCORE\n self.mongo_pool.update_one(proxy)\n\n # Check finished; call the queue's task_done method\n self.queue.task_done()\n\n @classmethod\n def start(cls):\n # Create an instance of this class\n pt = ProxyTester()\n # Call the run method\n pt.run()\n\n # Re-run the run method at a fixed interval\n schedule.every(RUN_TEST_PROXIES_INTERVAL).hours.do(pt.run)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\nif __name__ == '__main__':\n # pt = ProxyTester()\n # pt.run()\n\n 
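# Usage note: start() below never returns; it runs one full check pass, then re-runs run() every RUN_TEST_PROXIES_INTERVAL hours inside the schedule polling loop.\n 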
ProxyTester.start()","repo_name":"Stark-Xue/pachong","sub_path":"IPProxyPool/core/proxy_test.py","file_name":"proxy_test.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19714971276","text":"\"\"\"\n@author: Arkan M. Gerges\n\"\"\"\n\nfrom sqlalchemy import Column, Integer, ForeignKey, Table\n\nimport src.port_adapter.AppDi as AppDi\n\nBase = AppDi.instance.get(AppDi.DbBase)\nUSER__ROLE__JUNCTION = \"user__role__junction\"\nassociationTable = Table(\n USER__ROLE__JUNCTION,\n Base.metadata,\n Column(\n \"user_id\",\n Integer,\n ForeignKey(\"user.id\", ondelete=\"CASCADE\", onupdate=\"CASCADE\"),\n ),\n Column(\n \"role_id\",\n Integer,\n ForeignKey(\"role.id\", ondelete=\"CASCADE\", onupdate=\"CASCADE\"),\n ),\n)\n","repo_name":"arkanmgerges/cafm.project","sub_path":"src/port_adapter/repository/db_model/user__role__junction.py","file_name":"user__role__junction.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34019474060","text":"def areIsomorphic(s, t):\n if len(s) != len(t):\n return False\n \n s_to_t_mapping = {}\n t_to_s_mapping = {}\n \n for i in range(len(s)):\n s_char = s[i]\n t_char = t[i]\n \n # Check if s_char already has a mapping to t_char\n if s_char in s_to_t_mapping:\n if s_to_t_mapping[s_char] != t_char:\n return False\n else:\n s_to_t_mapping[s_char] = t_char\n \n # Check if t_char already has a mapping to s_char\n if t_char in t_to_s_mapping:\n if t_to_s_mapping[t_char] != s_char:\n return False\n else:\n t_to_s_mapping[t_char] = s_char\n \n return True\n\n# Example usage:\ns1 = \"egg\"\nt1 = \"add\"\nprint(areIsomorphic(s1, t1)) # Output: True\n\ns2 = \"foo\"\nt2 = \"bar\"\nprint(areIsomorphic(s2, t2)) # Output: False\n","repo_name":"ATIF176/Coding-ACM-Fellowship","sub_path":"LeetCode Problems/Hash Tables/isomorphic_string.py","file_name":"isomorphic_string.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"9879914326","text":"import random\nimport time\nimport keyboard\n\ndef volver_jugar():\n contador = 10\n while contador > 0:\n contador -= 1 \n print (contador)\n time.sleep(1)\n\n\ndef run():\n pc_numero = random.randint(1,100)\n numero_usr = int(input(\"Introduce un numero del 1 al 100: \"))\n contador_vidas = 5\n\n while numero_usr != pc_numero:\n if numero_usr < pc_numero:\n print('Mi numero es MAYOR')\n else:\n print('Mi numero es MENOR')\n contador_vidas -=1\n print(\"Te quedan: {} vidas\".format(contador_vidas)) \n\n numero_usr = int(input(\"Introduce otro numero: \"))\n \n if contador_vidas == 1:\n print('HAZ PERDIDO!')\n break \n if numero_usr == pc_numero:\n print('GANASTE!') \n\n print(\"CONTINUAR???: \\n\") \n volver_jugar()\n\n\nif __name__ == '__main__':\n run();","repo_name":"Kron0z/Curso_python_platzi","sub_path":"adivinar_numero.py","file_name":"adivinar_numero.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13955455394","text":"import torch\nimport torch.nn as nn\nimport torchvision\n\ndef vgg16_bn():\n vgg16 = torchvision.models.vgg16_bn()\n vgg16.classifier.add_module(\"head\", nn.Linear(1000, 19))\n return vgg16\n\ndef vgg16_bn_pre():\n vgg16 = torchvision.models.vgg16_bn(pretrained=True)\n vgg16.classifier.add_module(\"head\", 
nn.Linear(1000, 19))\n return vgg16\n\ndef resnet34():\n # plain ResNet-34 backbone (no pretrained weights here)\n res34 = torchvision.models.resnet34(pretrained=False)\n numFit = res34.fc.in_features\n res34.fc = nn.Linear(numFit, 19)\n print(\"resnet 34 down\")\n return res34\n\ndef resnet34pre():\n # use a pretrained model\n res34 = torchvision.models.resnet34(pretrained=True)\n numFit = res34.fc.in_features\n res34.fc = nn.Linear(numFit, 19)\n print(\"resnet 34 down\")\n return res34\n\ndef resnet34pre_frozen():\n # use a pretrained model\n res34 = torchvision.models.resnet34(pretrained=True)\n numFit = res34.fc.in_features\n res34.fc = nn.Linear(numFit, 19)\n for param in res34.parameters():\n param.requires_grad = False\n for param in res34.fc.parameters():\n param.requires_grad = True\n for param in res34.layer4.parameters():\n param.requires_grad = True\n print(\"resnet 34 down\")\n return res34\n\ndef resnext101pre_frozen():\n resnext101 = torchvision.models.resnext101_32x8d(pretrained=True)\n resnext101.add_module(\"head\", nn.Linear(1000, 19))\n # print(resnext101)\n for param in resnext101.parameters():\n param.requires_grad = False\n for param in resnext101.fc.parameters():\n param.requires_grad = True\n for param in resnext101.head.parameters():\n param.requires_grad = True\n return resnext101\n\ndef bitpre():\n import timm\n bit = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=19)\n return bit\n\n\n\n","repo_name":"pengc02/Classification-of-Marine-Animals","sub_path":"code/model/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} {"seq_id":"21555593019","text":"#Author: David Hernandez\r\n#Description: This program calculates windchill from the user's input of fahrenheit temperature and wind speed.\r\n\r\n#Imports tkinter to be used in the program.\r\nimport tkinter\r\n\r\nclass windchill_calculator_GUI:\r\n def __init__(self):\r\n\r\n #This part creates the main window for the program.\r\n self.main_window = tkinter.Tk()\r\n self.main_window.title(\"Windchill Calculator\")\r\n\r\n #This section creates five frames to group widgets.\r\n self.frame1 = tkinter.Frame()\r\n self.frame2 = tkinter.Frame()\r\n self.frame3 = tkinter.Frame()\r\n self.frame4 = tkinter.Frame()\r\n self.frame5 = tkinter.Frame()\r\n\r\n #Create the widget for frame1.\r\n self.title_label = tkinter.Label(self.frame1,\r\n text='Windchill Calculator',\r\n font = ('verdana', 12))\r\n\r\n #Creates the widget for frame2.\r\n self.prompt_label = tkinter.Label(self.frame2,\r\n text='Enter the temperature in degrees Fahrenheit:')\r\n self.fahrenheit_entry = tkinter.Entry(self.frame2,\r\n width=10)\r\n\r\n #Creates the widget for frame3.\r\n self.prompt_two = tkinter.Label(self.frame3,\r\n text='Enter the wind speed in mph:')\r\n self.wind_speed_entry = tkinter.Entry(self.frame3,\r\n width=10)\r\n\r\n #Creates the widget for frame4.\r\n self.magic_button = tkinter.Button(self.frame4,\r\n text='Calculate Windchill',\r\n command = self.convert)\r\n\r\n #Creates the widget for frame5.\r\n self.result = tkinter.Label(self.frame5,\r\n text='The windchill temperature is:')\r\n\r\n #Pack all the frame widgets.\r\n self.title_label.pack()\r\n self.prompt_label.pack(side='left')\r\n self.fahrenheit_entry.pack(side='left')\r\n self.prompt_two.pack(side='left')\r\n self.wind_speed_entry.pack(side='left')\r\n self.magic_button.pack()\r\n self.result.pack()\r\n\r\n #Pack the frames into the main window.\r\n self.frame1.pack()\r\n self.frame2.pack()\r\n 
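#Note: the convert() method further below applies the NWS wind-chill formula WC = 35.74 + 0.6215*T - 35.75*V**0.16 + 0.4275*T*V**0.16, with T in degrees Fahrenheit and V in mph.\r\n 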
self.frame3.pack()\r\n self.frame4.pack()\r\n self.frame5.pack()\r\n\r\n # Enter the tkinter main loop.\r\n tkinter.mainloop()\r\n\r\n #This part manages the calculations of windchill by taking the users' input of wind speed and temperature.\r\n def convert(self):\r\n\r\n #These lines define the variables temperature and wind speed with values input by the user earlier.\r\n temperature = float(self.fahrenheit_entry.get())\r\n wind_speed = float(self.wind_speed_entry.get())\r\n\r\n #This section calculates windchill and formats it.\r\n windchill = 35.74 + 0.6215 * temperature - 35.75 * wind_speed ** 0.16 + 0.4275 * temperature * wind_speed ** 0.16\r\n windchill = (\"%.1f\" % windchill)\r\n\r\n #These lines remove the last text piece of the GUI and put it back with the appropriate wind chill result from the calculations.\r\n self.frame5.pack_forget()\r\n self.result.pack_forget()\r\n self.result = tkinter.Label(self.frame5,\r\n text='The windchill temperature is: '+str(windchill))\r\n self.frame5.pack()\r\n self.result.pack()\r\n\r\n#Create an instance of the windchill_calculator_GUI class.\r\nwindchill_calculate = windchill_calculator_GUI()\r\n","repo_name":"SnackerSnake/PythonHomework","sub_path":"HW15/HW15.py","file_name":"HW15.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"14629300229","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ndef send_mail(subject,user_name,mail_id,body):\n mail_body_object = MIMEMultipart('alternative')\n mail_body_object['Subject'] = subject\n mail_body_object['From'] = 'PROJECT SYSTEM <oneadsakib@gmail.com>'#from_adres\n mail_body_object['To'] =f'{user_name} <{mail_id}>'\n part1 = MIMEText(body, 'html')\n mail_body_object.attach(part1)\n boodskap = MIMEText(\"<p>Project Management system.</p>\", 'html')\n mail_body_object.attach(boodskap)\n print('MAIL ATTACHED')\n try:\n mail = smtplib.SMTP('smtp.gmail.com',587)\n mail.ehlo()\n mail.starttls()\n mail.login('oneadsakib@gmail.com','shashi11149')\n mail.sendmail('oneadsakib@gmail.com',mail_id,mail_body_object.as_string())\n print('MAIL SENT')\n except smtplib.SMTPHeloError:\n print ('The server responded weird stuff to my login request, please try again')\n except smtplib.SMTPAuthenticationError:\n #DEVELOPER SIDE\n print ('Your account name or password is incorrect, please try again using the correct stuff')\n except smtplib.SMTPException:\n print('NO Internet')\n except Exception as e:\n if str(e)=='[Errno 11001] getaddrinfo failed':\n print('MAIL PORT NOT WORKING or INTERNET PROBLEM')\n else:\n print('Unknown ERROR'+str(e))\n","repo_name":"KashmiraS/flask_demo_project","sub_path":"project/mailer_module/mailer_class.py","file_name":"mailer_class.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"25655965302","text":"from typing import Union\n\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom gym_sts.spaces.constants.potions import PotionCatalog\nfrom gym_sts.spaces.observations.types import Potion, PotionBase, ShopPotion\n\n\n@st.composite\ndef create_potion_base(draw, potion_id=st.sampled_from(PotionCatalog.ids)):\n potion_id = draw(potion_id)\n potion_metadata = getattr(PotionCatalog, potion_id)\n\n return draw(\n st.builds(\n PotionBase,\n id=st.just(potion_id),\n name=st.just(potion_metadata.name),\n requires_target=st.just(potion_metadata.requires_target),\n )\n )\n\n\n@given(create_potion_base())\ndef test_potion_base_serde_binary(potion: PotionBase):\n ser = potion.serialize()\n de = potion.deserialize(ser)\n\n assert potion == de\n\n\n@given(create_potion_base())\ndef test_potion_base_serde_discrete(potion: PotionBase):\n ser = potion.serialize(discrete=True)\n de = potion.deserialize(ser)\n\n assert potion == de\n\n\ndef test_potion_base_serde_empty_binary():\n ser = PotionBase.serialize_empty()\n de = PotionBase.deserialize(ser)\n\n assert de.id == PotionCatalog.NONE.id\n\n\ndef test_potion_base_serde_empty_discrete():\n ser = PotionBase.serialize_empty(discrete=True)\n de = PotionBase.deserialize(ser)\n\n assert de.id == PotionCatalog.NONE.id\n\n\ndef create_potion_subclass(Model: Union[type[Potion], type[ShopPotion]]):\n @st.composite\n def create_subclass(draw, potion_bases=create_potion_base()):\n potion_base = draw(potion_bases)\n\n return draw(\n st.builds(\n Model,\n id=st.just(potion_base.id),\n name=st.just(potion_base.name),\n requires_target=st.just(potion_base.requires_target),\n )\n )\n\n return create_subclass()\n\n\n@given(create_potion_subclass(Potion))\ndef test_potion_serde(potion: Potion):\n ser = potion.serialize()\n de = potion.deserialize(ser)\n\n assert potion == de\n\n\ndef test_potion_serde_empty():\n ser = Potion.serialize_empty()\n de = Potion.deserialize(ser)\n\n assert de.id == PotionCatalog.NONE.id\n\n\n@given(create_potion_subclass(ShopPotion))\ndef test_shop_potion_serde(potion: ShopPotion):\n ser = potion.serialize()\n de = potion.deserialize(ser)\n\n assert potion == de\n\n\ndef test_shop_potion_serde_empty():\n ser = ShopPotion.serialize_empty()\n de = ShopPotion.deserialize(ser)\n\n assert de.id == PotionCatalog.NONE.id\n assert de.price == 
0\n","repo_name":"kronion/gym-sts","sub_path":"tests/observations/serde/types/test_potions.py","file_name":"test_potions.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"62"} +{"seq_id":"3739561688","text":"from typing import Sequence\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom src import vision, model\n\n\ndef plot_image(img: np.ndarray):\n plt.figure(figsize=(12, 7))\n # Matplotlib uses RGB format\n rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n plt.imshow(rgb, aspect=\"auto\")\n plt.show()\n\n\ndef display_aruco_markers(source: vision.FrameSource, tools: vision.VisionTools):\n img = source.get_frame()\n corners, ids = tools.get_aruco_markers(img)\n\n result = img.copy()\n cv2.aruco.drawDetectedMarkers(result, corners, ids)\n plot_image(result)\n\n\ndef draw_path(img: np.ndarray, path: Sequence[model.Point]) -> np.ndarray:\n result = img.copy()\n # Draw the path\n for i in range(len(path) - 1):\n start = path[i]\n end = path[i + 1]\n cv2.line(\n result, np.int32(start.v), np.int32(end.v), color=(255, 0, 255), thickness=2\n )\n return result\n\n\ndef draw_robot_pose(img: np.ndarray, robot: model.Robot) -> np.ndarray:\n result = img.copy()\n # Convert to array for drawing\n position = np.int32(robot.position.v)\n\n # Compute direction vector for drawing\n alpha = robot.angle\n direction_vector = np.array([-np.sin(-alpha), np.cos(-alpha)])\n\n # Draw a circle at the detected position\n cv2.circle(result, position, 4, color=(0, 255, 255), thickness=8)\n cv2.arrowedLine(\n result, position, position + np.int32(100 * direction_vector), (0, 0, 255), 2\n )\n return result\n\n\ndef draw_centroids(img: np.ndarray, centroids: np.ndarray) -> np.ndarray:\n result = img.copy()\n for centroid in centroids:\n cv2.circle(result, np.int32(centroid), 4, color=(0, 255, 0), thickness=2)\n return result\n\n\ndef draw_contours(img: np.ndarray, contours: np.ndarray) -> np.ndarray:\n result = img.copy()\n cv2.drawContours(result, contours, -1, (255, 0, 0))\n return result\n\n\ndef draw_world(img: np.ndarray, world: model.World) -> np.ndarray:\n result = img.copy()\n # Draw a green circle at the goal\n cv2.circle(result, np.int32(world.goal.v), 4, (0, 255, 0), 2)\n\n # Convert obstacle points to contours\n contours = []\n for obstacle in world.obstacles:\n contour = []\n for point in obstacle:\n contour.append([point.v])\n contours.append(np.array(contour))\n # Draw blue contours for obstacles\n cv2.drawContours(result, contours, -1, (255, 0, 0))\n\n # Draw robot position and orientation\n # Convert to array for drawing\n position = np.int32(world.robot.position.v)\n\n # Compute direction vector for drawing\n alpha = world.robot.angle\n direction_vector = np.array([-np.sin(-alpha), np.cos(-alpha)])\n\n # Draw a circle at the detected position\n cv2.circle(result, position, 4, color=(0, 255, 255), thickness=8)\n cv2.arrowedLine(\n result, position, position + np.int32(100 * direction_vector), (0, 0, 255), 2\n )\n return result\n","repo_name":"louisgevers/mobile-robotics-project","sub_path":"src/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"26148776001","text":"def checkFancyNumber(num):\r\n rotated_map = {0:0,1:1,6:9,8:8,9:6}\r\n res = []\r\n num_arr = [int(i) for i in str(num)]\r\n for i in num_arr:\r\n res.append(rotated_map[i])\r\n rev = res[::-1]\r\n 
if(rev==num_arr):\r\n print(\"Fancy number\")\r\n else:\r\n print(\"Not a fancy number\")\r\n\r\ncheckFancyNumber(996)","repo_name":"sravyaysk/cracking-the-coding-interview-solutions","sub_path":"LeetCode/FancyNumber.py","file_name":"FancyNumber.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"30722593755","text":"\ndef remove_garbage(stream):\n \"\"\" Remove the garbage, and count the amount of garbage \"\"\"\n garbage = False\n data = \"\"\n garbage_count = 0\n\n for index, char in enumerate(stream):\n if not garbage:\n if char == '<':\n garbage = True\n else:\n data += char\n\n else:\n if stream[index - 1] == '!':\n # Remove the next char\n stream = stream[:index] + '0' + stream[index+1:]\n elif char == '>':\n garbage = False\n elif char != '!':\n garbage_count += 1\n\n return data, garbage_count\n\n\ndef group_score(stream):\n \"\"\" Get the score for the remaining groups \"\"\"\n score = 0\n open = 0\n\n for char in stream:\n if char == '{':\n open += 1\n\n elif char == '}':\n score += open\n open -= 1\n\n return score\n\n\nif __name__ == '__main__':\n stream = open('input.txt').read()\n\n stream, garbage_count = remove_garbage(stream)\n\n print(\"Score: %d\" % group_score(stream))\n print(\"Garbage count: %d\" % garbage_count)\n\n","repo_name":"4ilo/Advent-Of-Code-2017","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72035769799","text":"\"\"\"\r\nOne-dimensional grid delay task.\r\n\r\nThis task tests an agent's ability to properly ascribe reward to the\r\ncorrect cause. The reward is delayed by a variable amount, which\r\nmakes the task challenging.\r\n\"\"\"\r\nimport numpy as np\r\n\r\nimport becca.brain as becca_brain\r\nfrom becca_test.grid_1D import World as Grid_1D_World\r\n\r\n\r\nclass World(Grid_1D_World):\r\n \"\"\"\r\n One-dimensional grid task with delayed reward\r\n\r\n This task is identical to the grid_1D task with the\r\n exception that reward is randomly delayed a few time steps.\r\n\r\n Most of this world's attributes are defined in base_world.py.\r\n The few that aren't are defined below.\r\n \"\"\"\r\n def __init__(self, lifespan=None):\r\n \"\"\"\r\n Initialize the world. Base it on the grid_1D world.\r\n\r\n Parameters\r\n ----------\r\n lifespan : int\r\n The number of time steps to continue the world.\r\n \"\"\"\r\n Grid_1D_World.__init__(self, lifespan)\r\n self.name = 'grid_1D_delay'\r\n print('--delayed')\r\n\r\n # max_delay : int\r\n # The maximum number of time steps that the reward may be delayed.\r\n self.max_delay = 1\r\n # future_reward : list of floats\r\n # The reward that has been received, but will not be delivered to\r\n # the agent yet. 
The index of the list indicates how many time\r\n # steps will pass before delivery occurs.\r\n self.future_reward = [0] * self.max_delay\r\n\r\n self.visualize_interval = 1e6\r\n\r\n def assign_reward(self):\r\n \"\"\"\r\n Calculate the reward corresponding to the current state and assign\r\n it to a future time step.\r\n\r\n Returns\r\n -------\r\n reward : float\r\n The reward associated with the set of input sensors.\r\n \"\"\"\r\n new_reward = 0\r\n if int(self.world_state) == 3:\r\n new_reward += 1\r\n if int(self.world_state) == 8:\r\n new_reward -= 1\r\n # Punish actions just a little\r\n new_reward -= self.energy * self.energy_cost\r\n # Find the delay for the reward\r\n delay = np.random.randint(0, self.max_delay)\r\n self.future_reward[delay] += new_reward\r\n # Advance the reward future by one time step\r\n self.future_reward.append(0)\r\n reward = self.future_reward.pop(0)\r\n return reward\r\n\r\n def visualize(self, brain=None):\r\n \"\"\"\r\n Show what's going on in the world.\r\n \"\"\"\r\n state_image = ['.'] * (self.num_positions + self.n_actions + 2)\r\n state_image[self.simple_state] = 'O'\r\n state_image[self.num_positions:self.num_positions + 2] = '||'\r\n action_index = np.where(self.action > 0.1)[0]\r\n if action_index.size > 0:\r\n for i in range(action_index.size):\r\n state_image[self.num_positions + 2 + action_index[i]] = 'x'\r\n print(''.join(state_image))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n becca_brain.run(World())\r\n","repo_name":"brohrer/becca_test","sub_path":"becca_test/grid_1D_delay.py","file_name":"grid_1D_delay.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} {"seq_id":"6111789133","text":"from argparse import ArgumentParser\nfrom telegram_notifier import TelegramNotifier\nimport logging\n\nif __name__ == '__main__':\n arg_parser = ArgumentParser()\n arg_parser.add_argument('--config', default='telegram_notifier_config.yaml')\n arg_parser.add_argument('--log-level', default='ERROR')\n args = arg_parser.parse_args()\n\n logging.basicConfig(level=args.log_level, format='[%(asctime)s %(levelname)s] %(message)s')\n\n telegram_notifier = TelegramNotifier(args.config)\n telegram_notifier.refresh()\n","repo_name":"electrosaurus/CanalService","sub_path":"purchase_monitoring/refresh_telegram_notifier.py","file_name":"refresh_telegram_notifier.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"9378323391","text":"# When babies babble, they say things like GAGAGOOGOO or BABABABA. For the purposes of this question, we'll define a baby talk word to be any non-empty string of letters that can be divided into two equal-length portions in such a way that the first portion is identical to the second.\n\n# Based on that definition, the following strings are words in baby talk: GAGA, GOOGOO, BABA, GUBBAGUBBA, DOGGIEDOGGIE, FDSFDS, IWANTMOREMILKIWANTMOREMILK, and XX.\n\n# The following strings are not words in baby talk: BABAB, GAGOO, BA, DOGGIE, and X.\n\n# Complete the baby_talk function to find the longest substring consisting of baby talk, as defined above, and return that length. 
In the test cases below, the Output column shows that length for each Input string.\n\n# Input\tOutput\n# GOOGOOGAGA\t6\n# BABABABA\t8\n# PTHHPTHHBAGOOGOOGAGABOOOOO\t6\n# XYBABABABAXYX\t8\n# BABAGOOGOOGOOGOOGOOGOOBA\t18\n# NOBABYTALKHERE\t0\n\ndef baby_talk(s):\n # indices = {}\n # result = []\n # for index, value in enumerate(s):\n # if value not in indices:\n # indices[value] = index\n # keys = indices.keys()\n # values = indices.values()\n # for i in values:\n # if (i - values[i+1]) < 1:\n # result.append(i,i+1)\n # return keys, values\n # i = (s+s).find(s, 1, -1)\n # return None if i == -1 else s[:i]\n\n window = len(s)\n if window % 2 == 1:\n window -= 1\n while window > 0:\n high = len(s) - window + 1\n for i in range(0, high):\n substr = s[i:i+window]\n half = window // 2\n first = substr[:half]\n second = substr[half:]\n if second == first:\n return window\n window -= 2\n return window\n\ns = 'BABAGOOGOOGOOGOOBA'\nprint(baby_talk(s))\n","repo_name":"kariscourey/hr-mod3-practice-problems","sub_path":"baby.py","file_name":"baby.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"35631047606","text":"# Dynamic programming\n# https://www.acmicpc.net/problem/1912\nimport sys; input=sys.stdin.readline\nn = int(input())\na = list(map(int, input().split()))\naccum = [a[0]]\nfor i in range(len(a) - 1):\n accum.append(max(accum[i] + a[i + 1], a[i + 1]))\nprint(max(accum))\n#n = int(input())\n#a = list(map(int, input().split()))\n#accum = [0,a[0]]\n#for i in range(1,n):\n# accum.append(accum[i]+a[i])\n#M = -1000*n\n#for i in range(1, n+1):\n# for j in range(i,n+1):\n# s = accum[j]-accum[j-i]\n# if s > M:\n# M = s\n#print(M)","repo_name":"minho511/algorithm_solution","sub_path":"baekjoon_python/[1912] 연속합.py","file_name":"[1912] 연속합.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"40800635811","text":"#!/usr/bin/env python\n\n# export matched addresses for ../school-data/maps/school-address.tsv\n\nfrom config import MONGO_URL\nfrom pymongo import MongoClient\n\nclient = MongoClient(MONGO_URL)\ndb = client.get_default_database()\n\nfields = [\"school\", \"address\", \"address-match\"]\n\nif __name__ == '__main__':\n items = list(db['school-address'].find())\n schools = iter(sorted(items, key=lambda item: item['school']))\n print(\"\\t\".join(fields) + \"\\n\")\n for row in schools:\n print(\"\\t\".join([row[field] for field in fields]))\n","repo_name":"openregister/addressbase-demo","sub_path":"schools/export-school-address.py","file_name":"export-school-address.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"44906587320","text":"import filterbank as fb\nfrom filterbank import _extract_image_patches\nimport soundfile as sf\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.functional as tf\nfrom torch.utils import data\nfrom torch import nn, tensor\nimport csv\nfrom gammatone import fftweight # assumed: fft_gtgram used below comes from the external 'gammatone' package\n\ndef wvd(signal, window, L, bins, hop, mode, J, Q):\n\n signal = torch.FloatTensor(signal)\n if len(signal.shape) > 3:\n signal = torch.squeeze(signal)\n if len(signal.shape) > 2:\n signal = torch.squeeze(signal)\n if len(signal.shape) == 1:\n signal = torch.unsqueeze(signal, 0)\n s = torch.stft(signal, n_fft = 2 * window, win_length = window, hop_length = hop)\n s = 
s[:,:,:,1]+s[:,:,:,0]\n\n # remodulate the stft prior to the spectral correlation for simplicity\n\n pi = 2*3.1415926\n step = 1 / window\n freq = np.linspace(-step * L, step * L, 2 * L + 1)\n time = np.arange(s.shape[-1]).reshape((-1, 1))\n mask = (np.cos(np.pi * time * freq) + np.sin(np.pi * time * freq)*1j) * np.hanning(2 * L + 1)\n extract_patches = nn.Unfold(kernel_size = (2*L+1,1), stride= (2,1), padding = 2)\n signal = extract_patches(torch.unsqueeze(s, dim = 1))\n s = signal.numpy()\n x = []\n for i in range(s.shape[0]):\n output = np.dot(mask, s[i,:,:] * np.conj(np.flip(s[i,:,:])))\n x.append(output)\n s = np.array(x)\n s = s.astype(np.float64)\n\n filter, mu, cor, sigma, mix = fb.generate_gaussian_filterbank(s.shape[-1], J*Q, s.shape[0], 5, 22050)\n if len(filter.shape) == 4 :\n filter = torch.squeeze(filter)\n if len(filter.shape) == 2 :\n filter = torch.unsqueeze(filter, 0)\n# print(filter.shape)\n# print(s.shape)\n wvd_convolved = torch.bmm(torch.FloatTensor(s), filter)\n\n return wvd_convolved\n\n\n\ndef norm(arr):\n return (arr - np.mean(arr) ) / np.std(arr)\n\n\nclass DataSimp(data.Dataset):\n def __init__(self, df):\n super(DataSimp, self)\n self.df = df\n def __len__(self):\n return len(self.df)\n def __getitem__(self, idx):\n return self[idx]['sig'], self[idx]['label']\n\n\nclass Dataset(data.Dataset):\n def __init__(self, df, filename=False, white_noise=False, pink_noise=False, rnd_shift=False, mirror=False, gammatone=False):\n super(Dataset, self)\n self.df = df\n self.filename = filename # true if you want the filename in getitem\n self.rng = np.random.RandomState(42)\n self.pink_noise = pink_noise\n self.white_noise = white_noise\n self.rnd_shift = rnd_shift\n self.mirror = mirror\n self.gammatone = gammatone\n\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n\n row = self.df[idx]\n print(row)\n sig = 0.01*np.random.rand(12000)\n fs = 24000\n\n\n for root, dirs, files in os.walk(\"/mnt\"):\n for file in files:\n if file == str(row[14]):\n path = os.path.join(root,file)\n sig, fs = sf.read(path)\n sig = sig[:,0] if sig.ndim == 2 else sig\n sig = np.concatenate([sig, np.zeros(int(row[12])*fs)])\n sig = sig[int(row[12])*fs:(int(row[12])+5)*fs]\n sig = norm(sig)\n if self.mirror :\n sig = np.flip(sig) if np.random.random() > .5 else sig\n if self.rnd_shift :\n shift = int(np.random.random()*5*fs)\n sig = np.concatenate([sig[shift:], sig[:shift]])\n\n sig = norm(sig)\n if self.gammatone :\n sig = fftweight.fft_gtgram(sig, fs, 512/fs, 64/fs, 64, 500)\n\n if self.filename:\n return tensor(sig).unsqueeze(0).float(), float(row[15]), row.path\n else:\n return tensor(sig).unsqueeze(0).float(), float(row[15])\n\n\ndef rewrite(infile):\n edit = []\n for i in range(len(infile)):\n string = str(infile[i])\n row = string.replace('[','')\n row = row.replace(']','')\n row = row.replace('\\\"','')\n row = row.replace('\\n','')\n row = row.replace(' ','')\n row = row.replace('\\'','')\n row = row.replace('?','')\n edit.append(row)\n return(edit)\n\n\n\ndef array(infile):\n result = []\n with open(infile) as csvfile:\n reader = csv.reader(csvfile) # change contents to floats\n for row in reader: # each row is a list\n result.append(row)\n return(result)\n\ndef PrintModel(model, inlength=22050, gammatone=False, indata=None):\n x = tensor(np.arange(inlength)).view(1, 1, -1).float()\n x = fftweight.fft_gtgram(np.arange(inlength), 22050, 512/22050, 64/22050, 64, 500) if gammatone else np.arange(inlength)\n x = tensor(x).float().unsqueeze(0).unsqueeze(0)\n x = indata 
if indata is not None else x\n print('in shape : ',x.shape, '\\n')\n prevshape = x.shape\n for layer in model:\n print(layer)\n x = layer(x)\n if x.shape != prevshape:\n print('Outputs : ',x.shape)\n prevshape = x.shape\n print()\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\n","repo_name":"anatoole/WVDPytorch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"10187545490","text":"from aqt import mw\nfrom aqt.qt import *\nfrom .AutoForvoTts import AutoForvoTts\nfrom .ForvoTts import ForvoTts\nfrom aqt import gui_hooks\n\n\ndef openForvoAudioGenerator():\n cardCount = mw.col.cardCount()\n config = mw.addonManager.getConfig(__name__)\n mw.myWidget = widget = AutoForvoTts(mw)\n widget.show()\n\naction = QAction(\"Add Forvo TTS to deck\", mw)\naction.triggered.connect(openForvoAudioGenerator)\nmw.form.menuTools.addAction(action)\n\ndef addForvoTtsOption(editerWindow, qmenu):\n qmenu.addAction(\"Add Forvo Audio\", lambda: forvoTts(editerWindow))\n\n\ndef forvoTts(editorWindow):\n editor = editorWindow.editor\n results = []\n note = editor.note\n widget = ForvoTts(mw, note, editor.parentWindow, editor.currentField, editorWindow.selectedText())\n if(widget.exec_()):\n result = widget.finalResult\n editor.web.setFocus()\n editor.web.eval(\"focusField(%d);\" % int(widget.destinationFieldComboBox.currentIndex())) \n print(result.getBucketFilename())\n #editor.web.eval(\"wrap('', '[sound:\" + result.getBucketFilename() + \"]');\") \n # Why am I so terrible at finding obvious functions, that whole web.eval stuff took a while to figure out\n editor.doPaste(\"[sound:\" + result.getBucketFilename() + \"]\",internal=False, extended=True)\n \n\ngui_hooks.editor_will_show_context_menu.append(addForvoTtsOption)\n","repo_name":"Rascalov/Anki-Simple-Forvo-Audio","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"62"} +{"seq_id":"44918858934","text":"\"\"\"\nVinit Ranjan, Chris Eckman\nLineage Logistics\n\nA function to subsample from a list of points and accept each point with probability sample_frac\nEquivalent to SubsampleFrac.py, only it reads directly from the .las file\n\nInputs:\npoints - list of points to sample from\nsample_frac - fraction of desired points\n\nReturns:\npoints - list of sampled points\n\"\"\"\nimport numpy as np\nimport pdb\nfrom laspy.file import File\nfrom tqdm import tqdm, trange\n\n\ndef subsample_frac_from_las_data(filename, sample_frac=.1):\n points = []\n\n with File(filename, mode='r') as in_file:\n scales = in_file.header.scale\n offsets = in_file.header.offset\n x_s, y_s, z_s = scales[0], scales[1], scales[2]\n x_o, y_o, z_o = offsets[0], offsets[1], offsets[2]\n\n for point in tqdm(in_file.points, total=len(in_file.points), desc=\"Sampling\"):\n # for i in trange(len(in_file.x), desc=\"Sampling\"):\n if np.random.random_sample() < sample_frac:\n points.append(np.array([scale(point[0][0], x_s, x_o),\n scale(point[0][1], y_s, y_o),\n scale(point[0][2], z_s, z_o)], dtype=np.float32))\n return points\n\n\n# helper function to scale points\ndef scale(point, scale_factor, offset):\n return (point * scale_factor) + 
offset\n","repo_name":"vinitranjan1/PointCloudProcessing","sub_path":"SubsampleFunctions/SubsampleFracFromLAS.py","file_name":"SubsampleFracFromLAS.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12235273412","text":"import numpy as np\nimport scipy.linalg as la\nimport re\n\n\ndef get_rf_params_from_vna_csv(filename, z0=50.0 + 0.0j):\n\t(freq_hz, Sdb, Sdeg)\t = get_sdb_from_vna_csv(filename)\n\tS = sdb2sri(Sdb, Sdeg)\n\tZ = s2z(S, z0)\n\tT = s2abcd(S, z0)\n\t\n\treturn (freq_hz, S, Z, T, Sdb, Sdeg)\n\ndef get_sdb_from_vna_csv(filename):\n\t# Reads CSV generated by VNA\n\t# Extracts S params in DB/DEG form and returns freq, Sdb, Sdeg\n\t\n\tinfile = open(filename, 'r')\n\t\n\theader_found = False\n\tSdb_list = []\n\tSdeg_list = []\n\tfreq_hz_list = []\n\tfor line in infile:\n\t\tnline = line.strip()\n\t\tnline_arr = nline.split(\",\")\n\t\t\n\t\tif not header_found:\n\t\t\tm = re.search(\"Freq\", nline_arr[0])\n\t\t\tif (m): # found header\n\t\t\t\theader_found = True\n\t\telif len(nline_arr) == 9:\n\t\t\tfreq_hz_list.append( float(nline_arr[0]) )\n\t\t\tSdb_f = np.array( [ [float(nline_arr[1]), float(nline_arr[3])], [float(nline_arr[5]), float(nline_arr[7]) ] ] )\n\t\t\tSdeg_f = np.array( [ [float(nline_arr[2]), float(nline_arr[4])], [float(nline_arr[6]), float(nline_arr[8]) ] ] )\n\t\t\tSdb_list.append(Sdb_f)\n\t\t\tSdeg_list.append(Sdeg_f)\n\t\n\tSdb = np.array(Sdb_list)\n\tSdeg = np.array(Sdeg_list)\n\tfreq_hz = np.array(freq_hz_list)\n\t\n\treturn(freq_hz, Sdb, Sdeg)\t\n\n\t\ndef s2z(S, z0):\n\t# Converts real/imag S params to z params\n\tZ = np.zeros( np.shape(S), dtype=complex)\n\tI = np.eye(2)\n\tfor idx, SS in enumerate(S):\n\t\tZZ = np.dot( la.inv( I - SS ), (I + SS) ) * z0\n\t\tZ[idx] = ZZ\n\t\t\n\treturn(Z)\n\t\n\ndef z2s(Z, z0):\n\t# converts impedance matrix to scattering matrix\n\tS = np.zeros( np.shape(Z), dtype=complex)\n\tI = np.eye(2)\n\tfor idx, ZZ in enumerate(Z):\n\t\tSS = np.dot( Z - z0*I, la.inv( Z + z0*I) )\n\t\tS[idx] = SS\n\t\n\treturn(S)\n\n\ndef z2y(Z):\n\t# converts impedance matrix to admittance matrix\n\tY = np.zeros( np.shape(Z), dtype=complex)\n\tfor idx, ZZ in enumerate(Z):\n\t\tYY = la.inv(ZZ)\n\t\tY[idx] = YY\n\t\n\treturn(Y)\n\t\t\n\ndef z2abcd(Z):\n\t# Converts impedance matrix to transfer matrix (ABCD matrix)\n\tT = np.zeros( np.shape(Z), dtype=complex)\n\tfor idx, ZZ in enumerate(Z):\n\t\tZ11 = ZZ[0][0]\n\t\tZ12 = ZZ[0][1]\n\t\tZ21 = ZZ[1][0]\n\t\tZ22 = ZZ[1][1]\n\t\tA = Z11 / Z21\n\t\tB = (Z11*Z22 - Z12*Z21) / Z21\n\t\tC = 1 / Z21\n\t\tD = Z22/ Z21\n\t\t\n\t\t\n\t\tTf = np.array( [ [A, B], [C, D] ])\n\t\tT[idx] = Tf\n\t\n\treturn(T)\n\n\n#def abcd2s(T, z0):\n#\t\n#\tS = np.zeros( np.shape(T), dtype=complex)\n#\tfor idx, TT in enumerate(T):\n#\t\tA = TT[0][0]\n#\t\tB = TT[0][1]\n#\t\tC = TT[1][0]\n#\t\tD = TT[1][1]\n#\t\t\n#\t\tdenom = A + B/z0 + C*z0 + D\n#\t\tS11 = (A + B/z0 - C*z0 + D) / denom\n#\t\tS12 = 2*(A*D - B*C) / denom\n#\t\tS21 = 2/denom\n#\t\tS22 = (-A + B/z0-C*z0 + D ) / denom\n#\t\t\n#\t\tSS = np.array( [[S11, S12] , [S21, S22] ])\n#\t\tS[idx] = SS\n#\t\n#\treturn(S)\n#\t\ndef abcd2s(abcd_struct, Z01, Z02):\n\t# convert ABCD matrix to S matrix in real/imag format\n\n\tR01 = Z01.real\n\tR02 = Z02.real\n\tnum_freqs = len(abcd_struct)\n\tS = np.zeros( (num_freqs, 2, 2), dtype=complex )\n\tfor idx, mat in enumerate(abcd_struct):\n\t\tmat = abcd_struct[idx]\n\t\tA = mat[0][0]\n\t\tB = mat[0][1]\n\t\tC = mat[1][0]\n\t\tD = 
mat[1][1]\n\n\t\tdenom = (A*Z02 + B + C*Z01*Z02 + D*Z01)\n\n\t\tS11 = ( A*Z02 + B - C*np.conj(Z01)*Z02 - D*np.conj(Z01) ) / denom\n\t\tS12 = ( 2*(A*D - B*C)*np.sqrt(R01*R02) ) / denom\n\t\tS21 = ( 2*np.sqrt(R01*R02) ) / denom\n\t\tS22 = (-A*np.conj(Z02) + B - C*Z01*np.conj(Z02) + D*Z01 ) / denom\n\n\t\tS[idx][0][0] = S11\n\t\tS[idx][0][1] = S12\n\t\tS[idx][1][0] = S21\n\t\tS[idx][1][1] = S22\n\n\n\treturn S\t\n\t\n\t\n\t\n#def s2abcd( S, z0):\n#\tZ = s2z(S, z0)\n#\tT = z2abcd(Z)\n#\t\n#\treturn(T)\n\t\ndef s2abcd(S, Z01=50, Z02=50):\n\t# Convert Sparams in Real/Imag format to ABCD matrix\n\tR01 = Z01.real\n\tR02 = Z02.real\n\tabcd = np.zeros( np.shape(S), dtype=complex )\n\tfor idx, SS in enumerate(S):\n\t\tS11 = SS[0][0]\n\t\tS12 = SS[0][1]\n\t\tS21 = SS[1][0]\n\t\tS22 = SS[1][1]\n\n\t\tdenom = 2*S21*np.sqrt(R01*R02)\n\t\t\n\t\t\n\t\tA = ( (np.conj(Z01) + S11*Z01)*(1-S22)+S12*S21*Z01 ) / denom\n\t\tB = ( (np.conj(Z01) + S11*Z01)*(np.conj(Z02)+S22*Z02)-S12*S21*Z01*Z02 ) / denom\n\t\tC = ( (1-S11)*(1-S22)-S12*S21 ) / denom\n\t\tD = ( (1-S11)*(np.conj(Z02)+S22*Z02) + S12*S21*Z02 ) / denom\n\n\t\tabcd[idx][0][0] = A\n\t\tabcd[idx][0][1] = B\n\t\tabcd[idx][1][0] = C\n\t\tabcd[idx][1][1] = D\n\n\n\treturn abcd\n\t\n\t\ndef sdb2sri(Sdb, Sdeg):\n\t# convert DB/DEG to real/imag\n\tnum_freqs = len(Sdb)\n\tSri = np.zeros( (num_freqs, 2, 2), dtype=complex)\n\n\tfor idx in range(len(Sdb)):\n\t\tdb_mat = Sdb[idx]\n\t\tS11_db = db_mat[0][0]\n\t\tS12_db = db_mat[0][1]\n\t\tS21_db = db_mat[1][0]\n\t\tS22_db = db_mat[1][1]\n\n\t\tdeg_mat = Sdeg[idx]\n\t\tS11_deg = deg_mat[0][0]\n\t\tS12_deg = deg_mat[0][1]\n\t\tS21_deg = deg_mat[1][0]\n\t\tS22_deg = deg_mat[1][1]\n\n\t\t# use the builtin complex(); the old np.complex alias is gone from modern NumPy\n\t\tS11 = 10**(S11_db/20) * complex( np.cos(S11_deg*np.pi/180), np.sin(S11_deg*np.pi/180) )\n\t\tS12 = 10**(S12_db/20) * complex( np.cos(S12_deg*np.pi/180), np.sin(S12_deg*np.pi/180) )\n\t\tS21 = 10**(S21_db/20) * complex( np.cos(S21_deg*np.pi/180), np.sin(S21_deg*np.pi/180) )\n\t\tS22 = 10**(S22_db/20) * complex( np.cos(S22_deg*np.pi/180), np.sin(S22_deg*np.pi/180) )\n\n\t\tSri[idx][0][0] = S11\n\t\tSri[idx][0][1] = S12\n\t\tSri[idx][1][0] = S21\n\t\tSri[idx][1][1] = S22\n\n\treturn Sri\n\t\n\t\ndef sri2sdb(sri_struct):\n\t# convert S params from Real/Imag to DB/Deg\n\tnum_freqs = len(sri_struct)\n\tSdb = np.zeros( (num_freqs, 2, 2))\n\tSdeg = np.zeros( (num_freqs, 2, 2))\n\n\tfor idx in range(len(sri_struct)):\n\t\tri_mat = sri_struct[idx]\n\t\tS11_ri = ri_mat[0][0]\n\t\tS12_ri = ri_mat[0][1]\n\t\tS21_ri = ri_mat[1][0]\n\t\tS22_ri = ri_mat[1][1]\n\n\t\tS11_db = 20*np.log10( np.abs(S11_ri) )\n\t\tS12_db = 20*np.log10( np.abs(S12_ri) )\n\t\tS21_db = 20*np.log10( np.abs(S21_ri) )\n\t\tS22_db = 20*np.log10( np.abs(S22_ri) )\n\n\t\tS11_deg = np.arcsin( S11_ri.imag / np.abs(S11_ri) ) * 180/np.pi\n\t\tS12_deg = np.arcsin( S12_ri.imag / np.abs(S12_ri) ) * 180/np.pi\n\t\tS21_deg = np.arcsin( S21_ri.imag / np.abs(S21_ri) ) * 180/np.pi\n\t\tS22_deg = np.arcsin( S22_ri.imag / np.abs(S22_ri) ) * 180/np.pi\n\t\t\n\t\tif ( S11_ri.real < 0 ) and (S11_ri.imag > 0):\n\t\t\tS11_deg = 180 - S11_deg\n\t\tif ( S12_ri.real < 0 ) and (S12_ri.imag > 0):\n\t\t\tS12_deg = 180 - S12_deg\n\t\tif ( S21_ri.real < 0 ) and (S21_ri.imag > 0):\n\t\t\tS21_deg = 180 - S21_deg\n\t\tif ( S22_ri.real < 0 ) and (S22_ri.imag > 0):\n\t\t\tS22_deg = 180 - S22_deg\n\t\t\n\n\t\tSdb[idx][0][0] = S11_db\n\t\tSdb[idx][0][1] = S12_db\n\t\tSdb[idx][1][0] = S21_db\n\t\tSdb[idx][1][1] = S22_db\n\n\t\tSdeg[idx][0][0] = S11_deg\n\t\tSdeg[idx][0][1] = S12_deg\n\t\tSdeg[idx][1][0] = 
S21_deg\n\t\tSdeg[idx][1][1] = S22_deg\n\n\treturn (Sdb, Sdeg)\n\t\n\t","repo_name":"wwahby/RF_Extract","sub_path":"rf_support.py","file_name":"rf_support.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73782231946","text":"#!/usr/bin/python3\n\n\nfrom typing import Optional\nfrom PySide2.QtCore import Qt, Signal, Slot\nfrom PySide2.QtGui import QMouseEvent\nfrom PySide2.QtWidgets import QApplication, QCheckBox, QHBoxLayout, QWidget\n\n\nclass CheckBox(QWidget):\n stateChanged = Signal(bool)\n\n def __init__(self, state: Optional[bool] = False, parent: Optional[QWidget] = None) -> None:\n super(CheckBox, self).__init__(parent)\n\n self.checkbox = QCheckBox('', self)\n self.checkbox.setChecked(state)\n self.checkbox.stateChanged.connect(self.stateChanged.emit)\n self.setLayout(QHBoxLayout(self))\n self.layout().addWidget(self.checkbox)\n self.layout().setAlignment(Qt.AlignCenter)\n self.layout().setMargin(0)\n self.mousePressEvent = self.on_mousePressEvent\n\n def on_mousePressEvent(self, event: QMouseEvent):\n if event.button() == Qt.LeftButton:\n self.checkbox.setChecked(not self.checkbox.isChecked())\n\n def setChecked(self, state: bool) -> None:\n self.checkbox.setChecked(state)\n\n\n@Slot(bool)\ndef printState(state: bool):\n print(state)\n\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n w = CheckBox()\n w.stateChanged.connect(printState)\n w.show()\n sys.exit(app.exec_())\n","repo_name":"kolod/recon-plotter-python","sub_path":"tests/test_CheckBox.py","file_name":"test_CheckBox.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41116291637","text":"# Licensed under an MIT open source license - see LICENSE\nfrom __future__ import print_function, absolute_import, division\n\nimport pytest\nimport astropy.units as u\n\ntry:\n from radio_beam.beam import NoBeamException\n RADIO_BEAM_INSTALLED = True\nexcept ImportError:\n RADIO_BEAM_INSTALLED = False\n\nfrom ._testing_data import header\nfrom ..io import find_beam_width, find_beam_properties\n\n\ndef test_load_beam():\n\n beam_header = header.copy()\n beam_header[\"BMAJ\"] = 1.0\n\n beamwidth = find_beam_width(beam_header)\n\n assert beamwidth == 1.0 * u.deg\n\n\n@pytest.mark.parametrize(('major', 'minor', 'pa'), [(1.0, 0.5, 10),\n (1.0, 'skip', 10),\n (1.0, 0.5, 'skip')])\ndef test_load_beam_props(major, minor, pa):\n\n beam_header = header.copy()\n beam_header[\"BMAJ\"] = major\n if minor != 'skip':\n beam_header[\"BMIN\"] = minor\n if pa != 'skip':\n beam_header[\"BPA\"] = pa\n\n bmaj, bmin, bpa = find_beam_properties(beam_header)\n\n assert bmaj == major * u.deg\n\n if minor == 'skip':\n assert bmin == major * u.deg\n else:\n assert bmin == minor * u.deg\n\n if pa == \"skip\":\n assert bpa == 0 * u.deg\n else:\n assert bpa == pa * u.deg\n\n\n# radio-beam no has an exception for when no beam is found.\n# @pytest.mark.skipif(\"not RADIO_BEAM_INSTALLED\")\nif RADIO_BEAM_INSTALLED:\n @pytest.mark.xfail(raises=NoBeamException)\n def test_load_beam_fail():\n\n find_beam_width(header)\n\n","repo_name":"Astroua/TurbuStat","sub_path":"turbustat/tests/test_load_beam.py","file_name":"test_load_beam.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"81"} +{"seq_id":"38467900638","text":"#!/usr/bin/env python3\n\n# 
https://stackoverflow.com/questions/305378/list-of-tables-db-schema-dump-etc-using-the-python-sqlite3-api\n\nimport argparse\nimport os.path\nimport sqlite3\n\n\ndef parse_args():\n \"\"\"Get the command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"database file\", required=True)\n return parser.parse_args()\n\n\ndef db_info(db_file):\n db_filename = db_file\n newline_indent = \"\\n \"\n db = None\n\n if os.path.exists(db_filename):\n try:\n db = sqlite3.connect(db_filename)\n db.text_factory = str\n cur = db.cursor()\n\n result = cur.execute(\n \"SELECT name FROM sqlite_master WHERE type='table';\"\n ).fetchall()\n table_names = sorted(list(zip(*result, strict=True))[0])\n print(\"\\ntables are:\" + newline_indent + newline_indent.join(table_names))\n\n for table_name in table_names:\n result = cur.execute(\"PRAGMA table_info('%s')\" % table_name).fetchall()\n column_names = list(zip(*result, strict=True))[1]\n print(\n (\"\\ncolumn names for %s:\" % table_name)\n + newline_indent\n + (newline_indent.join(column_names))\n )\n except sqlite3.OperationalError as err:\n print(\"Please close all chrome instances:\", err)\n print(\"Error database info:\", err)\n\n if db is not None:\n db.close()\n else:\n print(f\"Error: file {db_filename} does not exist.\")\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n db_info(args.file)\n","repo_name":"mikemadden42/pyutils","sub_path":"db_info.py","file_name":"db_info.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"33342732328","text":"import deeplake\n\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\n\nfrom deeplake.core.dataset import Dataset\nfrom deeplake.util.exceptions import IngestionError\nfrom deeplake.client.log import logger\n\nfrom ..base import UnstructuredDataset\nfrom ..util import DatasetStructure, TensorStructure\nfrom .utils import YoloData\n\nimport numpy as np\n\nfrom random import shuffle as rshuffle\n\nfrom .constants import (\n DEFAULT_YOLO_COORDINATES_TENSOR_PARAMS,\n DEFAULT_YOLO_LABEL_TENSOR_PARAMS,\n DEFAULT_IMAGE_TENSOR_PARAMS,\n)\n\n\nclass YoloDataset(UnstructuredDataset):\n def __init__(\n self,\n data_directory: str,\n class_names_file: Optional[str] = None,\n annotations_directory: Optional[str] = None,\n image_params: Optional[Dict] = None,\n label_params: Optional[Dict] = None,\n coordinates_params: Optional[Dict] = None,\n allow_no_annotation: Optional[bool] = False,\n verify_class_names: Optional[bool] = True,\n inspect_limit: Optional[int] = 1000,\n creds: Optional[Union[str, Dict]] = None,\n image_creds_key: Optional[str] = None,\n ):\n \"\"\"Container for access to Yolo Data, parsing of key information, and conversions to a Deep Lake dataset\"\"\"\n\n super().__init__(data_directory)\n\n self.class_names_file = class_names_file\n self.data_directory = data_directory\n self.annotations_directory = annotations_directory\n\n self.allow_no_annotation = allow_no_annotation\n self.verify_class_names = verify_class_names\n self.creds = creds\n self.image_creds_key = image_creds_key\n self.inspect_limit = inspect_limit\n\n self.data = YoloData(\n self.data_directory,\n creds,\n self.annotations_directory,\n self.class_names_file,\n )\n self._validate_data()\n\n # Create a separate list of tuples with the ingestion data (img_fn, annotation_fn).\n # We do this in advance so missing files are discovered before the ingestion process.\n 
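# For illustration (hypothetical filenames), the list built below looks like\n # [(\"cat.jpg\", \"cat.txt\"), (\"dog.jpg\", None), ...], where None marks an image without an annotation.\n 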
self._create_ingestion_list()\n\n        self._validate_ingestion_data()\n\n        self._initialize_params(\n            image_params or {}, label_params or {}, coordinates_params or {}\n        )\n        self._validate_image_params()\n\n    def _parse_coordinates_type(self):\n        \"\"\"Function inspects up to inspect_limit annotation files in order to infer whether they are polygons or bounding boxes\"\"\"\n\n        # If the htype or name of the coordinates is not specified (htype could be bbox or polygon), auto-infer it by reading some of the annotation files\n        if (\n            \"htype\" not in self.coordinates_params.keys()\n            or \"name\" not in self.coordinates_params.keys()\n        ):\n            # Read the annotation files assuming they are polygons and check if there are any non-empty annotations without 4 coordinates\n            coordinates_htype = \"bbox\"  # Initialize to bbox and change if contradicted\n            coordinates_name = \"boxes\"  # Initialize to boxes and change if contradicted\n            count = 0\n            while count < min(self.inspect_limit, len(self.ingestion_data)):\n                fn = self.ingestion_data[count][1]\n                if fn is not None:\n                    _, coordinates = self.data.read_yolo_coordinates(fn, is_box=False)\n                    for c in coordinates:\n                        coord_size = c.size\n                        if coord_size > 0 and coord_size != 4:\n                            coordinates_htype = \"polygon\"\n                            coordinates_name = \"polygons\"\n\n                            count = (\n                                self.inspect_limit + 1\n                            )  # Set this to exit the while loop\n                            break\n\n                    ## TODO: Add fancier math to see whether even coordinates with 4 elements could be polygons\n                count += 1\n\n        if \"htype\" not in self.coordinates_params.keys():\n            self.coordinates_params[\"htype\"] = coordinates_htype\n\n        if \"name\" not in self.coordinates_params.keys():\n            self.coordinates_params[\"name\"] = coordinates_name\n\n    def _initialize_params(self, image_params, label_params, coordinates_params):\n        self.image_params = {\n            **DEFAULT_IMAGE_TENSOR_PARAMS,\n            **image_params,\n        }\n\n        self.coordinates_params = {\n            **DEFAULT_YOLO_COORDINATES_TENSOR_PARAMS,\n            **coordinates_params,\n        }\n\n        self.label_params = {\n            **DEFAULT_YOLO_LABEL_TENSOR_PARAMS,\n            **label_params,\n        }\n\n        self._parse_coordinates_type()\n\n    def _create_ingestion_list(self):\n        \"\"\"Function creates a list of tuples (image_filename, annotation_filename) that is passed to a deeplake.compute ingestion function\"\"\"\n\n        ingestion_data = []\n        for img_fn in self.data.supported_images:\n            base_name = Path(img_fn).stem\n            if base_name + \".txt\" in self.data.supported_annotations:\n                ingestion_data.append((img_fn, base_name + \".txt\"))\n            else:\n                if self.allow_no_annotation:\n                    logger.warning(\n                        f\"Annotation was not found for {img_fn}. Empty annotation data will be appended for this image.\"\n                    )\n\n                else:\n                    raise IngestionError(\n                        f\"Annotation was not found for {img_fn}. Please add an annotation for this image, or specify allow_no_annotation=True, which will automatically append an empty annotation to the Deep Lake dataset.\"\n                    )\n                ingestion_data.append((img_fn, None))\n\n        self.ingestion_data = ingestion_data\n\n    def prepare_structure(self) -> DatasetStructure:\n        structure = DatasetStructure(ignore_one_group=True)\n        self._add_annotation_tensors(structure)\n        self._add_images_tensor(structure)\n\n        return structure\n\n    def _validate_data(self):\n        if (\n            len(self.data.supported_images) != len(self.data.supported_annotations)\n            and self.allow_no_annotation == False\n        ):\n            raise IngestionError(\n                \"The number of supported images and annotations in the input data is not equal. 
Please ensure that each image has a corresponding annotation, or set allow_no_annotation = True\"\n            )\n\n        if len(self.data.supported_images) == 0:\n            raise IngestionError(\n                \"There are no supported images in the input data. Please verify the source directory.\"\n            )\n\n    def _validate_ingestion_data(self):\n        if len(self.ingestion_data) == 0:\n            raise IngestionError(\n                \"The data parser was not able to find any annotations corresponding to the images. Please check your directories, filenames, and extensions, or consider setting allow_no_annotation = True in order to upload empty annotations.\"\n            )\n\n    def _validate_image_params(self):\n        if \"name\" not in self.image_params:\n            raise IngestionError(\n                \"Image params must contain a name for the image tensor.\"\n            )\n\n    def _add_annotation_tensors(\n        self,\n        structure: DatasetStructure,\n    ):\n        structure.add_first_level_tensor(\n            TensorStructure(\n                name=self.label_params[\"name\"],\n                params={\n                    i: self.label_params[i] for i in self.label_params if i != \"name\"\n                },\n            )\n        )\n\n        structure.add_first_level_tensor(\n            TensorStructure(\n                name=self.coordinates_params[\"name\"],\n                params={\n                    i: self.coordinates_params[i]\n                    for i in self.coordinates_params\n                    if i != \"name\"\n                },\n            )\n        )\n\n    def _add_images_tensor(self, structure: DatasetStructure):\n        img_params = self.image_params.copy()\n\n        img_params[\"sample_compression\"] = self.image_params.get(\n            \"sample_compression\", self.data.most_frequent_image_extension\n        )\n        name = self.image_params.get(\"name\")\n\n        structure.add_first_level_tensor(\n            TensorStructure(\n                name=name,\n                params={i: img_params[i] for i in img_params if i != \"name\"},\n            )\n        )\n\n    def _ingest_data(self, ds: Dataset, progressbar: bool = True, num_workers: int = 0):\n        \"\"\"Function appends the data to the dataset object using deeplake.compute\"\"\"\n\n        if self.image_creds_key is not None:\n            ds.add_creds_key(self.image_creds_key, managed=True)\n\n        # Wrap tensor data needed by the deeplake.compute function into a single dict.\n        tensor_meta = {\n            \"images\": ds[self.image_params[\"name\"]].meta,\n            \"labels\": ds[self.label_params[\"name\"]].meta,\n            \"coordinates\": ds[self.coordinates_params[\"name\"]].meta,\n        }\n\n        @deeplake.compute\n        def append_data_bbox(data, sample_out, tensor_meta: Dict = tensor_meta):\n            # If the ingestion data is None, create empty annotations corresponding to the file\n            if data[1]:\n                yolo_labels, yolo_coordinates = self.data.read_yolo_coordinates(\n                    data[1], is_box=True\n                )\n            else:\n                yolo_labels = np.zeros((0))\n                yolo_coordinates = np.zeros((4, 0))\n\n            sample_out.append(\n                {\n                    self.image_params[\"name\"]: self.data.get_image(\n                        data[0],\n                        tensor_meta[\"images\"].is_link,\n                        self.image_creds_key,\n                    ),\n                    self.label_params[\"name\"]: yolo_labels.astype(\n                        tensor_meta[\"labels\"].dtype\n                    ),\n                    self.coordinates_params[\"name\"]: yolo_coordinates.astype(\n                        tensor_meta[\"coordinates\"].dtype\n                    ),\n                }\n            )\n\n        @deeplake.compute\n        def append_data_polygon(data, sample_out, tensor_meta: Dict = tensor_meta):\n            # If the ingestion data is None, create empty annotations corresponding to the file\n            if data[1]:\n                yolo_labels, yolo_coordinates = self.data.read_yolo_coordinates(\n                    data[1], is_box=False\n                )\n            else:\n                yolo_labels = np.zeros((0))\n                yolo_coordinates = []\n\n            sample_out.append(\n                {\n                    self.image_params[\"name\"]: self.data.get_image(\n                        data[0],\n                        tensor_meta[\"images\"].is_link,\n                        self.image_creds_key,\n                    ),\n                    self.label_params[\"name\"]: yolo_labels.astype(\n                        tensor_meta[\"labels\"].dtype\n                    ),\n                    
self.coordinates_params[\"name\"]: yolo_coordinates,\n }\n )\n\n if tensor_meta[\"coordinates\"].htype == \"bbox\":\n append_data_bbox(tensor_meta=tensor_meta).eval(\n self.ingestion_data,\n ds,\n progressbar=progressbar,\n num_workers=num_workers,\n )\n else:\n append_data_polygon(tensor_meta=tensor_meta).eval(\n self.ingestion_data,\n ds,\n progressbar=progressbar,\n num_workers=num_workers,\n )\n\n def structure(self, ds: Dataset, progressbar: bool = True, num_workers: int = 0, shuffle: bool = True): # type: ignore\n # Set class names in the dataset\n if self.data.class_names:\n ds[self.label_params[\"name\"]].info[\"class_names\"] = self.data.class_names\n\n # Set bounding box format in the dataset\n if ds[self.coordinates_params[\"name\"]].meta.htype == \"bbox\":\n ds[self.coordinates_params[\"name\"]].info[\"coords\"] = {\n \"type\": \"fractional\",\n \"mode\": \"CCWH\",\n }\n\n if shuffle:\n rshuffle(self.ingestion_data)\n\n self._ingest_data(ds, progressbar, num_workers)\n\n if self.verify_class_names and self.data.class_names:\n labels = ds[self.label_params.get(\"name\")].numpy(aslist=True)\n\n max_label = max(\n [l.max(initial=0) for l in labels]\n ) # Assume a label is 0 if array is empty. This is technically incorrect, but it's highly unlikely that all labels are empty\n\n if max_label != len(ds[self.label_params.get(\"name\")].info.class_names) - 1:\n raise IngestionError(\n \"Dataset has been created but the largest numeric label in the annotations is inconsistent with the number of classes in the classes file.\"\n )\n","repo_name":"activeloopai/deeplake","sub_path":"deeplake/auto/unstructured/yolo/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":12933,"program_lang":"python","lang":"en","doc_type":"code","stars":7141,"dataset":"github-code","pt":"81"} +{"seq_id":"41653872074","text":"import sensing_tools\nimport time\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_co2 import BrickletCO2\nfrom tinkerforge.bricklet_segment_display_4x7 import BrickletSegmentDisplay4x7\nfrom tinkerforge.bricklet_ambient_light_v2 import BrickletAmbientLightV2\n\nco2_UID = \"EnX\"\nsegment_UID = \"wNw\"\nambient_light_UID = \"yCP\"\n\ndef int2segments(value):\n digit2segments = [0x3f,0x06,0x5b,0x4f,\n 0x66,0x6d,0x7d,0x07,\n 0x7f,0x6f,0x77,0x7c,\n 0x39,0x5e,0x79,0x71] # // 0~9,A,b,C,d,E,F\n\n significant = False\n segment_index = 0\n segment_values = [0x00, 0x00, 0x00, 0x00]\n digit_position = 1000\n while digit_position > 0:\n # Extract digit\n digit = (value // digit_position) % 10\n if digit != 0:\n significant = True\n\n # set segment display value\n if significant or segment_index == 4:\n segment_values[segment_index] = digit2segments[digit]\n\n digit_position = digit_position // 10\n segment_index = segment_index + 1\n\n return segment_values\n\nreporter = sensing_tools.InfluxReporter(\"secrets.json\")\nbrick_config = sensing_tools.read_secrets(\"secrets.json\")[\"tinkerforge\"]\n\nipcon = IPConnection()\nco2_bricklet = BrickletCO2(co2_UID, ipcon)\nsegment_bricklet = BrickletSegmentDisplay4x7(segment_UID, ipcon)\nambient_light_bricklet = BrickletAmbientLightV2(ambient_light_UID, ipcon)\nipcon.connect(brick_config['host'], brick_config['port'])\n\nwhile True:\n co2_concentration = co2_bricklet.get_co2_concentration()\n print(\"CO2 concentration: {} ppm\".format(co2_concentration))\n segment_bricklet.set_segments(int2segments(co2_concentration), 7, False)\n reporter.report(\"co2\", \"tinkerforge\", \"CAB F81\", co2_concentration)\n\n 
illuminance = ambient_light_bricklet.get_illuminance() / 100.0\n    print(\"Illuminance: {} lx\".format(illuminance))\n    reporter.report(\"ambient_light\", \"LTR329ALS\", \"CAB F81\", illuminance)\n\n    time.sleep(1)\n","repo_name":"pietdevaere/sensor_tools","sub_path":"brick_runner.py","file_name":"brick_runner.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1390525047","text":"\"\"\"Electricity Bill\"\"\"\nTARIFF_11 = 0.244618\nTARIFF_31 = 0.136928\n\nprint(\"Electricity Bill Estimator\")\nchoice = int(input(\"Which tariff? 11 or 31: \"))\nwhile choice != 11 and choice != 31:\n    print(\"Invalid tariff\")\n    choice = int(input(\"Which tariff? 11 or 31: \"))\n\nif choice == 11:\n    cents_per_kwh = TARIFF_11 * 100\nelif choice == 31:\n    cents_per_kwh = TARIFF_31 * 100\n\ndaily_use = int(input(\"Enter daily use in kWh: \"))\nbilling_days = int(input(\"Enter number of billing days: \"))\n\nestimated_bill = (cents_per_kwh / 100) * daily_use * billing_days\nprint(f\"Estimated bill: ${estimated_bill}\")\n","repo_name":"joshua-dubbeld/cp1404practicals","sub_path":"prac_01/electricity_bill.py","file_name":"electricity_bill.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35367619065","text":"import text\n\ndef main_menu(): # function for displaying the menu\n    for i, item in enumerate(text.menu):\n        if i == 0:\n            print(item)\n        else:\n            print(f'\\t{i}. {item}')\n    while True:\n        choice = input(text.input_menu)\n        if choice.isdigit() and 0 < int(choice) < len(text.menu):\n            return int(choice)\n        else:\n            print(text.input_menu_error)\n\ndef print_message(msg: str): # function for printing messages\n    print('\\n' + '=' * len(msg))\n    print(msg)\n    print('=' * len(msg) + '\\n')\n\ndef show_book(book: dict[int, list[str]], msg: str): # function for printing the contact book, or an error message if it is empty\n    if book:\n        print('\\n' + '*' * 67)\n        for i, contact in book.items():\n            print(f'{i:>3}. 
{contact[0]:<20} {contact[1]:<20} {contact[2]:<20}') # right-align the index in a field 3 characters wide\n        print('=' * 67 + '\\n')\n    else:\n        print_message(msg)\n\ndef input_contact(msg: str) -> list[str]: # function reads a contact's fields from input and returns them as a list\n    contact = []\n    for input_text in msg:\n        contact.append(input(input_text))\n    return contact\n\ndef input_request(msg: str) -> str:\n    return input(msg)","repo_name":"Arman1407/Pithon_","sub_path":"guide_modul/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17867719150","text":"import csv\nimport time\nimport datetime\nimport os\n\n## This script takes a full 14-day scrape output from scrapy and converts it\n## to separate 14-day price and volume tables indexed by correct dates\n\nprice_data = {}\nvol_data = {} # read scrapy output\nwith open('full_output.csv', mode='r') as f:\n    header = next(f)\n    \n    reader = csv.reader(f)\n    for row in reader: # reformat scrape output columns from alphabetical\n        master_list = [] # to chronological\n        price_l = []\n        vol_l = []\n        sorted_price_list = []\n        sorted_vol_list = []\n        name = row[15]\n        for entry in row:\n            master_list.append(entry)\n        for i in range(15):\n            vol_l.append(row[i])\n            price_l.append(row[(i+16)])\n        sorted_price_list = [name, price_l[0], price_l[1], price_l[7], price_l[8], price_l[9],\n                             price_l[10], price_l[11], price_l[12], price_l[13], price_l[14],\n                             price_l[2], price_l[3], price_l[4], price_l[5], price_l[6]]\n        sorted_vol_list = [name, vol_l[0], vol_l[1], vol_l[7], vol_l[8], vol_l[9],\n                           vol_l[10], vol_l[11], vol_l[12], vol_l[13], vol_l[14],\n                           vol_l[2], vol_l[3], vol_l[4], vol_l[5], vol_l[6]]\n        price_data[name] = sorted_price_list\n        vol_data[name] = sorted_vol_list\n    f.close()\n\ncurrent_date = datetime.date.today()\n\n# replace chronological, variable-named columns with chronological dates\n# save to separate 14day_price.csv and 14day_vol.csv files\nheader = ['name']\nfor d in range(15):\n    header.append((current_date - datetime.timedelta(days=d)).strftime('%Y/%m/%d'))\nos.system(\"cd ~/OSRS_Item_Investment_Project/\")\nwith open('data/14day_price.csv', mode='w') as price_file:\n    writer = csv.writer(price_file)\n    writer.writerow(header)\n    for e in price_data:\n        writer.writerow(price_data[e])\n    price_file.close()\n\n\nwith open('data/14day_vol.csv', mode='w') as vol_file:\n    writer = csv.writer(vol_file)\n    writer.writerow(header)\n    for e in vol_data:\n        writer.writerow(vol_data[e])\n    vol_file.close()","repo_name":"KenanBiren/OSRS_Item_Investment_Project","sub_path":"src/backend/extract/full_scrape/post_scrape.py","file_name":"post_scrape.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"70733357066","text":"from system.core.controller import *\nimport logging\nimport random\nimport datetime\nimport time\n\nclass Game(Controller):\n\n    def __init__(self, action):\n        logging.debug(\"init:begin\")\n        super(Game, self).__init__(action)\n        # self.load_model('GameModel')\n        self.db = self._app.db\n\n    def index(self):\n        logging.debug(\"index:begin\")\n\n        # set initial gold value\n        if not 'gold' in session:\n            session['gold'] = 0\n            session['messages'] = []\n\n        logging.debug(\"index:end\")\n        return self.load_view('index.html')\n\n    def process_money(self, methods=['POST']):\n        \"\"\" update money balance and set messages \"\"\"\n        ds = datetime.datetime.now().strftime(\"%Y-%m-%d 
%H:%M\")\n logging.info(\"ds: {}\".format(ds))\n\n updateAmount = 0\n gold = session['gold']\n venue = request.form['venue']\n messages = session['messages']\n\n if venue == 'farm':\n updateAmount = random.randint(10,21)\n\n elif venue == 'cave':\n updateAmount = random.randint(5,10)\n\n elif venue == 'house':\n updateAmount = random.randint(2,5)\n\n else: # is casino by process of elimination\n updateAmount = random.randint(-50,50)\n\n # construct message and add to messages\n if updateAmount < 0:\n message = 'Entered a {} and lost {} coins. Ouch! {}'.format(venue, updateAmount, ds)\n else:\n message = \"Earned {} from the {}! {}\".format(updateAmount,venue, ds)\n\n # save messages in session obj\n logging.info(message)\n messages.insert(0, message)\n\n # save gold in session obj\n gold += updateAmount\n session['gold'] = gold\n\n return redirect('/')\n","repo_name":"guywhorley/codingdojo","sub_path":"dojo/python/mvc/ninja_gold/app/controllers/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6257598458","text":"import os\nfrom subprocess import call\nfrom argparse import ArgumentParser\nimport sys\nfrom lxml import etree\n\n\nclass NexusUploader:\n def __init__(self, root_dir, nexus_url, nexus_repo, repo_id='nexus'):\n self.root_dir = root_dir\n self.nexus_url = nexus_url\n self.nexus_repo = nexus_repo\n self.repo_id = repo_id\n\n def get_pom_files(self):\n poms = []\n\n for dir_name, subdir_list, file_list in os.walk(self.root_dir):\n for file_name in file_list:\n if file_name.endswith('.pom'):\n poms.append(os.sep.join([dir_name, file_name]))\n\n return poms\n\n def upload_with_poms(self):\n poms = self.get_pom_files()\n parser = etree.XMLParser(remove_comments=False)\n\n for index, pom in enumerate(poms):\n print(\"%d/%d: %s\" % (index + 1, len(poms), pom))\n\n for file in os.listdir(os.path.dirname(pom)):\n if file.endswith(\".jar\") and \"-sources\" in file:\n source_file = os.path.join(os.path.dirname(pom), file)\n elif file.endswith(\".jar\"):\n jar_file = os.path.join(os.path.dirname(pom), file)\n\n xml = etree.parse(pom, parser=parser)\n groupId = xml.find(\"./{*}groupId\")\n artifactId = xml.find(\"./{*}artifactId\")\n version = xml.find(\"./{*}version\")\n packaging = xml.find(\"./{*}packaging\")\n if not packaging:\n packaging = \"jar\"\n else:\n packaging = packaging.text\n\n print(\"Running CMD: mvn deploy:deploy-file -DgeneratePom=false -DgroupId=%s -DartifactId=%s -Dversion=%s \"\n \"-Dpackaging=%s -Dfile=%s -DrepositoryId=%s -Durl=%s/repository/%s -DpomFile=%s -Dsources=%s -DuniqueVersion=false\"\n % (groupId.text, artifactId.text, version.text, packaging, jar_file, self.repo_id, self.nexus_url,\n self.nexus_repo, pom, source_file))\n\n call(\"mvn deploy:deploy-file -DgeneratePom=false -DgroupId=%s -DartifactId=%s -Dversion=%s -Dpackaging=%s \"\n \"-Dfile=%s -DrepositoryId=%s -Durl=%s/repository/%s -DpomFile=%s -Dsources=%s -DuniqueVersion=false\"\n % (groupId.text, artifactId.text, version.text, packaging, jar_file, self.repo_id, self.nexus_url,\n self.nexus_repo, pom, source_file), shell=True)\n\ndef main():\n parser = ArgumentParser(description='Helper script to upload artifacts to Nexus')\n parser.add_argument(\"-d\", \"--dir\", help='Directory consisting artifacts to upload', required=True)\n parser.add_argument(\"-n\", \"--url\", help='Nexus server url', required=True)\n parser.add_argument(\"-r\", \"--repo\", help='Nexus Repository 
name to upload', required=True)\n parser.add_argument(\"-i\", \"--id\", help='repo-id defined in settings.xml which contains nexus credentials.'\n \"Defaults to 'nexus'\")\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args = parser.parse_args()\n root_dir = args.dir\n nexus_url = args.url\n nexus_repo = args.repo\n\n if not os.path.isdir(root_dir):\n print(\"[ERROR] %s dir not found\" % root_dir)\n exit(1)\n\n if args.id is not None:\n repo_id = args.id\n uploader = NexusUploader(root_dir, nexus_url, nexus_repo, repo_id)\n else:\n uploader = NexusUploader(root_dir, nexus_url, nexus_repo)\n\n uploader.upload_with_poms()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"siddeshbg/my-python","sub_path":"Nexus_Uploader/src/nexus_uploader.py","file_name":"nexus_uploader.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5932000858","text":"import os\nimport hmac\nimport logging\nimport json\nimport sys\nfrom http import HTTPStatus\n\n\nimport daiquiri\nimport kafka\nfrom kafka import KafkaProducer\nfrom kafka.admin import KafkaAdminClient, NewTopic\n\nfrom flask import Flask, Response, jsonify, make_response, request, current_app\nfrom prometheus_flask_exporter import PrometheusMetrics\n\n\n__version__ = \"0.2.0-dev\"\n\n\n_DEBUG = os.getenv(\"DEBUG\", False)\n\n\ndaiquiri.setup()\n_LOGGER = daiquiri.getLogger(\"webhook2kafka\")\n_LOGGER.setLevel(logging.DEBUG if _DEBUG else logging.INFO)\n\nKAFAK_BOOTSTRAP_SERVERS = os.getenv(\"KAFKA_BOOTSTRAP_SERVERS\", \"localhost:9092\")\nKAFKA_SSL_CAFILE = os.getenv(\"KAFKA_SSL_CAFILE\", \"datahub-kafka.crt\")\n\nTHOTH_PACKAGE_RELEASES_TOPIC_NAME = \"thoth_package_releases\"\n\n\napp = Flask(__name__)\nmetrics = PrometheusMetrics(app)\nmetrics.info(\"thoth_package_releases_webhook2kafka_info\", \"Thoth's Package Releases webhook2kafka\", version=__version__)\n\n\n@app.after_request\ndef add_app_version(response):\n response.headers[\"X-Thoth-Package-Releases\"] = __version__\n return response\n\n\n@app.route(\"/\")\ndef root():\n return f\"This service is for Bots only, anyway, here is a tiny glimpse into what I am: v{__version__}\"\n\n\n@app.route(\"/healthz\")\n@metrics.do_not_track()\ndef healthz():\n status_code = HTTPStatus.OK\n health = {\"version\": __version__}\n\n return make_response(jsonify(health), status_code)\n\n\n@app.route(\"/webhook\", methods=[\"POST\"])\ndef send_webhook_to_topic():\n \"\"\"Entry point for github webhook.\"\"\"\n resp = Response()\n payload = None\n status_code = HTTPStatus.OK\n payload = request.json\n\n if payload is None:\n _LOGGER.error(\"GitHub webhook payload was empty\")\n return resp, HTTPStatus.INTERNAL_SERVER_ERROR\n\n _publish(THOTH_PACKAGE_RELEASES_TOPIC_NAME, payload)\n\n return resp, status_code\n\n\ndef _publish(topic: str, payload: dict) -> str:\n \"\"\"Publish the given dict to topic.\"\"\"\n producer = None\n status_code = HTTPStatus.OK\n\n if producer is None:\n _LOGGER.debug(\"KafkaProducer was not connected, trying to reconnect...\")\n try:\n producer = KafkaProducer(\n bootstrap_servers=KAFAK_BOOTSTRAP_SERVERS,\n acks=0, # Wait for leader to write the record to its local log only.\n compression_type=\"gzip\",\n value_serializer=lambda v: json.dumps(v).encode(\"utf-8\"),\n security_protocol=\"SSL\",\n ssl_cafile=KAFKA_SSL_CAFILE,\n )\n except kafka.errors.NoBrokersAvailable as excptn:\n _LOGGER.debug(\"while trying to reconnect KafkaProducer: we failed...\")\n 
_LOGGER.error(excptn)\n return HTTPStatus.INTERNAL_SERVER_ERROR\n\n try:\n future = producer.send(topic, payload)\n result = future.get(timeout=6)\n _LOGGER.debug(result)\n except AttributeError as excptn:\n _LOGGER.debug(excptn)\n status_code = HTTPStatus.INTERNAL_SERVER_ERROR\n except (kafka.errors.NotLeaderForPartitionError, kafka.errors.KafkaTimeoutError) as excptn:\n _LOGGER.error(excptn)\n producer.close()\n producer = None\n\n status_code = HTTPStatus.INTERNAL_SERVER_ERROR\n\n return status_code\n\n\nif __name__ == \"__main__\":\n _LOGGER.info(f\"Thoth's Package Releases webhook2kafka v{__version__} started.\")\n _LOGGER.debug(\"DEBUG mode is enabled!\")\n\n app.config[\"GITHUB_WEBHOOK_SECRET\"] = os.environ.get(\"GITHUB_WEBHOOK_SECRET\")\n\n if os.environ.get(\"KAFKA_CREATE_TOPICS\"):\n topic_list = []\n topic_list.append(NewTopic(name=THOTH_PACKAGE_RELEASES_TOPIC_NAME, num_partitions=1, replication_factor=1))\n\n admin_client = KafkaAdminClient(\n bootstrap_servers=KAFAK_BOOTSTRAP_SERVERS,\n client_id=\"package_releases_multiplexer\",\n security_protocol=\"SSL\",\n ssl_cafile=KAFKA_SSL_CAFILE,\n )\n admin_client.create_topics(new_topics=topic_list, validate_only=False)\n\n _LOGGER.info(f\"running Flask application now...\")\n app.run(host=\"0.0.0.0\", port=8080, debug=_DEBUG)\n","repo_name":"goern/package-releases-multiplexer","sub_path":"webhook2kafka.py","file_name":"webhook2kafka.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42567137837","text":"#!/opt/local/bin/python\n\n\"\"\"\n\nThis script contains several functions to extract parameters from cell trajectories.\nIt is intended to be used as a module to incorporate the functions into a migration analysis pipeline.\nRunning this script directly will generate and display some plots using sample trajectory data.\nThe results are simply plotted, but the data can easily be exported.\n\n\"\"\"\n\nimport itertools\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress\n\ndef extract_mean_inst_speed(x, y, t):\n \"\"\"Returns the mean instantaneous speed for a 2D trajectory\n \n Parameters\n ----------\n x : 1D numpy array\n an array of the x positions\n y : 1D numpy array\n an array of the y positions\n t : 1D numpy array\n an array of the time points\n\n Returns\n -------\n mean_inst_speed : float\n the mean instantaneous speed\n \"\"\"\n\n speeds = np.zeros(len(x) - 1)\n for i in range(len(x) - 1):\n dist = np.sqrt((x[i+1] - x[i])**2 + (y[i+1] - y[i])**2)\n speeds[i] = dist / (t[i+1] - t[i])\n mean_inst_speed = np.mean(speeds)\n\n return mean_inst_speed\n\ndef extract_persistence(x, y):\n \"\"\"Returns the persistence value for a 2D trajectory\n\n Parameters\n ----------\n x : 1D numpy array\n an array of the x positions\n y : 1D numpy array\n an array of the y positions\n\n Returns\n -------\n persistence : float\n the persistence of the trajectory\n \"\"\"\n\n distances = np.zeros(len(x) - 1)\n for i in range(len(x) - 1):\n distances[i] = np.sqrt((x[i+1] - x[i])**2 + (y[i+1] - y[i])**2)\n full_dist = np.linalg.norm((x[-1] - x[0], y[-1] - y[0]))\n persistence = full_dist / np.sum(distances)\n\n return persistence\n\ndef extract_msd(x, y, t):\n \"\"\"Returns the mean squared displacement and power-law exponent for a 2D trajectory\n\n Parameters\n ----------\n x : 1D numpy array\n an array of the x positions\n y : 1D numpy array\n an array of the y positions\n t 
: 1D numpy array\n        an array of the time points\n\n    Returns\n    -------\n    msd : 1D numpy array\n        the mean squared displacement\n    time_lag : 1D numpy array\n        the time lag of the msd\n    slope : float\n        the power-law exponent from the fit of the msd\n    intercept : float\n        the y-intercept from the power-law fit of the msd\n    \"\"\"\n\n    # extracts the msd (this can be done much faster using a fft)\n    msd = np.zeros_like(x) * np.nan\n    for i in range(len(x)):\n        displ_sq = np.zeros_like(x) * np.nan\n        for j in range(len(x) - i):\n            displ_sq[j] = (x[j + i] - x[j]) ** 2 + (y[j + i] - y[j]) ** 2\n        msd[i] = np.nanmean(displ_sq)\n\n    #fits to find power-law slope (linear regression of log-transform)\n    result = linregress(np.log(t[1:15]), np.log(msd[1:15]))\n    slope, intercept, r_value, p_value, std_err = result\n\n    return t, msd, (slope, intercept)\n\ndef extract_dir_corr(traj_list):\n    \"\"\"Extracts the directional correlation vs. starting distance for a list of trajectories\n\n    Parameters\n    ----------\n    traj_list : list of dictionaries\n        each dictionary should be a trajectory containing: 'x' (x positions), 'y' (y positions), 't' (time points)\n\n    Returns\n    -------\n    start_dist_list : 1D numpy array\n        a list of the starting distances between trajectories\n    mean_corr_list : 1D numpy array\n        a list of the mean directional correlation corresponding to start_dist_list\n    \"\"\"\n\n    #make list of all possible trajectory combinations\n    iter_list = itertools.combinations(range(len(traj_list)), 2)\n    # comb_list = list(itertools.islice(iter_list,0,None,1000)) # do this if you want to get only a sample\n    comb_list = list(itertools.islice(iter_list, None))\n\n    #make arrays to keep track of the starting distance and mean correlation\n    mean_dist_list = []\n    mean_corr_list = []\n\n    #iterates through the combinations\n    for idxs in comb_list:\n\n        #gets the trajectories to analyze in this iteration\n        traj1 = traj_list[idxs[0]]\n        traj2 = traj_list[idxs[1]]\n\n        #determines the overlapping timepoints\n        traj1_overlap_idx = np.argwhere(np.isin(traj1['t'],traj2['t']))\n\n        #only analyze if there is any overlap\n        if len(traj1_overlap_idx) > 0:\n\n            # for overlapping timepoints, makes a new array of the xyt coordinates\n            traj1_overlap = np.array([traj1['x'][traj1_overlap_idx],\n                                      traj1['y'][traj1_overlap_idx],\n                                      traj1['t'][traj1_overlap_idx]])\n\n            # finds the corresponding timepoints in traj2 and gets the xyt coordinates\n            traj2_overlap_idx = np.argwhere(np.isin(traj2['t'],traj1['t']))\n            traj2_overlap = np.array([traj2['x'][traj2_overlap_idx],\n                                      traj2['y'][traj2_overlap_idx],\n                                      traj2['t'][traj2_overlap_idx]])\n\n            #calculates the mean correlation for the x,y data across the overlapping timepoints\n            dist_list, corr_list = get_corr(traj1_overlap[:2,:], traj2_overlap[:2,:])\n            mean_dist = np.nanmean(dist_list)\n            mean_corr = np.nanmean(corr_list)\n\n            # append to lists\n            mean_dist_list.append(mean_dist)\n            mean_corr_list.append(mean_corr)\n\n    return mean_dist_list, mean_corr_list\n\ndef get_corr(traj1,traj2):\n    \"\"\"calculates the directional correlations between two 2D trajectories\n\n    Parameters\n    ----------\n    traj1 : 2D list\n        a list for trajectory one with the form [[x1, x2, xn],[y1, y2, yn]]\n    traj2 : 2D list\n        a list for trajectory two with the form [[x1, x2, xn],[y1, y2, yn]]\n\n    Returns\n    -------\n    dist_list : 1D numpy array\n        an array of the distances between the trajectories at each time point\n    corr_list : 1D numpy array\n        an array of the correlations at each time point\n\n    \"\"\"\n\n    #calculates the distances between the trajectories 
at each time step\n dist_list = np.linalg.norm((traj2[0] - traj1[0], traj2[1] - traj1[1]),axis=0)\n\n #calculates the displacement angles and the directional correlations\n traj1_angles = np.arctan2(traj1[1][1:] - traj1[1][:-1], traj1[0][1:] - traj1[0][:-1])\n traj2_angles = np.arctan2(traj2[1][1:] - traj2[1][:-1], traj2[0][1:] - traj2[0][:-1])\n corr_list = np.cos(traj2_angles - traj1_angles)\n\n return dist_list, corr_list\n\ndef test_migration_analysis(x,y,t):\n \"\"\"Calculates trajectory parameters and plots the data to display\n\n Parameters\n ----------\n x : 1D numpy array\n an array of the x positions\n y : 1D numpy array\n an array of the y positions\n t : 1D numpy array\n an array of the time points\n \"\"\"\n\n print(\"Mean Inst. Speed:\", extract_mean_inst_speed(x,y,t), \"um/min\")\n print(\"Persistence: \", extract_persistence(x,y))\n\n #plots the trajectory from the origin\n fig, ax = plt.subplots()\n cmap = plt.cm.get_cmap('plasma')\n n = len(x)\n colors = [cmap(1. * i / (n - 1)) for i in range(n - 1)]\n ax.set_prop_cycle('color', colors)\n for i in range(n - 1):\n ax.plot(x[i:i + 2], y[i:i + 2])\n ax.plot(x[0],y[0],'o',color=cmap(0))\n ax.plot(x[-1],y[-1],'o',color=cmap(np.inf))\n\n ax_max = max(max(np.sqrt(x**2)),max(np.sqrt(y**2)))\n ax_max = ax_max + ax_max * 0.1\n ax.set_xlim(-ax_max,ax_max)\n ax.set_ylim(-ax_max,ax_max)\n ax.set_xlabel('Displacement ($x$, $\\mu$m)')\n ax.set_ylabel('Displacement ($y$, $\\mu$m)')\n plt.tight_layout()\n\n #plots the msd with a power-law fit\n fig2, ax2 = plt.subplots()\n t_lag, msd, (alpha, intercept) = extract_msd(x,y,t)\n ax2.plot(t_lag,msd,'bo',label=\"Raw Data\")\n t_fit = np.linspace(t_lag[1],t_lag[15],10)\n msd_fit = t_fit ** alpha * np.exp(intercept)\n ax2.plot(t_fit,msd_fit,'b-',label=r\"$\\alpha=%.2f$\"%alpha)\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n ax2.set_xlabel('Time Lag (min.)')\n ax2.set_ylabel('MSD ($\\mu$m)')\n ax2.legend(loc='upper left')\n plt.tight_layout()\n\n plt.show()\n\ndef main():\n \"\"\"Uses some simulated data to test the trajectory analysis\"\"\"\n\n #set a pixel size and time interval for the tests\n px_size = 1 #pixel size (in um, e.g.)\n time_int = 5 #time interval (in min, e.g.)\n\n # uses some made-up data\n print(\"Made-up Trajectory\")\n x = np.array([0,1,3,4,3,6,8,7,10,12,14,13,12,13,15,17,19,20,21,20,19,20,19,21,15,16,19,17,16]) * px_size\n y = np.array([0,2,1,3,1,2,5,7,6,8,9,12,10,11,10,11,14,14,12,13,14,12,12,10,9,10,8,7,7]) * px_size\n t = np.arange(len(x)) * time_int\n test_migration_analysis(x,y,t)\n\n # uses a perfectly straight trajectory\n print(\"Straight Trajectory\")\n x = np.arange(100)\n y = np.zeros(100)\n t = np.arange(len(x)) * time_int\n test_migration_analysis(x,y,t)\n\n # simulates a 2D random walk\n print(\"Random Walk\")\n steps_x = np.random.randint(-2,3,size=(100,2))\n steps_y = np.random.randint(-2,3,size=(100,2))\n xy = np.concatenate([steps_x,steps_y]).cumsum(0)\n x = xy[:,0]\n y = xy[:,1]\n t = np.arange(len(x)) * time_int\n test_migration_analysis(x,y,t)\n\nif __name__==\"__main__\":\n main()\n","repo_name":"agclark12/tumor_migration_analysis","sub_path":"tumor_migration_analysis/migration_analysis.py","file_name":"migration_analysis.py","file_ext":"py","file_size_in_byte":9261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44496370867","text":"\"\"\"A processor for the text classification task.\"\"\"\n\nfrom __future__ import annotations\n\nfrom collections.abc import Iterable\nfrom typing import 
Any\n\nfrom explainaboard import TaskType\nfrom explainaboard.analysis import feature\nfrom explainaboard.analysis.analyses import (\n Analysis,\n AnalysisLevel,\n BucketAnalysis,\n CalibrationAnalysis,\n ComboCountAnalysis,\n)\nfrom explainaboard.analysis.feature import FeatureType\nfrom explainaboard.analysis.feature_funcs import (\n count_tokens,\n feat_freq_rank,\n feat_length_freq,\n feat_num_oov,\n get_basic_words,\n get_lexical_richness,\n)\nfrom explainaboard.info import SysOutputInfo\nfrom explainaboard.metrics.accuracy import AccuracyConfig\nfrom explainaboard.metrics.metric import MetricConfig\nfrom explainaboard.processors.processor import Processor\nfrom explainaboard.utils.logging import progress\nfrom explainaboard.utils.typing_utils import unwrap\n\n\nclass TextClassificationProcessor(Processor):\n \"\"\"A processor for the text classification task.\"\"\"\n\n @classmethod\n def task_type(cls) -> TaskType:\n \"\"\"See Processor.task_type.\"\"\"\n return TaskType.text_classification\n\n def default_analysis_levels(self) -> list[AnalysisLevel]:\n \"\"\"See Processor.default_analysis_levels.\"\"\"\n features: dict[str, FeatureType] = {\n \"text\": feature.Value(\n dtype=feature.DataType.STRING,\n description=\"the text of the example\",\n ),\n \"true_label\": feature.Value(\n dtype=feature.DataType.STRING,\n description=\"the true label of the input\",\n ),\n \"predicted_label\": feature.Value(\n dtype=feature.DataType.STRING,\n description=\"the predicted label\",\n ),\n \"confidence\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"the confidence of the predicted label\",\n max_value=1.0,\n min_value=0.0,\n optional=True,\n ),\n \"text_length\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"text length in tokens\",\n func=lambda info, x, c: count_tokens(info, x[\"text\"]),\n ),\n \"text_chars\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"text length in characters\",\n func=lambda info, x, c: len(x[\"text\"]),\n ),\n \"basic_words\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"the ratio of basic words\",\n func=lambda info, x, c: get_basic_words(x[\"text\"]),\n ),\n \"lexical_richness\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"lexical diversity\",\n func=lambda info, x, c: get_lexical_richness(x[\"text\"]),\n ),\n \"num_oov\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"the number of out-of-vocabulary words\",\n require_training_set=True,\n func=lambda info, x, c, stat: feat_num_oov(\n info, x[\"text\"], stat[\"vocab\"]\n ),\n ),\n \"fre_rank\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=(\n \"the average rank of each word based on its frequency in \"\n \"training set\"\n ),\n require_training_set=True,\n func=lambda info, x, c, stat: feat_freq_rank(\n info, x[\"text\"], stat[\"vocab_rank\"]\n ),\n ),\n \"length_fre\": feature.Value(\n dtype=feature.DataType.FLOAT,\n description=\"the frequency of text length in training set\",\n require_training_set=True,\n func=lambda info, x, c, stat: feat_length_freq(\n info, x[\"text\"], stat[\"length_fre\"]\n ),\n ),\n }\n\n return [\n AnalysisLevel(\n name=\"example\",\n features=features,\n metric_configs=self.default_metrics(),\n )\n ]\n\n def default_analyses(self) -> list[Analysis]:\n \"\"\"See Processor.default_analyses.\"\"\"\n features = self.default_analysis_levels()[0].features\n # Create analyses\n analyses: list[Analysis] = [\n BucketAnalysis(\n level=\"example\",\n 
description=features[\"true_label\"].description,\n feature=\"true_label\",\n method=\"discrete\",\n num_buckets=15,\n ),\n CalibrationAnalysis(\n level=\"example\",\n description=\"calibration analysis\",\n feature=\"confidence\",\n num_buckets=10,\n ),\n ComboCountAnalysis(\n level=\"example\",\n description=\"confusion matrix\",\n features=(\"true_label\", \"predicted_label\"),\n ),\n ]\n analyses.extend(self.continuous_feature_analyses())\n return analyses\n\n @classmethod\n def default_metrics(\n cls,\n level: str = \"example\",\n source_language: str | None = None,\n target_language: str | None = None,\n ) -> dict[str, MetricConfig]:\n \"\"\"See Processor.default_metrics.\"\"\"\n return {\"Accuracy\": AccuracyConfig()}\n\n def _statistics_func(self, samples: Iterable[Any], sys_info: SysOutputInfo):\n vocab: dict[str, float] = {}\n length_fre: dict[int, float] = {}\n total_samps = 0\n tokenizer = unwrap(sys_info.source_tokenizer)\n for sample in progress(samples):\n text = sample[\"text\"]\n tokens = tokenizer(text)\n length = len(tokens)\n\n length_fre[length] = length_fre.get(length, 0.0) + 1.0\n\n # update vocabulary\n for w in tokens:\n vocab[w] = vocab.get(w, 0.0) + 1.0\n\n total_samps += 1\n\n # the rank of each word based on its frequency\n sorted_dict = {\n key: rank\n for rank, key in enumerate(sorted(set(vocab.values()), reverse=True), 1)\n }\n vocab_rank = {k: sorted_dict[v] for k, v in vocab.items()}\n\n for k, v in length_fre.items():\n length_fre[k] = v * 1.0 / total_samps\n\n return {\"vocab\": vocab, \"vocab_rank\": vocab_rank, \"length_fre\": length_fre}\n","repo_name":"neulab/ExplainaBoard","sub_path":"explainaboard/processors/text_classification.py","file_name":"text_classification.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","stars":356,"dataset":"github-code","pt":"81"} +{"seq_id":"20548107785","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom api.utils import ShoppingCartItem, get_grocery_list\nfrom recipes.models import Ingredient, Recipe, RecipeIngredient, ShoppingCart\n\nUser = get_user_model()\n\n\nclass TestShoppingList(TestCase):\n \"\"\"Set of tests to check if the shopping list util behaves correctly.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.ingredient1 = Ingredient.objects.create(\n name='Pumpkin', measurement_unit='ea'\n )\n cls.ingredient2 = Ingredient.objects.create(\n name='Chicken', measurement_unit='g.'\n )\n cls.ingredient3 = Ingredient.objects.create(\n name='Salt', measurement_unit='to taste'\n )\n cls.user = User.objects.create_user(\n username='cook',\n email='e1@mail.com',\n first_name='Cook',\n last_name='Cook',\n password='pas$W0rd',\n )\n cls.user2 = User.objects.create_user(\n username='amateur',\n email='e2@mail.com',\n first_name='Cook',\n last_name='Cook',\n password='pas$W0rd',\n )\n presets = {\n 'cooking_time': 15,\n 'author': cls.user,\n }\n cls.recipe1 = Recipe.objects.create(**presets, name='1')\n cls.recipe2 = Recipe.objects.create(**presets, name='2')\n cls.recipe3 = Recipe.objects.create(**presets, name='3')\n\n RecipeIngredient.objects.create(\n recipe=cls.recipe1, ingredient=cls.ingredient1, amount=100\n )\n RecipeIngredient.objects.create(\n recipe=cls.recipe1, ingredient=cls.ingredient2, amount=100\n )\n RecipeIngredient.objects.create(\n recipe=cls.recipe2, ingredient=cls.ingredient2, amount=100\n )\n RecipeIngredient.objects.create(\n recipe=cls.recipe3, ingredient=cls.ingredient3, amount=10\n )\n\n 
cls.recipe1.ingredients.add(cls.ingredient1)\n cls.recipe1.ingredients.add(cls.ingredient2)\n cls.recipe2.ingredients.add(cls.ingredient2)\n cls.recipe3.ingredients.add(cls.ingredient3)\n ShoppingCart.objects.create(user=cls.user, recipe=cls.recipe1)\n ShoppingCart.objects.create(user=cls.user, recipe=cls.recipe2)\n\n def test_empty_grocery_list(self):\n \"\"\"Check for empty dictionary if there're not recipes in list.\"\"\"\n\n result = get_grocery_list(self.user2)\n self.assertIsInstance(result, list)\n self.assertListEqual(result, [])\n\n def test_convert_to_human_readable_format(self):\n \"\"\"Does get_grocery_list work as intended?\"\"\"\n\n result = get_grocery_list(self.user)\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 2)\n self.assertIn(\n ShoppingCartItem(\n name='Pumpkin', measurement_unit='ea', amount=100\n ),\n result,\n )\n self.assertIn(\n ShoppingCartItem(\n name='Chicken', measurement_unit='g.', amount=200\n ),\n result,\n )\n","repo_name":"holohup/foodgram-project-react","sub_path":"backend/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17265785926","text":"import datetime\r\nimport glob\r\nimport os\r\nimport re\r\nimport shutil\r\nimport subprocess\r\nimport json\r\nfrom distutils.dir_util import copy_tree\r\n\r\nBUILD_PATH = os.getcwd()\r\n\r\nEDEN_PATH = f\"{os.getcwd()}/../eden\"\r\nVIS_CTI_PATH = f\"{os.getcwd()}/../VIS_CTI\"\r\nPYAUTOCONF_PATH = f\"{os.getcwd()}/../PyAutoConf\"\r\nPYAUTOFIT_PATH = f\"{os.getcwd()}/../PyAutoFit\"\r\nPYAUTOARRAY_PATH = f\"{os.getcwd()}/../PyAutoArray\"\r\nPYAUTOCTI_PATH = f\"{os.getcwd()}/../PyAutoCTI\"\r\n\r\nFOLDERS_OMIT = [\"VIS_CTI_AutoArray_Plot\", \"VIS_CTI_AutoCTI_Plot\"]\r\n\r\ndef main():\r\n\r\n # move files from eden to VIS_CTI\r\n\r\n os.chdir(VIS_CTI_PATH)\r\n\r\n for x in [t[0] for t in os.walk(\"../eden_2\")]:\r\n\r\n vis_cti_path = f\"{VIS_CTI_PATH}/{x}\"\r\n os.chdir(vis_cti_path)\r\n\r\n if not sum([folder in vis_cti_path for folder in FOLDERS_OMIT]):\r\n\r\n for f in glob.glob(\"modules.json\"):\r\n with open(f) as infile:\r\n modules = json.load(infile)\r\n for module, command in modules.items():\r\n if command == \"all\":\r\n file_list = os.listdir(f\"{EDEN_PATH}/{module}\")\r\n elif command == \"*.py\":\r\n file_list = os.listdir(f\"{EDEN_PATH}/{module}\")\r\n file_list = [file for file in file_list if file.endswith(\".py\")]\r\n else:\r\n file_list = command.strip('][').split(', ')\r\n\r\n for file in file_list:\r\n\r\n file_path = f\"{EDEN_PATH}/{module}/{file}\"\r\n\r\n if os.path.isdir(file_path):\r\n shutil.copytree(file_path, f\"{vis_cti_path}/{file}\", dirs_exist_ok=True)\r\n else:\r\n shutil.copy(file_path, vis_cti_path)\r\n\r\n os.chdir(VIS_CTI_PATH)\r\n\r\n for x in [t[0] for t in os.walk(\"../eden_2\")]:\r\n\r\n vis_cti_path = f\"{VIS_CTI_PATH}/{x}\"\r\n os.chdir(vis_cti_path)\r\n\r\n for f in glob.glob(\"tests/python/*\"):\r\n if \"__init__.py\" in f:\r\n if os.path.isfile(f):\r\n os.remove(f)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Jammy2211/eden","sub_path":"move_eden_to_vis_cti.py","file_name":"move_eden_to_vis_cti.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36019801578","text":"ANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": 
\"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: osx_group_member\n\nshort_description: Add or remove users from groups on macOS\n\nversion_added: \"2.8\"\n\ndescription:\n - \"Add or remove users from groups on macOS.\"\"\n\noptions: TBD\n\nauthor:\n - Dale Sedivec (@dsedivec)\n\"\"\"\n\nEXAMPLES = \"\"\"\nTBD\n\"\"\"\n\nRETURN = \"\"\"\nTBD\n\"\"\"\n\n# fmt: off\n\nimport os\nimport subprocess\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n# fmt: on\n\n\ndef run_module():\n module_args = dict(\n group=dict(type=\"str\", required=True),\n user=dict(type=\"str\", required=True),\n state=dict(type=\"str\", choices=(\"present\", \"absent\")),\n )\n module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)\n should_be_in_group = module.params[\"state\"] == \"present\"\n checkmember_rc = subprocess.call(\n [\n \"dseditgroup\",\n \"-o\",\n \"checkmember\",\n \"-m\",\n module.params[\"user\"],\n module.params[\"group\"],\n ]\n )\n if checkmember_rc == 0:\n is_in_group = True\n elif checkmember_rc == os.EX_NOUSER:\n is_in_group = False\n elif checkmember_rc == os.EX_USAGE:\n module.fail_json(\n msg=(\n \"dseditgroup says group %r does not exist (exited %r)\"\n % (module.params[\"group\"], checkmember_rc)\n )\n )\n else:\n module.fail_json(\n msg=\"dseditgroup -o checkmember exited %r\" % (checkmember_rc,)\n )\n result = dict(changed=should_be_in_group != is_in_group)\n if result[\"changed\"] and not module.check_mode:\n subprocess.check_call(\n [\n \"dseditgroup\",\n \"-o\",\n \"edit\",\n \"-t\",\n \"user\",\n \"-a\" if should_be_in_group else \"-d\",\n module.params[\"user\"],\n module.params[\"group\"],\n ]\n )\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dsedivec/ansible_dlc","sub_path":"plugins/modules/osx_group_member.py","file_name":"osx_group_member.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10054945199","text":"import os\nimport sys\nimport librosa\nimport time\nimport moviepy.editor as mp\nimport numpy as np\nfrom glob import glob\nfrom scipy import signal\nfrom pathlib import Path\n\nclass VideoSynchTrimming:\n '''Class of functions for time synchronizing and trimming video files based on cross correlaiton of their audio.'''\n \n def __init__(self):\n '''Initialize VideoSynchTrimmingClass'''\n pass\n\n def get_clip_list(self, base_path, file_type):\n '''Return a list of all video files in the base_path folder that match the given file type.'''\n\n # change directory to folder containing raw videos\n os.chdir(base_path / \"RawVideos\")\n\n # create general search from file type to use in glob search, including cases for upper and lowercase file types\n file_extension_upper = '*' + file_type.upper()\n file_extension_lower = '*' + file_type.lower()\n \n # make list of all files with file type\n clip_list = glob(file_extension_upper) + glob(file_extension_lower) #if two capitalization standards are used, the videos may not be in original order\n \n # because glob behaves differently on windows vs. 
mac/linux, we collect all files both upper and lowercase, and remove redundant files that appear on windows\n unique_clip_list = []\n [unique_clip_list.append(clip) for clip in clip_list if clip not in unique_clip_list]\n \n os.chdir(base_path)\n return unique_clip_list\n\n def get_files(self, base_path, clip_list):\n '''Get video files from clip_list, extract the audio, and put the video and audio files in a list.\n Return a list of lists containing the video file name and file, and audio name and file.\n Also return a list containing the audio sample rate from each file.'''\n \n # create empty list for storing audio and video files, will contain sublists formatted like [video_file_name,video_file,audio_file_name,audio_file] \n file_list = []\n\n # create empty list to hold audio sample rate, so we can verify samplerate is the same across all audio files\n sample_rate_list = []\n\n video_path = base_path / \"RawVideos\"\n\n audio_path = base_path / \"AudioFiles\"\n os.makedirs(audio_path, exist_ok=True)\n\n # iterate through clip_list, open video files and audio files, and store in file_list\n for clip in clip_list:\n # take vid_name and change extension to create audio file name\n vid_name = clip\n audio_name = clip.split(\".\")[0] + '.wav'\n # open video files\n video_file = mp.VideoFileClip(str(video_path / vid_name), audio=True)\n\n # get length of video clip\n vid_length = video_file.duration\n\n # create .wav file of clip audio\n video_file.audio.write_audiofile(str(audio_path / audio_name))\n\n # extract raw audio from Wav file\n audio_signal, audio_rate = librosa.load(audio_path / audio_name, sr = None)\n sample_rate_list.append(audio_rate)\n\n # save video and audio file names and files in list\n file_list.append([vid_name, video_file, audio_name, audio_signal])\n\n # print relevant video and audio info\n print(\"video length:\", vid_length, \"seconds\", \"audio sample rate\", audio_rate, \"Hz\")\n\n return file_list, sample_rate_list\n\n def get_fps_list(self, file_list):\n '''Retrieve frames per second of each video clip in file_list'''\n return [file[1].fps for file in file_list]\n\n def check_rates(self, rate_list):\n '''Check if audio sample rates or audio frame rates are equal, throw an exception if not (or if no rates are given).'''\n if len(rate_list) == 0:\n raise Exception(\"no rates given\")\n else:\n if rate_list.count(rate_list[0]) == len(rate_list):\n print(\"all rates are equal to\", rate_list[0])\n return rate_list[0]\n else:\n raise Exception(f\"rates are not equal, rates are {rate_list}\")\n\n def normalize_audio(self, audio_file):\n '''Perform z-score normalization on an audio file and return the normalized audio file - this is best practice for correlating.'''\n return ((audio_file - np.mean(audio_file))/np.std(audio_file - np.mean(audio_file)))\n\n def cross_correlate(self, audio1, audio2):\n '''Take two audio files, sync them using cross correlation, and trim them to the same length.\n Inputs are two WAV files to be synced. 
Return the lag expressed in terms of the audio sample rate of the clips.\n '''\n\n # compute cross correlation with scipy correlate function, which gives the correlation of every different lag value\n # mode='full' makes sure every lag value possible between the two signals is used, and method='fft' uses the fast fourier transform to speed the process up \n corr = signal.correlate(audio1, audio2, mode='full', method='fft')\n # lags gives the amount of time shift used at each index, corresponding to the index of the correlate output list\n lags = signal.correlation_lags(audio1.size, audio2.size, mode=\"full\")\n # lag is the time shift used at the point of maximum correlation - this is the key value used for shifting our audio/video\n lag = lags[np.argmax(corr)]\n \n print(\"lag:\", lag)\n\n return lag\n\n def find_lags(self, file_list, sample_rate):\n '''Take a file list containing video and audio files, as well as the sample rate of the audio, cross correlate the audio files, and output a lag list.\n The lag list is normalized so that the lag of the latest video to start in time is 0, and all other lags are positive.\n '''\n \n lag_list = [self.cross_correlate(file_list[0][3],file[3])/sample_rate for file in file_list] # cross correlates all audio to the first audio file in the list\n #also divides by the audio sample rate in order to get the lag in seconds\n \n\n #now that we have our lag array, we subtract every value in the array from the max value\n #this creates a normalized lag array where the latest video has lag of 0\n #the max value lag represents the latest video - thanks Oliver for figuring this out\n norm_lag_list = [(max(lag_list) - value) for value in lag_list]\n \n print(\"original lag list: \", lag_list, \"normalized lag list: \", norm_lag_list)\n \n return norm_lag_list\n\n def trim_videos(self, file_list, lag_list, base_path):\n # this takes a list of video files and a list of lags, and shortens the beginning of the video by the lags, and trims the ends so they're all the same length\n \n # create new SyncedVideos folder\n synced_path = base_path / \"SyncedVideos\"\n os.makedirs(synced_path, exist_ok=True)\n\n # change directory to SyncedVideos folder\n os.chdir(synced_path)\n \n front_trimmed_videos = []\n\n # for each video in the list, create a new video trimmed from the begining by the lag value for that video, and add it to the empty list\n for i in range(len(file_list)):\n print(file_list[i][1])\n front_trimmed_video = file_list[i][1].subclip(lag_list[i],file_list[i][1].duration)\n #front_trimmed_video = file_list[i][1].subclip(lag_list[i]) # this is a cleaner way of writing this, but needs testing\n front_trimmed_videos.append([file_list[i][0], front_trimmed_video])\n \n print(front_trimmed_videos)\n\n # now we find the duration of each video and add it to a list to find the shortest video duration\n min_duration = min([video[1].duration for video in front_trimmed_videos])\n print(f\"shortest video is {min_duration}\")\n\n # create list to store names of final videos\n video_names = []\n # trim all videos to length of shortest video, and give it a new name\n \n for video in front_trimmed_videos:\n print(video)\n fully_trimmed_video = video[1].subclip(0,min_duration)\n if video[0].split(\"_\")[0] == \"raw\":\n video_name = \"synced_\" + video[0][4:]\n else:\n video_name = \"synced_\" + video[0]\n video_names.append(video_name) #add new name to list to reference for plotting\n fully_trimmed_video.write_videofile(video_name)\n print(f\"Cam name: {video_name}, Video 
Duration: {fully_trimmed_video.duration}\")\n\n        # reset our working directory\n        os.chdir(base_path)\n\n        return video_names # return names of new videos to reference for plotting\n\ndef main():\n    '''Run the functions from the VideoSynchTrimming class to sync all videos with the given file type in the base path folder.\n    Takes 2 command line arguments, session ID and folder path, with default arguments to allow paths to be entered manually.\n    '''\n\n    # start timer to measure performance\n    start_timer = time.time()\n\n    # get arguments from command line\n    args = sys.argv[1:]\n\n    #parse arguments from command line, with excepts covering hardcoded default values - maybe get rid of these try/except for final script\n    try:\n        sessionID = args[0]\n    except: \n        sessionID = \"partial_charuco_test_7_27_22\"\n    try:\n        fmc_data_path = args[1]\n    except: \n        fmc_data_path = Path(\"/Users/Philip/freemocap_data/\")\n\n    base_path = fmc_data_path / sessionID\n\n    # instantiate class\n    synch_and_trim = VideoSynchTrimming()\n    synch_and_trim # this may be unnecessary?\n\n    # set the base path and file type\n    file_type = \"MP4\" # should work with or without a period at the front, and in either case\n    \n    # create list of video clips in base path folder\n    clip_list = synch_and_trim.get_clip_list(base_path, file_type)\n\n    # get the files and store in list\n    files, sr = synch_and_trim.get_files(base_path, clip_list)\n\n    # find the frames per second of each video\n    fps = synch_and_trim.get_fps_list(files)\n    \n    # check that our frame rates and audio sample rates are all equal\n    synch_and_trim.check_rates(fps)\n    synch_and_trim.check_rates(sr)\n    \n    # find the lags\n    lag_list = synch_and_trim.find_lags(files, synch_and_trim.check_rates(sr))\n    \n    # use lags to trim the videos\n    trimmed_videos = synch_and_trim.trim_videos(files, lag_list, base_path)\n\n    # end performance timer\n    end_timer = time.time()\n    \n    #calculate and display elapsed processing time\n    elapsed_time = end_timer - start_timer\n    print(\"elapsed processing time in seconds:\", elapsed_time)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"HuMoN-Research-Lab/shared_utilities","sub_path":"SlimVideoSynchAndTrim.py","file_name":"SlimVideoSynchAndTrim.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37914796376","text":"from easyinput import *\nimport sys\n\n\ndef sumalinea(line):\n    suma = 0\n    for i in line:\n        suma += int(i)\n    return suma\n\n\ncounter3 = 0\n\nline = sys.stdin.readline().rstrip().split()\n\nwhile line != []:\n\n    for i in line:\n        if int(i) % 10 == 3:\n            counter3 += 1\n    if counter3 % 2 == 0 and counter3 != 0:\n        print(sumalinea(line))\n        break\n    else:\n        counter3 = 8\n\n    line = sys.stdin.readline().rstrip().split()\nif(line == []):\n    print(-1)\n","repo_name":"GlacyalWolf/programingAlgoritms2","sub_path":"Exams/Examen2_2021/sumThatLine.py","file_name":"sumThatLine.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28439529528","text":"import subprocess\nfrom typing import TYPE_CHECKING\nimport logging\n\ntry:\n    import angr\nexcept ImportError:\n    angr = None\n\ntry:\n    import syscall_agent\nexcept ImportError:\n    syscall_agent = None\n\n\nfrom . 
import Analyzer\n\nif TYPE_CHECKING:\n from .angr_project import angrProjectAnalyzer\n\n\n_l = logging.getLogger(__name__)\n\n\nclass angrUltimateTracerAnalyzer(Analyzer):\n \"\"\"\n Construct an angr project with ultimate tracer enabled. All syscalls will be out-sourced to an external syscall\n agent.\n \"\"\"\n def __init__(self, target, project_analyzer: 'angrProjectAnalyzer'):\n if angr is None or syscall_agent is None:\n raise ImportError(\"Failed to import angr or syscall_agent. Make sure angr and syscall_agent are installed.\")\n\n super().__init__(target)\n self.project_analyzer: 'angrProjectAnalyzer' = project_analyzer\n\n def _invoke_syscall_agent(self, project: 'angr.Project') -> subprocess.Popen:\n \"\"\"\n Invoke the expected syscall agent.\n \"\"\"\n agent = syscall_agent.manager.get_agent(project.arch.name)\n if agent is None:\n raise RuntimeError(\"Cannot find a syscall agent for project %r (architecture %s)\" % (project,\n project.arch.name))\n project.bureau.start() # get it ready to receive connections\n proc = agent.launch(\"tcp://127.0.0.1:%d\" % project.bureau.zmq_port) # launch the agent process\n return proc\n\n def make_project(self):\n if self.project_analyzer.project is not None:\n return self.project_analyzer.project\n\n engine = angr.engines.UberEngineSyscallTracing\n if self.project_analyzer.project is not None:\n _l.warning(\"An angr project was created. Destroying it.\")\n self.project_analyzer.project = None\n\n project = self.project_analyzer.fire(project_kwargs={\n 'auto_load_libs': True,\n 'engine': engine,\n 'use_sim_procedures': False,\n })\n\n return project\n\n def fire(self, *args, state: 'angr.SimState'=None, **kwargs):\n project = self.make_project()\n proc = self._invoke_syscall_agent(project)\n\n if state is None:\n raise ValueError('\"state\" must be specified')\n\n sim_manager = project.factory.simulation_manager(state)\n results = sim_manager.explore()\n\n print(results.deadended[0].posix.dumps(1).decode(\"ascii\"))\n\n # terminate the agent\n proc.terminate()\n","repo_name":"ucsb-seclab/heapster","sub_path":"heapster-env/angr-dev/archr/archr/analyzers/angr_ultimate_tracer.py","file_name":"angr_ultimate_tracer.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"81"} +{"seq_id":"771983847","text":"# This file is for training on AI Platform with scikit-learn.\r\n\r\n\r\n# [START setup]\r\nimport datetime\r\nimport os\r\nimport subprocess\r\nimport sys\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import svm\r\nimport joblib\r\n\r\n# Fill in your Cloud Storage bucket name\r\nBUCKET_NAME = 'x-circle-314022-mposyandu'\r\n# [END setup]\r\n\r\n# [START download-data]\r\nmposyandu_data_filename = 'mposyandu_data.csv'\r\nmposyandu_target_filename = 'mposyandu_target.csv'\r\ndata_dir = 'gs://x-circle-314022-mposyandu'\r\n\r\n# gsutil outputs everything to stderr so we need to divert it to stdout.\r\nsubprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,\r\n mposyandu_data_filename),\r\n mposyandu_data_filename], stderr=sys.stdout)\r\nsubprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,\r\n mposyandu_target_filename),\r\n mposyandu_target_filename], stderr=sys.stdout)\r\n# [END download-data]\r\n\r\n\r\n# [START load-into-pandas]\r\n# Load data into pandas, then use `.values` to get NumPy arrays\r\nmposyandu_data = pd.read_csv(mposyandu_data_filename).values\r\nmposyandu_target = pd.read_csv(mposyandu_target_filename).values\r\n\r\n# 
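Note: read_csv(...).values on a single-column file yields a 2D array of shape (n, 1).\r\nassert mposyandu_target.ndim == 2 # quick sanity check; numpy's ravel() would work equally well below\r\n\r\n# 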
Convert one-column 2D array into 1D array for use with scikit-learn\r\nmposyandu_target = mposyandu_target.reshape((mposyandu_target.size,))\r\n# [END load-into-pandas]\r\n\r\n# [START train-and-save-model]\r\n# Train the model\r\nclassifier = svm.SVC(kernel='linear')\r\nclassifier.fit(mposyandu_data, mposyandu_target)\r\n\r\n# Export the classifier to a file\r\nmodel_filename = 'model.joblib'\r\njoblib.dump(classifier, model_filename)\r\n# [END train-and-save-model]\r\n\r\n\r\n# [START upload-model]\r\n# Upload the saved model file to Cloud Storage\r\ngcs_model_path = os.path.join('gs://', BUCKET_NAME,\r\n datetime.datetime.now().strftime('mposyandu_%Y%m%d_%H%M%S'), model_filename)\r\nsubprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path],\r\n stderr=sys.stdout)\r\n# [END upload-model]\r\n","repo_name":"amaliaristantya/Capstone-m-Posyandu","sub_path":"mposyandu_training.py","file_name":"mposyandu_training.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"9991315848","text":"from django.urls import path, include\nfrom .views import *\n\nurlpatterns = ([\n path('', ProjetcsListView.as_view(), name='projects'),\n path('//', ProjectDetailView.as_view(), name='project'),\n path('create/', ProjectCreateView.as_view(), name='create'),\n path('delete//', ProjectDeleteView.as_view(), name='delete'),\n \n path('//', include([\n # image repository paths\n path('repos/',BancoImagesView.as_view(), name='repos'),\n path('image/',ImagesView.as_view(), name='image'),\n # Custom Vision algorithm paths\n path('extract/',Extract_features_View.as_view(), name='extract'),\n path('train/',Training_View.as_view(), name='train'),\n path('identy/',clasification_View.as_view(), name='identy'),\n ]))\n],'projects')","repo_name":"FernandoGG3221/Classifier","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"9645470531","text":"import numpy as np\nfrom spt3g import core, calibration, autoprocessing\nimport os.path\nimport pickle\nfrom glob import glob\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ks_2samp\nfrom scipy.signal import lombscargle\n\nextract_data = False\nplot = True\nresults_dict = {90: {'delta_cal_response':{},\n 'cal_response_before':{}},\n 150:{'delta_cal_response':{},\n 'cal_response_before':{}}}\n\nif extract_data:\n cal_path = '/sptgrid/analysis/calibration/calibrator'\n data_path = '/sptlocal/user/kferguson'\n boloprops = list(core.G3File('/sptgrid/analysis/calibration/boloproperties/60000000.g3'))[0]\n angle_data = {}\n for jband, band in enumerate([90,150]):\n with open(os.path.join(data_path, 'collated_final_angles_more_realizations_' + '%sGHz.pkl'%band),'rb') as f:\n angle_data[band] = pickle.load(f)\n\n scanify = autoprocessing.ScanifyDatabase(read_only=True)\n subfields = {}\n\n cal_obsids = np.sort([int(os.path.splitext(os.path.basename(path))[0]) \\\n for path in glob(os.path.join(cal_path, '*.g3'))])\n field_obsids = np.sort(list(angle_data[90].keys()))\n cal_obsids_before = []\n cal_obsids_after = []\n for obsid in field_obsids:\n cal_obsids_after.append(np.min(cal_obsids[cal_obsids>obsid]))\n cal_obsids_before.append(np.max(cal_obsids[cal_obsids median_frac_cal])\n plt.hist(angles_this_subfield[frac_cal_this_subfield < median_frac_cal],\n bins=np.linspace(-5, 5, 26),\n 
label='cal change < median', density=True, histtype='step')\n plt.hist(angles_this_subfield[frac_cal_this_subfield > median_frac_cal],\n bins=np.linspace(-5, 5, 26),\n label='cal change > median', density=True, histtype='step')\n plt.xlabel('angle / uncertainty')\n plt.title('{} GHz {}; KS p-value = {:.4E}'.format(band, subfield, pval))\n plt.legend()\n plt.savefig('angles_splitbycal_{}_{}.png'.format(band, subfield), dpi=200)\n plt.close()\n\n plt.figure()\n ls_freq = np.linspace(0.0005, 2, 4000)\n pgram = lombscargle(obsids_arr[subfields_arr==subfield] / (24*3600),\n delta_cal_response_arr[subfields_arr==subfield] - \\\n np.mean(delta_cal_response_arr[subfields_arr==subfield]), ls_freq)\n plt.plot(ls_freq, pgram)\n plt.xlabel('frequency [1/d]')\n plt.ylabel('Lomb-Scargle periodogram')\n plt.title('{}, {} GHz'.format(subfield, band))\n plt.tight_layout()\n plt.savefig('delta_cal_frac_lsgram_{}_{}GHz.png'.format(subfield, band), dpi=200)\n plt.close()\n\n","repo_name":"adamanderson/spt_analysis","sub_path":"analyses/axions/20210819_nonlinearity/extract_cal_differences.py","file_name":"extract_cal_differences.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"3755196367","text":"#! /usr/bin/python3\n\n\"\"\"\n@author: Vicente Yáñez\n\nSimple script to configure the log file\n\"\"\"\n\nimport logging\n\n# logging config\nlogging.basicConfig(filename='../gfa.log', level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(name)s %(message)s')\nlogger = logging.getLogger(__name__)\n","repo_name":"VicenteYanez/GFA","sub_path":"gfa/log_config.py","file_name":"log_config.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"31254988357","text":"import torch\nimport coremltools as ct\nimport numpy as np\nimport os\nimport sys\n\nfrom PIL import Image\nfrom scipy.spatial import distance\n\ndef crop_and_resize(image_name, new_size):\n img = Image.open(image_name)\n w, h = img.size\n\n if w < h:\n s = w\n else:\n s = h\n\n left = (w - s) / 2\n right = left + s\n top = (h - s) / 2\n bottom = top + s\n\n img = img.crop((left, top, right, bottom))\n img = img.resize((new_size, new_size))\n\n return img\n\n\nif len(sys.argv) < 3:\n print(f'USAGE: {sys.argv[0]} <size> <image>')\n sys.exit(-1)\n\nSIZE = int(sys.argv[1])\nIMG = sys.argv[2]\n\nimg = crop_and_resize(IMG, SIZE)\n\n# load the AnimeGAN v2 model using the pretrained Face Portrait v2 weights\npt_model = torch.hub.load(\"bryandlee/animegan2-pytorch:main\", \"generator\", \n pretrained=\"face_paint_512_v2\")\npt_model.eval()\n\n# load the Core ML version of the AnimeGAN v2 model\ncml_model = ct.models.MLModel(f\"AnimeGANv2_{SIZE}.mlmodel\")\n\n# load a helper function for the PyTorch model\nface2paint = torch.hub.load(\"bryandlee/animegan2-pytorch:main\", \"face2paint\", size=SIZE)\n\n# run inference on the models\nout_pt = face2paint(pt_model, img, side_by_side=False)\nout_cml = cml_model.predict({'input': img})['output']\n\nbase, ext = os.path.splitext(IMG)\nout_pt.save(f'{base}_pt.png')\nout_cml.save(f'{base}_cml.png')\n\n# convert the output to numpy arrays removing any alpha channel, if present\nout_pt = np.array(out_pt)[:, :, :3]\nout_cml = np.array(out_cml)[:, :, :3]\n\n# flatten the arrays\nout_pt = out_pt.flatten()\nout_cml = out_cml.flatten()\n\n
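# note: distance.cosine(u, v) returns 1 - cos(theta) = 1 - u.v/(|u||v|), so subtracting\n# it from 1.0 below recovers cos(theta); for non-negative pixel vectors that value\n# lies in [0, 1] and is reported as a percentage.\n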
# calculate the cosine similarity\ncosine_similarity = 1.0 - distance.cosine(out_pt, out_cml)\nprint(f'Similarity: {cosine_similarity * 100:.2f}%')\n","repo_name":"yonomitt/Anime-fy-Yourself","sub_path":"python/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"21353781612","text":" \ndef run_1():\n print(max([sum(map(int, [i for i in elf.split('\\n') if i != ''])) for elf in open('input.txt', 'r').read().split('\\n\\n')]))\n\n\ndef run_2():\n with open('input.txt', 'r') as f:\n data = f.readlines()\n\n elf_cal_list = []\n cur_cal_counter = 0\n for line in data:\n if line == '\\n':\n elf_cal_list.append(cur_cal_counter)\n cur_cal_counter = 0\n continue\n\n cur_cal_counter += int(line)\n\n # keep the final elf's total in case the file does not end with a blank line\n elf_cal_list.append(cur_cal_counter)\n\n elf_cal_list.sort()\n print(sum(elf_cal_list[-3:]))\n\nif __name__ == '__main__':\n run_1()\n run_2()\n","repo_name":"timurgen/AdventOfCode2022","sub_path":"day_1/day_1.py","file_name":"day_1.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"16925837258","text":"\n\n\n\nclass service:\n secret = \"Yeong-gu has two belly buttons.\"\n def __init__(self,name):\n self.name = name\n print(\"Initialized member variable %s.\" % name)\n def sum(self,a,b):\n result = a + b\n print(\"%s, %s + %s = %s.\" % (self.name,a,b,result))\n def __del__(self):\n print(\"Thank you for using our service.\")\n\ninput()\npey = service('홍길동')\ninput()\npey.sum(1,1)\ninput()","repo_name":"itminha123/jumptopython","sub_path":"01.jumptopython/chap05/book/185.py","file_name":"185.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"32715158102","text":"\"\"\"AFK Plugin for @UniBorg\nSyntax: .afk REASON\"\"\"\nimport asyncio\nimport datetime\nfrom telethon import events\nfrom telethon.tl import functions, types\n\nfrom random import choice, randint\nfrom asyncio import sleep\n\nfrom telethon.events import StopPropagation\n\nfrom userbot import AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG,BOTLOG_CHATID, USERS, PM_AUTO_BAN\n \nfrom userbot.utils import register\n\nfrom userbot.utils import admin_cmd\n\nglobal USER_AFK # pylint:disable=E0602\nglobal afk_time # pylint:disable=E0602\nglobal last_afk_message # pylint:disable=E0602\nUSER_AFK = {}\nafk_time = None\nlast_afk_message = {}\n\nAFKSTR = [\n \"I'm busy right now. Please talk in a bag and when I come back you can just give me the bag!\",\n \"I'm away right now. 
If you need anything, leave a message after the beep:\\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!\",\n \"You missed me, next time aim better.\",\n \"I'll be back in a few minutes and if I'm not...,\\nwait longer.\",\n \"I'm not here right now, so I'm probably somewhere else.\",\n \"Roses are red,\\nViolets are blue,\\nLeave me a message,\\nAnd I'll get back to you.\",\n \"Sometimes the best things in life are worth waiting for…\\nI'll be right back.\",\n \"I'll be right back,\\nbut if I'm not right back,\\nI'll be back later.\",\n \"If you haven't figured it out already,\\nI'm not here.\",\n \"Hello, welcome to my away message, how may I ignore you today?\",\n \"I'm away over 7 seas and 7 countries,\\n7 waters and 7 continents,\\n7 mountains and 7 hills,\\n7 plains and 7 mounds,\\n7 pools and 7 lakes,\\n7 springs and 7 meadows,\\n7 cities and 7 neighborhoods,\\n7 blocks and 7 houses...\\n\\nWhere not even your messages can reach me!\",\n \"I'm away from the keyboard at the moment, but if you'll scream loud enough at your screen, I might just hear you.\",\n \"I went that way\\n---->\",\n \"I went this way\\n<----\",\n \"Please leave a message and make me feel even more important than I already am.\",\n \"I am not here so stop writing to me,\\nor else you will find yourself with a screen full of your own messages.\",\n \"If I were here,\\nI'd tell you where I am.\\n\\nBut I'm not,\\nso ask me when I return...\",\n \"I am away!\\nI don't know when I'll be back!\\nHopefully a few minutes from now!\",\n \"I'm not available right now so please leave your name, number, and address and I will stalk you later.\",\n \"Sorry, I'm not here right now.\\nFeel free to talk to my userbot as long as you like.\\nI'll get back to you later.\",\n \"I bet you were expecting an away message!\",\n \"Life is so short, there are so many things to do...\\nI'm away doing one of them..\",\n \"I am not here right now...\\nbut if I was...\\n\\nwouldn't that be awesome?\",\n]\n\n@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602\nasync def set_not_afk(event):\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global last_afk_message # pylint:disable=E0602\n current_message = event.message.message\n if \".afk\" not in current_message and \"yes\" in USER_AFK: # pylint:disable=E0602\n try:\n await borg.send_message( # pylint:disable=E0602\n Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602\n \"Set AFK mode to False\"\n )\n except Exception as e: # pylint:disable=C0103,W0703\n await borg.send_message( # pylint:disable=E0602\n event.chat_id,\n \"Please set `PRIVATE_GROUP_BOT_API_ID` \" + \\\n \"for the proper functioning of afk functionality \" + \\\n \"in @xtratgbot\\nCheck pinned message for more info.\\n\\n `{}`\".format(str(e)),\n reply_to=event.message.id,\n silent=True\n )\n USER_AFK = {} # pylint:disable=E0602\n afk_time = None # pylint:disable=E0602\n\n\n@borg.on(events.NewMessage(pattern=r\"\\.afk ?(.*)\", outgoing=True)) # pylint:disable=E0602\nasync def _(event):\n if event.fwd_from:\n return\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global last_afk_message # pylint:disable=E0602\n global reason\n USER_AFK = {}\n afk_time = None\n last_afk_message = {}\n reason = event.pattern_match.group(1)\n if not USER_AFK: # pylint:disable=E0602\n last_seen_status = await borg( # pylint:disable=E0602\n functions.account.GetPrivacyRequest(\n types.InputPrivacyKeyStatusTimestamp()\n )\n )\n if isinstance(last_seen_status.rules, 
types.PrivacyValueAllowAll):\n afk_time = datetime.datetime.now() # pylint:disable=E0602\n USER_AFK = f\"yes: {reason}\" # pylint:disable=E0602\n if reason:\n await event.edit(f\"Set AFK mode to True, and Reason is {reason}\")\n else:\n await event.edit(f\"Set AFK mode to True\")\n await asyncio.sleep(5)\n await event.delete()\n try:\n await borg.send_message( # pylint:disable=E0602\n Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602\n f\"Set AFK mode to True, and Reason is {reason}\"\n )\n except Exception as e: # pylint:disable=C0103,W0703\n logger.warn(str(e)) # pylint:disable=E0602\n\n\n@borg.on(events.NewMessage( # pylint:disable=E0602\n incoming=True,\n func=lambda e: bool(e.mentioned or e.is_private)\n))\nasync def on_afk(event):\n if event.fwd_from:\n return\n global USER_AFK # pylint:disable=E0602\n global afk_time # pylint:disable=E0602\n global last_afk_message # pylint:disable=E0602\n afk_since = \"**a while ago**\"\n current_message_text = event.message.message.lower()\n if \"afk\" in current_message_text:\n # userbot's should not reply to other userbot's\n # https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots\n return False\n if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602\n if afk_time: # pylint:disable=E0602\n now = datetime.datetime.now()\n datime_since_afk = now - afk_time # pylint:disable=E0602\n time = float(datime_since_afk.seconds)\n days = time // (24 * 3600)\n time = time % (24 * 3600)\n hours = time // 3600\n time %= 3600\n minutes = time // 60\n time %= 60\n seconds = time\n if days == 1:\n afk_since = \"**Yesterday**\"\n elif days > 1:\n if days > 6:\n date = now + \\\n datetime.timedelta(\n days=-days, hours=-hours, minutes=-minutes)\n afk_since = date.strftime(\"%A, %Y %B %m, %H:%I\")\n else:\n wday = now + datetime.timedelta(days=-days)\n afk_since = wday.strftime('%A')\n elif hours > 1:\n afk_since = f\"`{int(hours)}h{int(minutes)}m` **ago**\"\n elif minutes > 0:\n afk_since = f\"`{int(minutes)}m{int(seconds)}s` **ago**\"\n else:\n afk_since = f\"`{int(seconds)}s` **ago**\"\n msg = None\n message_to_reply = f\"My Master Has Been Gone For {afk_since}\\nWhere He Is: ONLY GOD KNOWS \" + \\\n f\"\\n\\n__I promise I'll back in a few hours__\\n**REASON**: {reason}\" \\\n if reason \\\n else (str(choice(AFKSTR)))\n msg = await event.reply(message_to_reply)\n await asyncio.sleep(5)\n if event.chat_id in last_afk_message: # pylint:disable=E0602\n await last_afk_message[event.chat_id].delete() # pylint:disable=E0602\n last_afk_message[event.chat_id] = msg # pylint:disable=E0602\n","repo_name":"Solivagantt/userbot","sub_path":"userbot/plugins/afk.py","file_name":"afk.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73388375306","text":"# Partition Array into Disjoint Intervals\nfrom typing import List\n\n\nclass Solution:\n def partitionDisjoint(self, A: List[int]) -> int:\n p1, p2 = 0, 1\n maxN = A[0]\n while p2 < len(A):\n if A[p2] < maxN:\n maxN = max(maxN, max(A[p1+1:p2+1]))\n p1 = p2\n p2 += 1\n return p1 + 1\n\ns = Solution()\nprint(s.partitionDisjoint([1,1,1,0,6,12]))\n","repo_name":"GavinPHR/code","sub_path":"phase4/915.py","file_name":"915.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32628432589","text":"from Bio.Seq import Seq \n\n\ndef find(target,hamming):\n result = []\n base = 
['A','C','G','T']\n\n def ca(remain_distance,current_seq,index):\n if remain_distance == 0:\n result.append(current_seq)\n return\n\n for u in range(index,len(current_seq)):\n for char in base:\n if current_seq[u] != char:\n new_seq = current_seq[:u] + char + current_seq[u+1:]\n ca(remain_distance-1,new_seq,u+1)\n ca(hamming,target,0)\n return result\n\n\nwith open('rosalind_ba1j (1).txt','r') as f:\n seq = f.readline().strip()\n info = f.readline().strip().split()\n info = [int(i) for i in info]\nfivethree = []\n\nneighbors = []\nrev_neighbors = []\nneighbors_di = {}\nrev_neighbors_di = {}\nfor i in range(len(seq)-info[0]+1):\n particle = Seq(seq[i:i+info[0]])\n fivethree.append(particle)\nfor part in fivethree:\n for number in range(info[1]+1):\n a = find(part,number)\n for s in a:\n neighbors.append(s)\nfor i in neighbors:\n i = Seq(i)\n rev_neighbors.append(i.reverse_complement())\n\n\n# count, for each candidate k-mer, how many of the text's k-mers it matches within the allowed number of mismatches\nfor i in neighbors:\n gaesoo = 0\n for s in fivethree:\n count = 0\n for u in range(info[0]):\n if i[u] != s[u]:\n count += 1\n if count <= info[1]:\n gaesoo += 1\n neighbors_di[i] = gaesoo\n\n\nfor i in rev_neighbors:\n gaesoo = 0\n for s in fivethree:\n count = 0\n for u in range(info[0]):\n if i[u] != s[u]:\n count += 1\n if count <= info[1]:\n gaesoo += 1\n rev_neighbors_di[i] = gaesoo\nsum_list = []\nfor kone,ktwo in zip(neighbors_di.keys(),rev_neighbors_di.keys()):\n total = neighbors_di[kone] + rev_neighbors_di[ktwo]\n sum_list.append(total)\n\nfor kone,ktwo in zip(neighbors_di.keys(),rev_neighbors_di.keys()):\n if neighbors_di[kone]+rev_neighbors_di[ktwo] == max(sum_list):\n print(kone,end=' ')\n","repo_name":"codenamegyoungho/rosalind_solution","sub_path":"rosalind_1J.py","file_name":"rosalind_1J.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"37690391025","text":"\"\"\"\r\nProgram: NanoporeFiltration\r\nDescription: Apply filtration and compare kma results from testfiles (Reduced coverage) with reference results from ABRicate.\r\n Output effect of filtration, results after filtration and count of correct, missing and extra hits for plots.\r\nVersion: 1.0\r\nAuthor: Casper Westergaard\r\nExample string:\r\npython3 /srv/data/AS/CASW/scripts/Wrapper/GitHub/NanoporeFiltration.py -ip /srv/data/AS/CASW/data/phenotypes.txt -if /srv/data/AS/CASW/resultater/HAC/CPO20180005/kma -o /srv/data/AS/CASW/resultater/HAC/CPO20180005/ -ref /srv/data/AS/CASW/data/HAC/CPO20180005/assembly.fasta.resfinder.tsv -df 0.45 -md 0.2 -cl 1,2,3,4,5,6,7,8,9,10,15,20,30,40,50,60\r\n\"\"\"\r\n# Import libraries\r\nimport argparse\r\nimport csv\r\nimport glob\r\nimport os\r\nimport sys\r\n\r\n###########################################################################\r\n# FUNCTIONS\r\n########################################################################### \r\n\r\nclass Gene:\r\n def __init__(self, ab_class, phenotype, group):\r\n self.ab_class = ab_class\r\n self.phenotype = phenotype\r\n self.group = group\r\n\r\n
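# Illustrative instance (hypothetical values, matching the constructor order above):\r\n# Gene('beta-lactam', 'Carbapenem', 'blaNDM') is the kind of record keyed by a name\r\n# such as 'blaNDM-1_FN396876' in the gene dictionary built further below.\r\n\r\n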
def get_genes_from_ref_file(csvfile, coverage_cutoff, identity_cutoff):\r\n \"\"\" Return a list of genes that are above the given identity and\r\n coverage thresholds.\r\n Each gene is a string created from gene_name and accession no.\r\n \"\"\"\r\n genes = list()\r\n\r\n with open(csvfile, 'r') as ref_results_file:\r\n\r\n reader = csv.reader(ref_results_file, delimiter='\\t')\r\n # ignore first row\r\n next(reader)\r\n\r\n for row in reader:\r\n coverage = float(row[8])\r\n identity = float(row[9])\r\n gene_name = row[4]\r\n accession = row[11]\r\n\r\n if(coverage > coverage_cutoff and identity > identity_cutoff):\r\n genes.append(\"{}_{}\".format(gene_name, accession))\r\n\r\n return genes\r\n\r\ndef get_genes_from_test_file(csvfile, coverage_cutoff, identity_cutoff):\r\n \"\"\" Return a list of genes that are above the given identity and\r\n coverage thresholds.\r\n Each gene is a string created from gene_name and accession no.\r\n \"\"\"\r\n genes = list()\r\n\r\n with open(csvfile, 'r') as test_results_file:\r\n\r\n reader = csv.reader(test_results_file, delimiter='\\t')\r\n # ignore first row\r\n next(reader)\r\n\r\n for row in reader:\r\n template_identity = float(row[4])\r\n template_coverage = float(row[5])\r\n query_identity = float(row[6])\r\n query_coverage = float(row[7])\r\n depth = float(row[8])\r\n gene_name = row[0].split('~~~')[1]\r\n accession = row[0].split('~~~')[2]\r\n\r\n if (template_identity >= identity_cutoff and template_coverage >= coverage_cutoff \\\r\n and query_identity >= identity_cutoff and query_coverage >= coverage_cutoff):\r\n genes.append([\"{}_{}\".format(gene_name, accession),[template_identity,template_coverage,query_identity,query_coverage,depth]])\r\n\r\n return genes\r\n\r\ndef get_gene_group(gene_name):\r\n \"\"\" Extract the gene group from the gene name and return it\r\n \"\"\"\r\n gene_group = gene_name[0:3]\r\n\r\n if gene_group == 'bla':\r\n if len(gene_name.split('-')) == 1:\r\n\r\n if gene_name[0:6] == 'blaBEL':\r\n gene_group = 'blaBEL'\r\n else:\r\n gene_group = gene_name.split('_')[0]\r\n\r\n else:\r\n gene_group = gene_name.split('-')[0]\r\n\r\n return gene_group\r\n\r\n\r\ndef group_genes(gene_name, gene_group_dict):\r\n \"\"\" Take a gene name and add it to the appropriate gene group in the\r\n given dictionary\r\n \"\"\"\r\n gene_group = get_gene_group(gene_name)\r\n\r\n gene_list = gene_group_dict.get(gene_group, [])\r\n gene_list.append(gene_name)\r\n gene_group_dict[gene_group] = gene_list\r\n\r\n return gene_group_dict\r\n\r\n###########################################################################\r\n# GET INPUT\r\n###########################################################################\r\n \r\n# Input from commandline\r\n# Required input\r\nparser = argparse.ArgumentParser(description='Compare results from reference and testfiles')\r\nparser.add_argument('-ip', type=str, dest='input_phenotypes',\r\n help='Input file containing name and phenotype of all genes in database', required=True)\r\nparser.add_argument('-o', type=str, dest='output',\r\n help='Path to folder to put result in', required=True)\r\nparser.add_argument('-ref', type=str, dest='ref_file',\r\n help='Path to results from reference, used to compare other files with', required=True)\r\nparser.add_argument('-if', type=str, dest='input_folder',\r\n help='Path to folder containing kma results files for comparison with reference results', required=True)\r\nparser.add_argument('-cl', type=str, dest='coverage_list',\r\n help='List of desired coverages, a new file will be created for each element,'\r\n ' coverages should be comma-delimited and ordered from lowest to highest', required=True)\r\n\r\n# Optional input\r\nparser.add_argument('-df', type=float, dest='depth_filtration', default='0',\r\n help='Set depth filtration, default 0, input value between 0 and 1', required=False)\r\nparser.add_argument('-md', type=float, dest='min_depth', default='0',\r\n help='Minimum depth for gene-groups to be included, default 0, input value between 0 and 1', required=False)\r\n
parser.add_argument('-id', type=float, dest='identity_cutoff', default='90',\r\n help='Minimum identity for genes to be included, default 90, input value between 0 and 100', required=False)\r\nparser.add_argument('-cov', type=float, dest='coverage_cutoff', default='90',\r\n help='Minimum coverage for genes to be included, default 90, input value between 0 and 100', required=False)\r\nargs = parser.parse_args()\r\n\r\n# Initialize variables\r\noutput_path = '{}Depth={}_Min_Depth={}/'.format(args.output,args.depth_filtration,args.min_depth)\r\nos.makedirs('{}'.format(output_path), exist_ok=True)\r\nisolate = args.input_folder.split('/')[-3] # Get isolate name based on input folder\r\n\r\n# Check that all coverages in the list are ints\r\ntry:\r\n args.coverage_list = [int(x) for x in args.coverage_list.split(',')]\r\nexcept ValueError as err:\r\n print('Coverages must be integer values:', str(err))\r\n sys.exit(1)\r\nfor i, cov in enumerate(args.coverage_list):\r\n if i < len(args.coverage_list)-1:\r\n if cov > args.coverage_list[i + 1]:\r\n sys.exit('Coverages must be ordered from lowest to highest')\r\n\r\n# Create labels and minimum depth relative to coverage\r\nlabels = list()\r\nmin_depths = list()\r\nfor i in range(len(args.coverage_list)):\r\n labels.append('Nanopore Cov_'+str(args.coverage_list[i]))\r\n if args.min_depth > 0:\r\n min_depths.append(args.min_depth*int(args.coverage_list[i]))\r\n else:\r\n min_depths.append(0)\r\n# Add Illumina\r\nlabels.append('Illumina')\r\n\r\n###########################################################################\r\n# GET GENES FROM REFERENCE AND TESTFILES\r\n###########################################################################\r\n\r\n# Create gene-database containing Antibiotic class, Phenotype and Gene-group for each gene\r\n# Based on phenotypes.txt from the ResFinder database\r\ninputfile = open(args.input_phenotypes,'r')\r\ngene_dict_db = dict()\r\ninputfile.readline() # To not include the header in the list\r\nfor line in inputfile:\r\n line = line.split('\\t')\r\n gene_name = line[0].strip()\r\n phenotype = line[2].replace(' ','')\r\n antibiotic_class = line[1].replace(' ','').lower()\r\n # Inconsistencies between phenotypes.txt and antibiotic_classes.txt\r\n if antibiotic_class == 'lincosamides':\r\n antibiotic_class = 'lincosamide'\r\n gene_group = get_gene_group(gene_name)\r\n gene_dict_db[gene_name] = Gene(antibiotic_class, phenotype, gene_group)\r\n\r\n# Create database of genes in reference file\r\nref_genes = get_genes_from_ref_file(args.ref_file, args.coverage_cutoff, args.identity_cutoff)\r\nref_db = dict()\r\nfor ref_gene_name in ref_genes:\r\n if(ref_gene_name in gene_dict_db):\r\n gene = gene_dict_db[ref_gene_name]\r\n if gene.ab_class not in ref_db:\r\n ref_db[gene.ab_class] = dict()\r\n if gene.phenotype not in ref_db[gene.ab_class]:\r\n ref_db[gene.ab_class][gene.phenotype] = dict()\r\n if gene.group not in ref_db[gene.ab_class][gene.phenotype]:\r\n ref_db[gene.ab_class][gene.phenotype][gene.group] = list()\r\n ref_db[gene.ab_class][gene.phenotype][gene.group].append(ref_gene_name)\r\n\r\n
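# For orientation, ref_db is nested as ab_class -> phenotype -> gene-group -> [genes];\r\n# an illustrative (not real-run) entry: ref_db['beta-lactam']['Carbapenem']['blaNDM'] == ['blaNDM-1_FN396876']\r\n\r\n# Create database of genes in test files. 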
Save depth information about each gene-group.\r\n# Gene-group information is used for filtration later on.\r\ntestfiles_db = list() # list of dictionaries, one for each file \r\ntestfiles_gene_groups = list() # list of dictionaries, one for each file \r\ninput_files = glob.glob(args.input_folder+'*.res')\r\ninput_files.sort()\r\nfor file in input_files:\r\n testfiles_db.append(dict())\r\n testfiles_gene_groups.append(dict())\r\n testfile_genes = get_genes_from_test_file(file, args.coverage_cutoff, args.identity_cutoff)\r\n for testfile_gene_name in testfile_genes:\r\n if(testfile_gene_name[0] in gene_dict_db):\r\n gene = gene_dict_db[testfile_gene_name[0]]\r\n if gene.ab_class not in testfiles_db[-1]:\r\n testfiles_db[-1][gene.ab_class] = dict()\r\n if gene.phenotype not in testfiles_db[-1][gene.ab_class]:\r\n testfiles_db[-1][gene.ab_class][gene.phenotype] = dict()\r\n if gene.group not in testfiles_db[-1][gene.ab_class][gene.phenotype]:\r\n testfiles_db[-1][gene.ab_class][gene.phenotype][gene.group] = list()\r\n testfiles_db[-1][gene.ab_class][gene.phenotype][gene.group].append(testfile_gene_name)\r\n if gene.group not in testfiles_gene_groups[-1]:\r\n testfiles_gene_groups[-1][gene.group] = list()\r\n testfiles_gene_groups[-1][gene.group].append(testfile_gene_name)\r\n\r\n###########################################################################\r\n# DEPTH FILTRATION ON NANOPORE DATA\r\n########################################################################### \r\n \r\n# Sort genes in testfiles_gene_groups based on depth, and save total depth of each gene-group in total_depths\r\ntotal_depths = list() \r\nfor testfile in testfiles_gene_groups:\r\n total_depths.append(dict())\r\n for group in testfile:\r\n testfile[group].sort(key=lambda x: x[1][4],reverse = True) # Sort genes based on depth\r\n total_depths[-1][group] = sum([testfile[group][x][1][4] for x in range(len(testfile[group]))])\r\n\r\n# Filter by depth. 
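(Worked example with the -df 0.45 from the docstring: if the\r\n# highest-depth gene in a group sits at an illustrative 40.0x, genes in that group below\r\n# 0.45 * 40.0 = 18.0x are dropped.)\r\n# 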
If a gene has a depth lower than depth_cutoff*highest depth gene of that gene group,\r\n# or a gene-group depth lower than min_depth, then discard it.\r\ntestfiles_db_filtered = list()\r\nfor i in range(len(testfiles_db)-1): # No filtration on Illumina\r\n testfiles_db_filtered.append(dict())\r\n for ab_class in testfiles_db[i]:\r\n for phenotype in testfiles_db[i][ab_class]:\r\n for group in testfiles_db[i][ab_class][phenotype]:\r\n for gene in testfiles_db[i][ab_class][phenotype][group]:\r\n gene_depth = gene[1][4]\r\n max_gene_depth = testfiles_gene_groups[i][group][0][1][4] # Depth of highest depth gene in that gene-group\r\n # Depth filtration\r\n if gene_depth >= args.depth_filtration*max_gene_depth and total_depths[i][group] > min_depths[i]:\r\n if ab_class not in testfiles_db_filtered[i]:\r\n testfiles_db_filtered[i][ab_class] = dict()\r\n if phenotype not in testfiles_db_filtered[i][ab_class]:\r\n testfiles_db_filtered[i][ab_class][phenotype] = dict()\r\n if group not in testfiles_db_filtered[i][ab_class][phenotype]:\r\n testfiles_db_filtered[i][ab_class][phenotype][group] = list()\r\n testfiles_db_filtered[i][ab_class][phenotype][group].append(gene)\r\ntestfiles_db_filtered.append(testfiles_db[-1]) # Add Illumina results to filtered results\r\n\r\n###########################################################################\r\n# COMPARE FILTRATED RESULTS WITH REFERENCE\r\n########################################################################### \r\n \r\n# Compare testfiles with reference and save Correct, Missing and Extra hits.\r\n# Planned update: Include functions to improve readability\r\nmissing_class = list()\r\nextra_class = list()\r\ncorrect_class = list()\r\nmissing_phenotype = list()\r\nextra_phenotype = list()\r\ncorrect_phenotype = list()\r\nmissing_group = list()\r\nextra_group = list()\r\ncorrect_group = list()\r\nmissing_gene = list()\r\nextra_gene = list()\r\ncorrect_gene = list()\r\nfor i in range(len(testfiles_db_filtered)):\r\n missing_class.append(dict())\r\n extra_class.append(dict())\r\n correct_class.append(dict())\r\n missing_phenotype.append(dict())\r\n extra_phenotype.append(dict())\r\n correct_phenotype.append(dict())\r\n missing_group.append(dict())\r\n extra_group.append(dict())\r\n correct_group.append(dict())\r\n missing_gene.append(dict())\r\n extra_gene.append(dict())\r\n correct_gene.append(dict())\r\n # Compare testfiles with reference to identify correct and extra hits\r\n for ab_class in testfiles_db_filtered[i]:\r\n for phenotype in testfiles_db_filtered[i][ab_class]:\r\n for group in testfiles_db_filtered[i][ab_class][phenotype]:\r\n for gene_info in testfiles_db_filtered[i][ab_class][phenotype][group]: # Also contains information about depth, coverage and identity\r\n gene = gene_info[0] # Gene name and accesion number, to compare with reference.\r\n \r\n if ab_class in ref_db:\r\n correct_class[i].setdefault(ab_class, []).append(gene_info)\r\n \r\n if phenotype in ref_db[ab_class]:\r\n correct_phenotype[i].setdefault(ab_class, {})\r\n correct_phenotype[i][ab_class].setdefault(phenotype, []).append(gene_info)\r\n \r\n if group in ref_db[ab_class][phenotype]:\r\n correct_group[i].setdefault(ab_class, {})\r\n correct_group[i][ab_class].setdefault(phenotype, {})\r\n correct_group[i][ab_class][phenotype].setdefault(group, []).append(gene_info)\r\n \r\n if gene in ref_db[ab_class][phenotype][group]:\r\n correct_gene[i].setdefault(ab_class, {})\r\n correct_gene[i][ab_class].setdefault(phenotype, {})\r\n 
correct_gene[i][ab_class][phenotype].setdefault(group, {})\r\n correct_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene_info)\r\n else:\r\n extra_gene[i].setdefault(ab_class, {})\r\n extra_gene[i][ab_class].setdefault(phenotype, {})\r\n extra_gene[i][ab_class][phenotype].setdefault(group, {})\r\n extra_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene_info)\r\n \r\n else:\r\n extra_group[i].setdefault(ab_class, {})\r\n extra_group[i][ab_class].setdefault(phenotype, {})\r\n extra_group[i][ab_class][phenotype].setdefault(group, []).append(gene_info)\r\n extra_gene[i].setdefault(ab_class, {})\r\n extra_gene[i][ab_class].setdefault(phenotype, {})\r\n extra_gene[i][ab_class][phenotype].setdefault(group, {})\r\n extra_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene_info)\r\n \r\n else:\r\n extra_phenotype[i].setdefault(ab_class, {})\r\n extra_phenotype[i][ab_class].setdefault(phenotype, []).append(gene_info)\r\n extra_group[i].setdefault(ab_class, {})\r\n extra_group[i][ab_class].setdefault(phenotype, {})\r\n extra_group[i][ab_class][phenotype].setdefault(group, []).append(gene_info)\r\n extra_gene[i].setdefault(ab_class, {})\r\n extra_gene[i][ab_class].setdefault(phenotype, {})\r\n extra_gene[i][ab_class][phenotype].setdefault(group, {})\r\n extra_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene_info)\r\n \r\n else:\r\n extra_class[i].setdefault(ab_class, []).append(gene_info)\r\n extra_phenotype[i].setdefault(ab_class, {})\r\n extra_phenotype[i][ab_class].setdefault(phenotype, []).append(gene_info)\r\n extra_group[i].setdefault(ab_class, {})\r\n extra_group[i][ab_class].setdefault(phenotype, {})\r\n extra_group[i][ab_class][phenotype].setdefault(group, []).append(gene_info)\r\n extra_gene[i].setdefault(ab_class, {})\r\n extra_gene[i][ab_class].setdefault(phenotype, {})\r\n extra_gene[i][ab_class][phenotype].setdefault(group, {})\r\n extra_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene_info)\r\n \r\n # Compare reference with testfiles to identify missing hits \r\n for ab_class in ref_db: \r\n for phenotype in ref_db[ab_class]:\r\n for group in ref_db[ab_class][phenotype]:\r\n for gene in ref_db[ab_class][phenotype][group]:\r\n \r\n if ab_class not in testfiles_db_filtered[i]:\r\n missing_class[i].setdefault(ab_class, []).append(gene)\r\n missing_phenotype[i].setdefault(ab_class, {})\r\n missing_phenotype[i][ab_class].setdefault(phenotype, []).append(gene)\r\n missing_group[i].setdefault(ab_class, {})\r\n missing_group[i][ab_class].setdefault(phenotype, {})\r\n missing_group[i][ab_class][phenotype].setdefault(group, []).append(gene)\r\n missing_gene[i].setdefault(ab_class, {})\r\n missing_gene[i][ab_class].setdefault(phenotype, {})\r\n missing_gene[i][ab_class][phenotype].setdefault(group, {})\r\n missing_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene)\r\n \r\n else:\r\n if phenotype not in testfiles_db_filtered[i][ab_class]:\r\n missing_phenotype[i].setdefault(ab_class, {})\r\n missing_phenotype[i][ab_class].setdefault(phenotype, []).append(gene)\r\n missing_group[i].setdefault(ab_class, {})\r\n missing_group[i][ab_class].setdefault(phenotype, {})\r\n missing_group[i][ab_class][phenotype].setdefault(group, []).append(gene)\r\n missing_gene[i].setdefault(ab_class, {})\r\n missing_gene[i][ab_class].setdefault(phenotype, {})\r\n missing_gene[i][ab_class][phenotype].setdefault(group, {})\r\n missing_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene)\r\n \r\n 
else:\r\n if group not in testfiles_db_filtered[i][ab_class][phenotype]:\r\n missing_group[i].setdefault(ab_class, {})\r\n missing_group[i][ab_class].setdefault(phenotype, {})\r\n missing_group[i][ab_class][phenotype].setdefault(group, []).append(gene)\r\n missing_gene[i].setdefault(ab_class, {})\r\n missing_gene[i][ab_class].setdefault(phenotype, {})\r\n missing_gene[i][ab_class][phenotype].setdefault(group, {})\r\n missing_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene)\r\n \r\n else:\r\n gene_list = testfiles_db_filtered[i][ab_class][phenotype][group] # Also contains information about identity, coverage and depth\r\n if gene not in [gene_list[x][0] for x in range(len(gene_list))]: # Only gene name and accesion\r\n missing_gene[i].setdefault(ab_class, {})\r\n missing_gene[i][ab_class].setdefault(phenotype, {})\r\n missing_gene[i][ab_class][phenotype].setdefault(group, {})\r\n missing_gene[i][ab_class][phenotype][group].setdefault(gene, []).append(gene)\r\n\r\n###########################################################################\r\n# COUNT CORRECT, MISSING AND EXTRA HITS, USED TO GENERATE PLOTS\r\n########################################################################### \r\n# Count Correct, Extra and Missing hits. Don't include hits with Unknown phenotype.\r\ncorrect_class_count = list()\r\nmissing_class_count = list()\r\nextra_class_count = list() \r\ncorrect_phenotype_count = list()\r\nmissing_phenotype_count = list()\r\nextra_phenotype_count = list()\r\ncorrect_group_count = list()\r\nmissing_group_count = list()\r\nextra_group_count = list() \r\ncorrect_gene_count = list()\r\nmissing_gene_count = list()\r\nextra_gene_count = list()\r\nfor i in range(len(input_files)):\r\n # Count for Antibiotic classes \r\n correct_class_count.append(len(correct_class[i])) \r\n missing_class_count.append(len(missing_class[i])) \r\n extra_class_count.append(len(extra_class[i]))\r\n \r\n # Count for Phenotypes\r\n correct_phenotype_count.append(0) \r\n missing_phenotype_count.append(0) \r\n extra_phenotype_count.append(0)\r\n for ab_class in correct_phenotype[i]:\r\n for phenotype in correct_phenotype[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n correct_phenotype_count[i] += 1\r\n \r\n for ab_class in missing_phenotype[i]:\r\n for phenotype in missing_phenotype[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n missing_phenotype_count[i] += 1 \r\n \r\n for ab_class in extra_phenotype[i]:\r\n for phenotype in extra_phenotype[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n extra_phenotype_count[i] += 1\r\n \r\n # Count for Gene-groups\r\n correct_group_count.append(0) \r\n missing_group_count.append(0) \r\n extra_group_count.append(0)\r\n for ab_class in correct_group[i]:\r\n for phenotype in correct_group[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n correct_group_count[i] += len(correct_group[i][ab_class][phenotype])\r\n \r\n for ab_class in missing_group[i]:\r\n for phenotype in missing_group[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n missing_group_count[i] += len(missing_group[i][ab_class][phenotype])\r\n \r\n for ab_class in extra_group[i]:\r\n for phenotype in extra_group[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n extra_group_count[i] += len(extra_group[i][ab_class][phenotype])\r\n \r\n # Count for Gene-variants\r\n correct_gene_count.append(0) \r\n missing_gene_count.append(0) \r\n extra_gene_count.append(0)\r\n for ab_class in correct_gene[i]:\r\n for phenotype in correct_gene[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n for group in 
correct_gene[i][ab_class][phenotype]:\r\n correct_gene_count[i] += len(correct_gene[i][ab_class][phenotype][group])\r\n \r\n for ab_class in missing_gene[i]:\r\n for phenotype in missing_gene[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n for group in missing_gene[i][ab_class][phenotype]:\r\n missing_gene_count[i] += len(missing_gene[i][ab_class][phenotype][group])\r\n \r\n for ab_class in extra_gene[i]:\r\n for phenotype in extra_gene[i][ab_class]:\r\n if phenotype != 'Unknown':\r\n for group in extra_gene[i][ab_class][phenotype]:\r\n extra_gene_count[i] += len(extra_gene[i][ab_class][phenotype][group]) \r\n\r\n###########################################################################\r\n# OUTPUT RESULTS\r\n########################################################################### \r\n# Output Counts, used for generating plots\r\nfilename = '{}{}_summary_stats.txt'.format(output_path,isolate)\r\nwith open(filename, 'w') as outfile:\r\n outfile.write(\"Identity cutoff = {}\\nCoverage cutoff = {}\\nDepth cutoff (Gene-group) = {}%\\nMinimum Depth = {}%\"\r\n .format(args.identity_cutoff,args.coverage_cutoff,100*args.depth_filtration,100*args.min_depth))\r\n outfile.write('\\n##Label\\tCorrect hits\\tMissing hits\\tExtra hits')\r\n outfile.write('\\n\\n#Antibiotic class')\r\n for i in range(len(labels)):\r\n outfile.write('\\n{}\\t{}\\t{}\\t{}'.format(labels[i],correct_class_count[i],missing_class_count[i],extra_class_count[i])) \r\n outfile.write('\\n\\n#Phenotype')\r\n for i in range(len(labels)):\r\n outfile.write('\\n{}\\t{}\\t{}\\t{}'.format(labels[i],correct_phenotype_count[i],missing_phenotype_count[i],extra_phenotype_count[i])) \r\n outfile.write('\\n\\n#Gene-group')\r\n for i in range(len(labels)):\r\n outfile.write('\\n{}\\t{}\\t{}\\t{}'.format(labels[i],correct_group_count[i],missing_group_count[i],extra_group_count[i])) \r\n outfile.write('\\n\\n#Gene')\r\n for i in range(len(labels)):\r\n outfile.write('\\n{}\\t{}\\t{}\\t{}'.format(labels[i],correct_gene_count[i],missing_gene_count[i],extra_gene_count[i])) \r\n\r\n\r\n# Output effect of depth filtration\r\nos.chdir(args.input_folder)\r\ninput_files = glob.glob('*.res')\r\ninput_files.sort()\r\ninput_files.pop(-1) # No filtration on Illumina data\r\nfor i, file in enumerate(input_files):\r\n filename = '{}{}_Depth={}_Min_Depth={}_filtration.txt'.format(output_path,file,args.depth_filtration,args.min_depth)\r\n with open(filename, 'w') as outfile:\r\n # Output filtration parameters\r\n outfile.write('Effect of depth filtration on: {}\\nCutoffs for including genes are:\\nIdentity >= {}\\n'\r\n 'Coverage >= {}\\nDepth(Relative to highest depth gene from that gene-group) >= {}%'\r\n '\\nMinimum gene-group depth (Relative to avg. 
genome coverage) >= {}%\\n'\r\n .format(file,args.identity_cutoff,args.coverage_cutoff,100*args.depth_filtration,100*args.min_depth))\r\n \r\n # Output results of filtration\r\n for group in testfiles_gene_groups[i]:\r\n outfile.write('\\nTotal depth of genes from gene-group {} is: {}'.format(group,round(total_depths[i][group],2)))\r\n # Discarded gene-groups due to min_depth\r\n if total_depths[i][group] < min_depths[i]:\r\n outfile.write('\\nGene-group not included, due to threshold for minimum gene-group depth\\n')\r\n outfile.write('\\nDiscarded {} genes:\\n'.format(group))\r\n for gene in testfiles_gene_groups[i][group]:\r\n outfile.write('{} - With a depth of {}\\n'.format(gene[0],gene[1][4])) \r\n # Included gene-groups\r\n else:\r\n max_depth_gene = testfiles_gene_groups[i][group][0]\r\n outfile.write('\\nHighest depth gene from gene-group {} is:\\n{} - With a depth of: {}'\r\n .format(group,max_depth_gene[0],max_depth_gene[1][4]))\r\n gene_depth_cutoff = args.depth_filtration*max_depth_gene[1][4] \r\n outfile.write('\\nDepth threshold for including genes from gene-group {} is: {}'\r\n .format(group,round(gene_depth_cutoff,2)))\r\n # Genes included by depth filtration\r\n outfile.write('\\n\\nIncluded {}-genes:\\n'.format(group))\r\n for gene in testfiles_gene_groups[i][group]:\r\n gene_depth = gene[1][4]\r\n if gene_depth >= gene_depth_cutoff:\r\n outfile.write('{} - With a depth of {}\\n'.format(gene[0],gene[1][4])) \r\n else:\r\n break\r\n # Genes discarded by depth filtration\r\n outfile.write('\\nDiscarded {}-genes:\\n'.format(group))\r\n for gene in testfiles_gene_groups[i][group]: \r\n gene_depth = gene[1][4]\r\n if gene_depth < gene_depth_cutoff:\r\n outfile.write('{} - With a depth of {}\\n'.format(gene[0],gene[1][4]))\r\n \r\n# Output Correct, Missing and Extra hits after filtration, at gene-group level.\r\nos.chdir(args.input_folder)\r\ninput_files = glob.glob('*.res')\r\ninput_files.sort()\r\nfor i, file in enumerate(input_files):\r\n filename = '{}{}_Depth={}_Min_Depth={}_results.txt'.format(output_path,file,args.depth_filtration,args.min_depth)\r\n with open(filename, 'w') as outfile:\r\n outfile.write('Results after depth filtration on: {}\\nCutoffs for including genes are:\\nIdentity >= {}\\n'\r\n 'Coverage >= {}\\nDepth(Relative to highest depth gene from that gene-group) >= {}%'\r\n '\\nMinimum gene-group depth (Relative to avg. 
genome coverage) >= {}%\\n'\r\n .format(file,args.identity_cutoff,args.coverage_cutoff,100*args.depth_filtration,100*args.min_depth))\r\n outfile.write('Correct, Extra and Missing hits based on results at gene-group level\\n')\r\n outfile.write('Gene information: Gene-variant\\tTemplate-Identity\\tTemplate-Coverage\\tQuery-Identity\\tQuery-Coverage\\tDepth\\n')\r\n outfile.write('\\n############\\nCorrect hits\\n############')\r\n for ab_class in correct_group[i]:\r\n outfile.write('\\n###Antimicrobial class - {}'.format(ab_class))\r\n for phenotype in correct_group[i][ab_class]:\r\n outfile.write('\\n##Phenotype - {}'.format(phenotype))\r\n for group in correct_group[i][ab_class][phenotype]:\r\n genes = correct_group[i][ab_class][phenotype][group]\r\n outfile.write('\\n#Gene-group - {} - Total depth before filtration, across all phenotypes: {}\\n'.format(group,round(total_depths[i][group],2)))\r\n outfile.write('Included genes:\\n')\r\n for gene in genes:\r\n gene_name = gene[0]\r\n template_identity = gene[1][0]\r\n template_coverage = gene[1][1]\r\n query_identity = gene[1][2]\r\n query_coverage = gene[1][3]\r\n depth = gene[1][4]\r\n outfile.write('{} - {}\\t{}\\t{}\\t{}\\t{}\\n'.format(gene_name,template_identity,template_coverage,query_identity,query_coverage,depth))\r\n outfile.write('\\n############\\nExtra hits\\n############')\r\n for ab_class in extra_group[i]:\r\n outfile.write('\\n###Antimicrobial class - {}'.format(ab_class))\r\n for phenotype in extra_group[i][ab_class]:\r\n outfile.write('\\n##Phenotype - {}'.format(phenotype))\r\n for group in extra_group[i][ab_class][phenotype]:\r\n genes = extra_group[i][ab_class][phenotype][group]\r\n outfile.write('\\n#Gene-group - {} - Total depth before filtration, across all phenotypes: {}\\n'.format(group,round(total_depths[i][group],2)))\r\n outfile.write('Included genes:\\n')\r\n for gene in genes:\r\n gene_name = gene[0]\r\n template_identity = gene[1][0]\r\n template_coverage = gene[1][1]\r\n query_identity = gene[1][2]\r\n query_coverage = gene[1][3]\r\n depth = gene[1][4]\r\n outfile.write('{} - {}\\t{}\\t{}\\t{}\\t{}\\n'.format(gene_name,template_identity,template_coverage,query_identity,query_coverage,depth))\r\n outfile.write('\\n############\\nMissing hits\\n############')\r\n for ab_class in missing_group[i]:\r\n outfile.write('\\n###Antimicrobial class - {}'.format(ab_class))\r\n for phenotype in missing_group[i][ab_class]:\r\n outfile.write('\\n##Phenotype - {}\\n'.format(phenotype))\r\n for group in missing_group[i][ab_class][phenotype]:\r\n genes = missing_group[i][ab_class][phenotype][group]\r\n for gene in genes:\r\n gene_name = gene\r\n outfile.write('{}\\n'.format(gene_name))","repo_name":"CWestergaard/NanoporeFiltrationCompare","sub_path":"NanoporeFiltration.py","file_name":"NanoporeFiltration.py","file_ext":"py","file_size_in_byte":33755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32268421814","text":"import sys\r\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, \r\n QAction, QTableWidget,QTableWidgetItem,\r\n QVBoxLayout,QHeaderView,QSizePolicy)\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtCore import pyqtSlot,Qt\r\n\r\nclass Table(QWidget):\r\n '''Create the tables necessary for parts and labor\r\n '''\r\n def __init__(self,rows,columns):\r\n super().__init__()\r\n# self.showMaximized()\r\n self.rows=rows\r\n self.columns=columns\r\n self.size_policy=QSizePolicy.Expanding\r\n self.init()\r\n self.show()\r\n def 
init(self):\r\n '''Initialize all the necessary widgets for the table\r\n '''\r\n self.table_init()\r\n self.layout = QVBoxLayout(self)\r\n self.layout.addWidget(self.tableWidget)\r\n self.setLayout(self.layout)\r\n\r\n def table_init(self):\r\n '''Initialize the table entry area\r\n '''\r\n self.tableWidget = QTableWidget(self)\r\n self.tableWidget.setRowCount(self.rows)\r\n self.tableWidget.setColumnCount(self.columns)\r\n self.tableWidget.setSizePolicy(self.size_policy,self.size_policy)\r\n\r\n# header = self.tableWidget.horizontalHeader()\r\n# for i in range(0,self.columns): \r\n# header.setSectionResizeMode(i, QHeaderView.ResizeToContents)\r\n self.tableWidget.horizontalHeader(\r\n ).setSectionResizeMode(QHeaderView.ResizeToContents)\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Table(4, 4) # Table requires row and column counts; 4x4 here is just an example\r\n sys.exit(app.exec_())","repo_name":"alangburl/BEI_Invoice","sub_path":"table_widget.py","file_name":"table_widget.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"37754265071","text":"responses = ['Welcome to smart calculator.','My name is darcia:)','Thank you! Do revisit us.','Sorry, I was unable to solve your query:(']\r\n\r\ndef getVal(text):\r\n l = []\r\n for w in text.split(' '):\r\n try:\r\n l.append(float(w))\r\n except ValueError:\r\n pass\r\n return l\r\ndef add(a,b):\r\n return a+b\r\ndef mult(a,b):\r\n return a*b\r\ndef div(a,b):\r\n return a/b\r\ndef sub(a,b):\r\n return a-b\r\ndef oops():\r\n print(responses[3])\r\ndef close():\r\n print(responses[2])\r\n input(\"Press Enter to quit \")\r\n exit()\r\ndef name():\r\n print(responses[1])\r\ndef hcf(a,b):\r\n h = min(a,b)\r\n while h >= 1:\r\n if a%h==0 and b%h==0:\r\n return h\r\n h -= 1\r\ndef lcm(a,b):\r\n l = max(a,b)\r\n while l <= a*b:\r\n if l%a==0 and l%b==0:\r\n return l\r\n l += 1\r\ndef functions():\r\n print(\"The functions which we are providing are:-\")\r\n print(\"1. LCM and HCF of two numbers.\")\r\n print(\"2. Addition and Subtraction between two numbers.\")\r\n print(\"3. Division and Multiplication of two numbers.\")\r\n
 print(\"We expect some logical queries from the user which involve the functions mentioned.\")\r\n print(\"We hope that the user gets all their queries clear.\")\r\n print()\r\noperations = {'lcm':lcm,'hcf':hcf,'addition':add,'add':add,'plus':add,'minus':sub,'subtract':sub,'subtraction':sub,'div':div,'divide':div,'division':div,'multiply':mult,'multiplication':mult}\r\ncommands = {'exit':close,'close':close,'quit':close,'name':name}\r\n\r\nprint(responses[0])\r\nprint(responses[1])\r\nfunctions()\r\n\r\nwhile True:\r\n print()\r\n query = input(\"Enter your Query \")\r\n for q in query.split(' '):\r\n if q.lower() in operations.keys():\r\n try:\r\n l = getVal(query)\r\n r = operations[q.lower()](l[0],l[1])\r\n print(r)\r\n except Exception:\r\n print(\"Something went Wrong:( Please try again later!\")\r\n finally:\r\n break\r\n elif q.lower() in commands.keys():\r\n commands[q.lower()]()\r\n break\r\n else:\r\n # no keyword in the query matched an operation or command\r\n oops()\r\n","repo_name":"Ritikraj1126/Quiz_Game","sub_path":"Calci_in_python.py","file_name":"Calci_in_python.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16466533121","text":"import json\nimport string\nimport secrets\nimport time\nimport heapq\nimport random\nimport os\n\nalphabet = string.ascii_letters + string.digits\n\n# Node types\nCLIENT = 0\nENTRY_POINT = 1\nLOGGER = 2\nCHORD = 3\nCHORD_INTERNAL = 4\nTWEET = 5\n\n# Request protocols\nLOGIN_REQUEST = 0\nLOGIN_RESPONSE = 1\nCHORD_REQUEST = 2\nCHORD_RESPONSE = 3\nNEW_LOGGER_REQUEST = 4\nNEW_LOGGER_RESPONSE = 5\nALIVE_REQUEST = 6\nALIVE_RESPONSE = 7\nREGISTER_REQUEST = 8\nREGISTER_RESPONSE = 9\nTRANSFERENCE_REQUEST = 10\nTRANSFERENCE_RESPONSE = 11\nTRANSFERENCE_OVER = 12\nCREATE_TWEET_REQUEST = 13\nCREATE_TWEET_RESPONSE = 14\nRETWEET_REQUEST = 15\nRETWEET_RESPONSE = 16\nFOLLOW_REQUEST = 17\nFOLLOW_RESPONSE = 18\nFEED_REQUEST = 19\nFEED_RESPONSE = 20\nPROFILE_REQUEST = 21\nPROFILE_RESPONSE = 22\nNEW_ENTRYPOINT_REQUEST = 23\nNEW_ENTRYPOINT_RESPONSE = 24\nLOGOUT_REQUEST = 25\nLOGOUT_RESPONSE = 26\nGET_TOKEN = 27\nRECENT_PUBLISHED_REQUEST = 28\nRECENT_PUBLISHED_RESPONSE = 29\nCHECK_TWEET_REQUEST = 30\nCHECK_TWEET_RESPONSE = 31\nADD_LOGGER = 32\nREMOVE_LOGGER = 33\nADD_ENTRY = 34\nREMOVE_ENTRY = 35\nINSERTED_LOGGER_REQUEST = 36\nINSERTED_LOGGER_RESPONSE = 37\nCHECK_USER_REQUEST = 38\nCHECK_USER_RESPONSE = 39\nGET_TWEET = 40\nPROFILE_GET = 41\nHELLO = 42\nWELLCOME = 43\nHARD_WRITE = 44\nPROFILE_DATA_REQUEST = 45\nADD_TWEET = 46\nADD_RETWEET = 47\nADD_PROFILE = 48\nADD_FOLLOW = 49\nADD_TOKEN = 50\nREMOVE_TOKEN = 51\n\n# Listening ports\nPORT_GENERAL_ENTRY = 15069\nCHORD_PORT = 15042\nPORT_GENERAL_LOGGER = 15071\n\ndef encode(data_dict):\n '''\n Encode a Python dictionary to bytes\n '''\n return json.dumps(data_dict).encode()\n\ndef decode(data_bytes):\n '''\n Decode bytes into a Python dictionary\n '''\n return json.loads(data_bytes)\n\ndef gen_token(n_bytes):\n return ''.join(secrets.choice(alphabet) for i in range(n_bytes))\n\n\nclass Dispatcher:\n\n def __init__(self):\n self.__next_petition_id = 0\n self.petitions = {}\n self.slaves = None\n\n def insert_petition(self, petition):\n ret = self.__next_petition_id\n self.petitions[ret] = petition\n self.__next_petition_id += 1\n return ret\n\n def extract_petition(self, id):\n return self.petitions.get(id, None)\n\n\n
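# A small usage sketch of Dispatcher (hypothetical payload; ids count up from 0):\n#   d = Dispatcher()\n#   pid = d.insert_petition({'proto': LOGIN_REQUEST})  # -> 0\n#   d.extract_petition(pid)                            # -> {'proto': LOGIN_REQUEST}\n\n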
Recomienda de forma aleatoria un IP\n para verificar si est'a vivo a'un, pero dando mas probabilidad a los\n IP menos actualizados.\n '''\n def __init__(self, type, umbral_alive = 90, umbral_deads=30):\n '''\n Inicializa la estructura Stalker con el tipo de Server que la aloje.\n Internamente utiliza una lista con tuplas de la forma (tiempo, IP:Port)\n '''\n self.list = []\n self.type = type\n self.umbral_alive = umbral_alive\n self.umbral_deads = umbral_deads\n self.alive_dirs = []\n self.deads_dirs = []\n\n def insert_IP(self, dir):\n '''\n Agrega una nueva direcci'on IP a la lista. La presupone nueva.\n Utilizar mejor update cuando no se tiene la certeza de su existencia. \n '''\n self.list.append((time.time(), dir))\n\n def update_IP(self, dir):\n '''\n Actualiza el tiempo de un IP. Si este est'a solamente se actualiza el tiempo\n con el tiempo actual. Si no est'a, se a~nade nuevo.\n '''\n for i, item in enumerate(self.list):\n if item[1] == dir:\n self.list[i] = (time.time(), dir)\n self.list.sort()\n return True\n self.list.append((time.time(), dir))\n return False\n\n def extract_IP(self, dir):\n '''\n Se elimina el IP de la lista y se retorna su valor. Si este no existe\n se retorna None\n '''\n for i, item in enumerate(self.list):\n if item[1] == dir:\n return self.list.pop(i)\n return None\n \n def recommended_dir(self):\n '''\n Se recomienda alg'un IP de la lista. Mientras m'as viejo, m'as probable\n eres de ser recomendado.\n '''\n _, dir = random.choices(self.list,weights=range(len(self.list), 0, -1),k=1)[0]\n return dir\n \n def refresh_dirs(self):\n\n self.alive_dirs = []\n real_time = time.time()\n self.deads_dirs = []\n for i in range(len(self.list)):\n t, dir = self.list[i]\n if real_time - t >= self.umbral_deads:\n self.deads_dirs.append(dir)\n if real_time - t <= self.umbral_alive:\n self.alive_dirs.append(dir)\n return self.deads_dirs.copy()\n\n def msg_stalk(self):\n '''\n Genera el mensaje de ALIVE_REQUEST\n '''\n msg = {\n 'type': self.type,\n 'proto': ALIVE_REQUEST, # Definir el protocolo de estar vivo.\n }\n return msg\n\n\ndef clear():\n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\nclass Cache:\n\n def __init__(self) -> None:\n self.profiles = {}\n\n def add_something(self, date, text, nick, nick_original, date_original):\n profile = self.profiles.get(nick, None)\n if profile is None:\n self.profiles[nick] = []\n self.profiles[nick].append((date, text, nick_original, date_original))\n\n def add_many_something(self, list):\n for date, text, nick, nick_original, date_original in list:\n self.add_many_something(date, text, nick, nick_original, date_original)\n \n","repo_name":"Leo00010011/Distributed-Twitter","sub_path":"API/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39638618232","text":"# -*- coding: utf-8 -*-\n\nimport time\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef timethis(label):\n # yield 之前的代码会在上下文管理器中作为 __enter__() 方法执行\n # 所有在 yield 之后的代码会作为 __exit__() 方法执行\n start = time.time()\n try:\n yield\n finally:\n end = time.time()\n print('{}: {}'.format(label, end - start))\n\n\ndef create_a_big_list():\n with timethis('create_a_big_list'):\n data = list(range(10000000))\n return data\n\n\ndef test_for(data):\n with timethis('test_for'):\n temp = []\n for i in data:\n if not i % 3:\n temp.append(i)\n return temp\n\n\ndef test_list_range(data):\n with 
timethis('test_list_range'):\n temp = [x for x in data if not x % 3]\n return temp\n\n\ndef count_10kk():\n with timethis('counting'):\n n = 10000000\n while n > 0:\n n -= 1\n\n\n@contextmanager\ndef list_transaction(orig_list):\n working = list(orig_list)\n yield working\n orig_list[:] = working\n\n\ndef test_list():\n items = [1, 2, 3]\n with list_transaction(items) as working:\n working.append(4)\n working.append(5)\n print(items)\n try:\n with list_transaction(items) as working:\n working.append(6)\n working.append(7)\n raise RuntimeError('Oops')\n except RuntimeError as e:\n print(e)\n print(items)\n\nif __name__ == \"__main__\":\n count_10kk()\n data = create_a_big_list()\n res1 = test_for(data)\n res2 = test_list_range(data)\n print('res == res2?', res1 == res2)\n test_list()\n","repo_name":"halysl/python_module_study_code","sub_path":"src/study_cookbook/9元编程/定义上下文管理器的简单方法.py","file_name":"定义上下文管理器的简单方法.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31566877884","text":"import flask_debugtoolbar\nimport math\nfrom flask import *\nfrom flask_login import LoginManager\nfrom flask_wtf import CSRFProtect\n\napp = Flask(__name__)\napp.debug = True\napp.config['SECRET_KEY'] = 'oqpi23z9q82z3qr9823zh9oq82zhroq289zhrrrr29r'\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n# toolbar = flask_debugtoolbar.DebugToolbarExtension(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'user_manager.login'\nlogin_manager.login_message = 'Nur als registrierter Nutzer möglich.'\nSELF = \"'self'\"\nINLINE = \"'unsafe-inline'\"\nEVAL = \"'unsafe-eval'\"\nsrcs = [\n SELF,\n INLINE,\n '*.localhost',\n '*.jquery.com',\n '*.googleapis.com',\n '*.getmdl.io',\n '*.cloudflare.com',\n '*.gstatic.com',\n '*.w3.org',\n]\ncsp = {\n 'default-src': srcs,\n 'style-src': srcs,\n 'script-src': srcs\n}\n# talisman = Talisman(app, content_security_policy=csp)\ncsrf = CSRFProtect()\napp.wtf_csrf_secret_key = 'apow389paw3z5ap385awp35zapwoehpcbykls3478tz'\ncsrf.init_app(app)\nhandling = \"\"\n\n\n@app.route('/index')\ndef index():\n \"\"\"\n Zeige Landingpage\n :return: index template\n \"\"\"\n return render_template('index.html')\n\n\n@app.route('/')\ndef hello_world():\n \"\"\"\n Leite auf Landingpage weiter\n :return: redirect index\n \"\"\"\n return redirect(url_for(\"index\"))\n\n\n\n\n@app.context_processor\ndef inject_stage_and_region():\n \"\"\"\n APP CONTEXT PROCESSOR: Funktionen und Variablen die an alle Templates mit übergeben werden.\n Hier: Lückensettings, tipps, aufgabenstellungen und format_price funktion\n :return:\n \"\"\"\n\n def format_price(amount):\n \"\"\"\n Funktion um einen Integer ct betrag zu nehmen und diesen als 00,00 € anzuzeigen um Rundungsfehler von kommazahlen zu vermeiden.\n :param amount:\n :return: 543 -> 5,43 €\n \"\"\"\n frac, whole = math.modf(amount / 100)\n number_after = str(frac).split(\".\")[1]\n number_pre = str(whole).split(\".\")[0]\n if number_after == '0':\n number_after = '00'\n return number_pre + \",\" + number_after + \" €\"\n\n return {\n \"sec_settings\": {\n \"itemtype_handling\": app.config[\"itemtype_handling\"],\n \"cart_negative_quantity_handling\": app.config[\"cart_negative_quantity_handling\"],\n \"sql_injection_login\": app.config[\"sql_injection_login\"],\n \"email_template_handling\": app.config[\"email_template_handling\"],\n \"secret_key_handling\": app.config['secret_key_handling'],\n \"user_id_handling\": 
app.config['user_id_handling']\n },\n \"tips\": active_tipps,\n \"format_price\": format_price,\n \"aufgaben\": active_aufgabenstellung\n }\n\n\nfrom controller.controller_flag_manager import flag_manager, active_tipps, active_aufgabenstellung\n\napp.register_blueprint(flag_manager)\n\nfrom controller.controller_user_manager import user_manager\n\napp.register_blueprint(user_manager)\n\nfrom controller.controller_scoreboard import scoreboard\n\napp.register_blueprint(scoreboard)\n\nfrom controller.controller_admin import admin\n\napp.register_blueprint(admin)\n\nfrom controller.controller_cart import cart\n\napp.register_blueprint(cart)\n\nfrom controller.controller_shop import shopctrl\n\napp.register_blueprint(shopctrl)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Auron2402/unsecuresecurewebshop","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19871656348","text":"'''\n从ztg抽好的denseface文件转换为适配我这里target的denseface.h5文件\n'''\n\nimport os\nimport h5py\nimport numpy as np\nimport glob\nfrom tqdm import tqdm\n\ndef mkdir(path):\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n \ndef get_basename(path):\n basename = os.path.basename(path)\n if os.path.isfile(path):\n basename = basename[:basename.rfind('.')]\n return basename\n\ndataset_root = '/data9/datasets/Aff-Wild2/'\nfeatures_dir = '/data9/hzp/ABAW_VA_2022/processed_data/features'\ntargets_dir = '/data9/hzp/ABAW_VA_2022/processed_data/targets'\nztg_file = '/data9/datasets/Aff-Wild2/h5_data/VA_Estimation_Challenge/feature/denseface.h5'\n\nztg_h5f = h5py.File(ztg_file, 'r')\nztg_set_list = ['trn', 'val']\nset_list = ['train', 'val']\n\nspecial_file = os.path.join(targets_dir, 'special_videos.h5')\nspecial_h5f = h5py.File(special_file, 'r')\n\nfor ztg_set, set_name in zip(ztg_set_list, set_list):\n print('--------------process {}--------------'.format(set_name))\n valid_targets_path = os.path.join(targets_dir, '{}_valid_targets.h5'.format(set_name))\n valid_targets_h5f = h5py.File(valid_targets_path, 'r')\n denseface_path = os.path.join(features_dir, '{}_denseface.h5'.format(set_name))\n denseface_h5f = h5py.File(denseface_path, 'w')\n\n for video in tqdm(list(valid_targets_h5f.keys())):\n if valid_targets_h5f[video]['special'][()] == 0:\n video_group = denseface_h5f.create_group(video)\n fts = ztg_h5f[ztg_set][video]['feature'][()]\n assert len(fts) == valid_targets_h5f[video]['length'][()]\n video_group['fts'] = fts\n valid = [0 if not i.any() else 1 for i in fts]\n video_group['valid'] = np.array(valid)\n\n else: # 后切出来的片段\n original_video = '_'.join(video.split('_')[:-1])\n video_group = denseface_h5f.create_group(video)\n seg_start = special_h5f[original_video][video]['start'][()]\n seg_end = special_h5f[original_video][video]['end'][()]\n whole_video_fts = ztg_h5f[ztg_set][original_video]['feature'][()]\n video_group['fts'] = whole_video_fts[seg_start: seg_end + 1]\n assert len(video_group['fts']) == valid_targets_h5f[video]['length'][()]\n valid = [0 if not i.any() else 1 for i in video_group['fts'][()]]\n video_group['valid'] = np.array(valid)\n","repo_name":"hzp3517/ABAW_VA_2022","sub_path":"preprocess/get_denseface.py","file_name":"get_denseface.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74772482825","text":"from datetime import datetime\nfrom 
datetime import timedelta\nfrom aiohttp import ClientSession, ClientConnectorError\nimport requests\nimport urllib\nfrom urllib.parse import urlparse\nimport json\nimport httplib2 as http\nimport sys\nimport os\nimport time\nimport telepot\nfrom telepot.loop import MessageLoop\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton,ReplyKeyboardMarkup,KeyboardButton\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom math import cos, sqrt, pi\noptions = Options()\n\n#driver = webdriver.Chrome(r'C:\\Users\\eugen\\chromedriver.exe') #windows\n#driver = webdriver.Chrome('/usr/bin/chromedriver')\n\n#driver.get(\"http://trainarrivalweb.smrt.com.sg/\")\n\nR = 6371000 #radius of the Earth in m\ndef mrt(chat_id,k):\n\n try:\n driver.find_element_by_xpath(f\"//*[@id='ddlStation']/option[contains(text(), '{k}')]\").click()\n time.sleep(0.8)\n station = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[1]/td/p\").text\n line = driver.find_element_by_xpath('/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/span[1]').text\n timing = driver.find_element_by_xpath('/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[1]/table/tbody/tr[2]/td[1]').text\n ending = driver.find_element_by_xpath('/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[1]/table/tbody/tr[3]/td[1]').text\n\n opptiming = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[2]/table/tbody/tr[2]/td[1]\").text\n oppending = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[2]/table/tbody/tr[3]/td[1]\").text\n message = f\"\"\"\nCurrent Station: {station} @ {line}\n==================================\nTowards: {ending}\nNext Train: {timing}\nTowards: {oppending}\nNext Train: {opptiming}\n==================================\n \"\"\"\n bot.sendMessage(chat_id,message)\n try:\n line2timing = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[3]/table/tbody/tr[2]/td[1]\").text\n line2ending = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[3]/table/tbody/tr[3]/td[1]\").text\n line2 = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/span[10]\").text\n line2timingopp = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[4]/table/tbody/tr[2]/td[1]\").text\n line2endingopp = driver.find_element_by_xpath(\"/html/body/form/div[3]/div/div/div/div[2]/div[2]/span/div/table/tbody/tr[2]/td/table/tbody/tr/td/div/div[4]/table/tbody/tr[3]/td[1]\").text\n message2 = f\"\"\"\nCurrent Station: {station} @ {line2}\n==================================\nTowards: {line2ending}\nNext Train: {line2timing}\nTowards: {line2endingopp}\nNext Train: {line2timingopp}\n==================================\n\"\"\"\n bot.sendMessage(chat_id,message2)\n\n except:\n 1\n except:\n k == \"exit\"\n\ndef woodlands(chat_id):\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n 
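# --- Editorial aside, not part of the record above: the scraper in this file
# pins down page elements with absolute XPaths and fixed time.sleep() pauses,
# which is fragile. A hedged sketch of the same lookup using Selenium's
# explicit waits instead; the helper name is mine and the xpath argument is
# whatever locator the caller already uses, nothing here is from the repo.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def read_text(driver, xpath, timeout=10):
    # Poll up to `timeout` seconds for the element to appear, then return its
    # visible text; raises selenium's TimeoutException if it never shows up.
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.XPATH, xpath))
    )
    return element.text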
chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(r'/root/chrome/chromedriver',chrome_options=chrome_options)\n driver.get(\"https://www.google.com/search?q=CIQ+to+woodlands&rlz=1C1KNTJ_enSG968SG968&oq=CIQ+to+woodlands&aqs=chrome..69i57j69i60l3.2806j0j9&sourceid=chrome&ie=UTF-8\")\n time.sleep(1)\n print(driver.title)\n duration1 = driver.find_element_by_xpath(\"/html/body/div[7]/div/div[10]/div/div[2]/div[2]/div/div/div[1]/div/div[2]/div/div/div[1]/div/div/div[4]/div[3]/div/div/div[2]/div[1]/div[1]/div/div/span[1]/span[1]\").text\n print(duration1)\n driver.get(\"https://www.google.com/search?q=woodlands+checkpoint+to+johor+CIQ&rlz=1C1KNTJ_enSG968SG968&ei=4EpyYsONBOSVseMPpbKv4Ao&ved=0ahUKEwjD_YW4x8X3AhXkSmwGHSXZC6wQ4dUDCA4&uact=5&oq=woodlands+checkpoint+to+johor+CIQ&gs_lcp=Cgdnd3Mtd2l6EAMyBQghEKABMgUIIRCgATIFCCEQoAE6BwgAEEcQsAM6BwghEAoQoAE6BAghEBU6BggAEBYQHjoICCEQFhAdEB5KBAhBGABKBAhGGABQuQpYkiJgySRoAXABeACAAVaIAdUDkgEBOJgBAKABAcgBCMABAQ&sclient=gws-wiz\")\n duration2 = driver.find_element_by_xpath(\"/html/body/div[7]/div/div[10]/div/div[2]/div[2]/div/div/div[1]/div/div[2]/div/div/div[1]/div/div/div[4]/div[3]/div/div/div[2]/div[1]/div[1]/div/div/span[1]/span[1]\").text\n time.sleep(1)\n print(driver.title)\n message = f\"\"\"\nWoodlands to CIQ: {duration1}\nCIQ to Woodlands: {duration2}\n \"\"\"\n bot.sendMessage(chat_id,message)\n driver.quit()\ndef Tuas(chat_id):\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(r'/root/chrome/chromedriver',chrome_options=chrome_options)\n driver.get(\"https://www.google.com/search?q=Asia-Pacific+Brewery+to+Rest+%26+Service+Area+2nd+Link&rlz=1C1KNTJ_enSG968SG968&ei=21JyYuuHEY3bz7sPx-K82A4&ved=0ahUKEwir4amGz8X3AhWN7XMBHUcxD-sQ4dUDCA4&uact=5&oq=Asia-Pacific+Brewery+to+Rest+%26+Service+Area+2nd+Link&gs_lcp=Cgdnd3Mtd2l6EAM6BwgAEEcQsAM6BQghEKABOgQIIRAVSgQIQRgASgQIRhgAUD5Y4h5gjyFoAXAAeACAAXeIAbwEkgEDOC4xmAEAoAEBoAECyAEIwAEB&sclient=gws-wiz\")\n time.sleep(1)\n print(driver.title)\n duration1 = driver.find_element_by_xpath(\"/html/body/div[7]/div/div[10]/div/div[2]/div[2]/div/div/div[1]/div/div[2]/div/div/div[1]/div/div/div[4]/div[3]/div/div/div[2]/div[1]/div[1]/div/div/span[1]/span[1]\").text\n print(duration1)\n driver.get(\"https://www.google.com/search?q=Rest+%26+Service+Area+2nd+Link+to+Opp+Asia-Pacific+Brewery&rlz=1C1KNTJ_enSG968SG968&ei=kFJyYviEOdjAz7sP9JSF6AI&ved=0ahUKEwj4jPDizsX3AhVY4HMBHXRKAS0Q4dUDCA4&uact=5&oq=Rest+%26+Service+Area+2nd+Link+to+Opp+Asia-Pacific+Brewery&gs_lcp=Cgdnd3Mtd2l6EAM6BwgAEEcQsAM6BAghEBU6BQghEKABSgQIQRgASgQIRhgAUGFYshlgzBtoAXABeACAAcUBiAHYDZIBBDIzLjGYAQCgAQGgAQLIAQjAAQE&sclient=gws-wiz\")\n duration2 = driver.find_element_by_xpath(\"/html/body/div[7]/div/div[10]/div/div[2]/div[2]/div/div/div[1]/div/div[2]/div/div/div[1]/div/div/div[4]/div[3]/div/div/div[2]/div[1]/div[1]/div/div/span[1]/span[1]\").text\n time.sleep(1)\n print(driver.title)\n message = f\"\"\"\nTuas to Johor: {duration1}\nJohor to Tuas: {duration2}\n \"\"\"\n bot.sendMessage(chat_id,message)\n driver.quit()\n\ndef get(bus_stop):\n\n api_key = 'fmvxpJuBSEqtzoDSPGu/uw=='\n headers = { 'AccountKey' : api_key,'accept' : 'application/json'} #this is by default\n uri = 'http://datamall2.mytransport.sg/' #Resource URL\n path = f'ltaodataservice/BusArrivalv2?BusStopCode={bus_stop}' \n target = urlparse(uri + path)\n #print (target.geturl())\n method = 'GET'\n body = ''\n h = http.Http()\n response,content = h.request(\n target.geturl(),\n method,\n 
body,\n headers)\n\n jsonObj = json.loads(content)\n #print(json.dumps(jsonObj, sort_keys=True, indent=4))\n \n with open(r\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_routes.json\",\"w\") as outfile:\n #Saving jsonObj[\"d\"]\n json.dump(jsonObj, outfile, sort_keys=True, indent=4, ensure_ascii=False)\n \n\n\ndef distance(lon1, lat1, lon2, lat2):\n x = (lon2 - lon1) * cos(0.5*(lat2+lat1))\n y = (lat2 - lat1)\n return (2*pi*R/360) * sqrt( x*x + y*y )\n\n\n\ndef out(i,data):\n\n try:\n \n service = data[\"Services\"][i][\"ServiceNo\"]\n arrive = data[\"Services\"][i][\"NextBus\"][\"EstimatedArrival\"]\n arrive2 = data[\"Services\"][i][\"NextBus2\"][\"EstimatedArrival\"]\n load = data[\"Services\"][i][\"NextBus\"][\"Load\"]\n load2 = data[\"Services\"][i][\"NextBus2\"][\"Load\"]\n dest = data[\"Services\"][i][\"NextBus\"][\"DestinationCode\"]\n bus_num = data[\"Services\"][i][\"ServiceNo\"]\n size = data[\"Services\"][i][\"NextBus\"][\"Type\"]\n size2 = data[\"Services\"][i][\"NextBus2\"][\"Type\"]\n for j in range(0,12):\n try:\n #f = open(rf\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_stops{j}.json\") #windows\n f = open(rf\"bus_stops{j}.json\")\n data = json.load(f)\n for k in range(0,500):\n try: \n bus_stop_code = data[\"value\"][k][\"BusStopCode\"]\n if(bus_stop_code == dest):\n term = (data[\"value\"][k][\"Description\"])\n break\n except:\n 1\n except:\n 1\n# arrive3 = data[\"Services\"][i][\"NextBus3\"][\"EstimatedArrival\"]\n arrived = arrive[11:16]\n arrived2 = arrive2[11:16]\n# arrived3 = arrive3[11:16]\n #current = datetime.now()\n current = datetime.now() + timedelta(hours = 8)\n current_time = current.strftime(\"%H:%M\")\n \n FMT = '%H:%M'\n# tdelta3 = str(datetime.strptime(arrived3, FMT) - datetime.strptime(current_time, FMT))\n tdelta2 = str(datetime.strptime(arrived2, FMT) - datetime.strptime(current_time, FMT))\n tdelta = str(datetime.strptime(arrived, FMT) - datetime.strptime(current_time, FMT))\n# mins3 = tdelta2[2:4]\n mins2 = tdelta2[2:4]\n mins = tdelta[2:4]\n \n\n #print(current_time)\n \n if (mins == ' d'):\n mins = 'Delayed'\n elif mins == '00':\n mins = 'Arriving'\n else:\n mins = f'{mins} minutes'\n \n if (mins2 == ' d'):\n mins2 = 'Delayed'\n elif mins2 == '00':\n mins2 = 'Arriving'\n else:\n mins2 = f'{mins2} minutes'\n \n\n# if mins3 == ' d':\n# mins3 = 'Delayed'\n# elif mins3 == '00':\n# mins3 = 'Arriving'\n# else:\n# mins3 = f'{mins3} minutes'\n \n \n sentence = f\"\"\"\nService: {service} {term}\nArrival Time: {mins} Type: {size} Load: {load}\nArrival Time: {mins2} Type: {size2} Load: {load2} \n \n\"\"\"\n return sentence\n\n \n\n except:\n 1\n \ndef handle(msg):\n \n content_type, chat_type, chat_id = telepot.glance(msg)\n #print(variable)\t\n if content_type == 'text':\n variable = msg['text']\n if variable[0:1] == '/':\n variable = variable[1:]\n bot.sendMessage(chat_id,variable)\n else:\n bot.sendMessage(chat_id,variable)\n# if variable == 'stopbot':\n# driver.quit()\n# os._exit(0)\n \n if msg['text'] == '/start':\n markup = ReplyKeyboardMarkup(one_time_keyboard = True,resize_keyboard = True,keyboard=[\n [KeyboardButton(text='Bus Stop No.')],\n [KeyboardButton(text='Location',request_location=True)],\n [KeyboardButton(text='Singapore-Johor')],\n ])\n bot.sendMessage(chat_id, 'Seclect Option', reply_markup=markup)\n \n elif variable.isnumeric():\n bus_stop = variable\n print(bus_stop)\n\n \n for j in range(0,12):\n try:\n #f = open(rf\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_stops{j}.json\") #windows\n f = open(rf\"bus_stops{j}.json\")\n 
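# --- Editorial aside, not part of the record above: distance() earlier in
# this file passes degree-valued coordinates straight into math.cos(), which
# expects radians, so the east-west term is mis-scaled. A hedged corrected
# sketch of the same equirectangular approximation (R = 6371000 m as in the
# original; the function name is mine, not the repo's):
from math import cos, sqrt, radians

def distance_fixed(lon1, lat1, lon2, lat2):
    # Convert the degree inputs to radians before any trigonometry.
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    x = (lon2 - lon1) * cos(0.5 * (lat2 + lat1))  # shrink longitude span by latitude
    y = lat2 - lat1
    return 6371000 * sqrt(x * x + y * y)  # arc length in metres

# Example: two points ~0.01 deg apart near Singapore come out around 1.6 km.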
data = json.load(f)\n\n for i in range(0,500):\n try: \n bus_stop_code = data[\"value\"][i][\"BusStopCode\"]\n if(bus_stop_code == bus_stop):\n des = (data[\"value\"][i][\"Description\"])\n\n break\n\n\n except:\n 1\n except:\n 1\n namespace =f'Bus Stop: {bus_stop} {des} \\n ==========================\\n'\n get(bus_stop)\n f = open(r\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_routes.json\") \n data = json.load(f)\n for i in range(0,20):\n #out(i,data)\n if out(i,data) != None:\n namespace = namespace + out(i,data)\n namespace = namespace + \"\\nSEA (for Seats Available) \\nSDA (for Standing Available) \\nLSD (for Limited Standing)\"\n bot.sendMessage(chat_id,format(namespace))\n# elif all(x.isalpha() or x.isspace() for x in variable):\n# station = variable[1:-3]\n# print(station)\n# mrt(chat_id,station)\n elif msg[\"text\"] == 'Bus Stop No.':\n bot.sendMessage(chat_id,\"Send me 5 Digit Bus Stop Code\")\n elif msg[\"text\"] == 'Singapore-Johor':\n bot.sendMessage(chat_id,\"/Tuas or /Woodlands\")\n elif variable == 'Woodlands':\n travel = \"\"\"\nNormal:0-30 mins\nHeavy:30-60 mins\nSevere: >60 mins\n \"\"\"\n bot.sendMessage(chat_id,travel)\n woodlands(chat_id)\n elif variable == 'Tuas':\n travel = \"\"\"\nNormal:0-30 mins\nHeavy:30-60 mins\nSevere: >60 mins\n \"\"\"\n bot.sendMessage(chat_id,travel)\n Tuas(chat_id)\n else:\n bot.sendMessage(chat_id,\"I dont understand, press /start\")\n elif content_type == 'location':\n dist =[]\n chat =''\n location = msg['location']\n lattitude = location[\"latitude\"]\n longitude = location[\"longitude\"]\n #print(lattitude)\n #print(longitude)\n for j in range(0,12):\n try:\n #f = open(rf\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_stops{j}.json\") #windows\n f = open(rf\"bus_stops{j}.json\")\n data = json.load(f)\n for i in range(0,500):\n try: \n lat = (data[\"value\"][i][\"Latitude\"])\n log = (data[\"value\"][i][\"Longitude\"])\n dista = distance(longitude,lattitude,log,lat)\n #print(f\"{lat} {log}\")\n stops = [dista,lat,log,i,j]\n dist.append(stops) \n \n except:\n 1\n except:\n 1\n sort = sorted(dist)\n #print(sort[0:5][0:5])\n for a in range(0,5):\n #k = open(rf\"C:\\Users\\eugen\\OneDrive\\Codes\\BusBot\\bus_stops{sort[a][4]}.json\") #windows\n k = open(rf\"bus_stops{sort[a][4]}.json\")\n maped = json.load(k)\n nearest_stop = maped[\"value\"][sort[a][3]][\"Description\"]\n distance_from = round(sort[a][0])\n nearest_stop_code = maped[\"value\"][sort[a][3]][\"BusStopCode\"]\n \n message = f\"\"\"\nNearest Stop: {nearest_stop} \nBus Stop Code: /{nearest_stop_code} \nDistance: {distance_from}m\n\"\"\"\n chat = chat + message\n bot.sendMessage(chat_id,chat)\n \n else:\n bot.sendMessage(chat_id,\"I dont understand\")\n print(\"Null\")\n \nif __name__ == \"__main__\":\n bot = telepot.Bot('XXXXX')\n MessageLoop(bot,handle).run_as_thread()\n \n while 1:\n time.sleep(1)\n","repo_name":"EangJS/Telegram-Bus-Bot","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"969090734","text":"from wtforms import Form, TextField, validators, SubmitField\r\nfrom flask import Flask, render_template, request\r\nfrom utils import eval_from_input\r\n\r\n#create app\r\napp = Flask(__name__)\r\n\r\nclass WebForm(Form):\r\n eval_text = TextField(\"Please enter some text to be evaluated\",\r\n validators=[validators.InputRequired()])\r\n submit = SubmitField(\"Enter\")\r\n\r\n@app.route(\"/\", methods=['GET', 
'POST'])\r\ndef home():\r\n \"home page with form\"\r\n form = WebForm(request.form)\r\n\r\n if request.method == 'POST' and form.validate():\r\n link = request.form['eval_text']\r\n return render_template('eval_text.html',\r\n input=eval_from_input(link=link))\r\n\r\n return render_template('index.html', form=form)\r\n\r\nif __name__ == \"__main__\":\r\n print((\"* Loading model and Flask starting server...\"\r\n \"please wait until server has fully started\"))\r\n # Run app\r\n app.run(host=\"0.0.0.0\")","repo_name":"Mitchwatts93/damonnet","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34135122931","text":"# Imports\r\nfrom PlannerProto_pb2 import ScenarioConcludedNotificationPb, \\\r\n ScenarioInitializedNotificationPb # Scenario start/end notifications\r\nfrom PlannerProto_pb2 import ErrorPb # Error messsage if scenario fails\r\nfrom PlannerProto_pb2 import StatePb, AssetPb, TrackPb # Simulation state information\r\nfrom PlannerProto_pb2 import OutputPb, ShipActionPb, WeaponPb\r\nfrom publisher import Publisher\r\nfrom AiManager import AiManager\r\nfrom google.protobuf.json_format import MessageToDict, ParseDict\r\nimport json\r\nfrom pathlib import Path\r\nimport PlannerProto_pb2\r\nimport numpy as np\r\nimport re\r\nimport time\r\nimport math\r\n\r\nclass State(AiManager):\r\n # Constructor\r\n def __init__(self, publisher: Publisher):\r\n print(\"Constructing AI Manager\")\r\n self.ai_pub = publisher\r\n self.memory = []\r\n # depends on model\r\n self.directory = Path(\"./output4/\")\r\n self.base_file = \"ttd_state.json\"\r\n self.filepath = self.get_next_filepath(self.directory, self.base_file)\r\n self.ifYouShootShutUp = []\r\n self.targeted_track_ids=[]\r\n\r\n # Is passed StatePb from Planner\r\n def receiveStatePb(self, msg: StatePb):\r\n # self.printStateAsDict(msg)\r\n output_message: OutputPb = OutputPb()\r\n output_message = self.generateState(msg)\r\n self.ai_pub.publish(output_message)\r\n\r\n # This method/message is used to notify of new scenarios/runs\r\n def receiveScenarioInitializedNotificationPb(self, msg: ScenarioInitializedNotificationPb):\r\n print(\"Scenario run: \" + str(msg.sessionId))\r\n\r\n # This method/message is used to nofify that a scenario/run has ended\r\n def receiveScenarioConcludedNotificationPb(self, msg: ScenarioConcludedNotificationPb):\r\n print(\"Ended Run: \" + str(msg.sessionId) + \" with score: \" + str(msg.score))\r\n\r\n # Example function for building OutputPbs, returns OutputPb\r\n\r\n # function to get the next available file path\r\n def get_next_filepath(self, directory, base_filename):\r\n index = 1\r\n while True:\r\n filename = f\"{base_filename}_{index}.json\"\r\n filepath = directory / filename\r\n if not filepath.exists():\r\n return filepath\r\n index += 1\r\n\r\n def generateState(self, msg: StatePb):\r\n json_dict = self.cleanState(msg)\r\n # (Shane) Append state as dictionary to output file\r\n\r\n # needs to make new file if file already tyhere\r\n\r\n with open(self.filepath, \"a\") as file:\r\n file.write(json.dumps(json_dict) + \"\\n\")\r\n\r\n # idle\r\n if 'Tracks' not in json_dict:\r\n print(\"idle\")\r\n output_message: OutputPb = OutputPb()\r\n return output_message\r\n else:\r\n origin = self.calculateOrigin(json_dict['assets'])\r\n # execution order is a list of tuples (time, id)\r\n\r\n\r\n execution_order = self.calculateExecutionOrder(json_dict['Tracks'], 
origin)\r\n #print(json_dict[\"Tracks\"])\r\n pattern=r\":(\\d+)\"\r\n #make a memory of what you have shot at by parsing for \"Chainshot_17>enemy_track:18\"\r\n for track in json_dict[\"Tracks\"]:\r\n if track[\"ThreatRelationship\"]==\"Friendly\":\r\n #print(track[\"ThreatRelationship\"])\r\n match = re.search(pattern, track[\"ThreatId\"])\r\n self.targeted_track_ids.append(int(match.group(1)))\r\n output_message: OutputPb = OutputPb()\r\n\r\n self.memory = [x for x in self.memory if x in self.targeted_track_ids]\r\n # if track with the track id has an enemy with the corresponding enemy value in memory\r\n\r\n # OR just make it so memory appends the track_ids\r\n execution_order = [enemy for enemy in execution_order if enemy[1] not in self.memory]\r\n # execution_order = [enemy for enemy in execution_order if enemy[1] not in self.active_defense]\r\n # print(execution_order)\r\n # make sure its still sorted (it should be lol)\r\n execution_order = sorted(execution_order, key=lambda x: x[0])\r\n\r\n # This should fix your memory issue\r\n #check tracking data to see if a friendly missle will hit (just make a memory from missles attacking enemies)\r\n # if len(self.memory) == 30:\r\n # self.memory = []\r\n # self.ifYouShootShutUp = []\r\n\r\n for missle in execution_order:\r\n ship_action: ShipActionPb = ShipActionPb()\r\n # set the target id to the missle id\r\n ship_action.TargetId = missle[1]\r\n ship_action.weapon, ship_action.AssetName \\\r\n = self.whoShootsFirst(json_dict['assets'], missle[1])\r\n\r\n if ship_action.weapon != \"\":\r\n output_message.actions.append(ship_action)\r\n\r\n else:\r\n self.ifYouShootShutUp = []\r\n return output_message\r\n #print(\"printing execution order\")\r\n #print(execution_order)\r\n print(\"printing memory\")\r\n #print(self.memory)\r\n self.ifYouShootShutUp = []\r\n #print(\"printing output message\")\r\n #print(output_message.actions)\r\n return output_message\r\n\r\n def find_value(self, list_of_dicts, key1, value1, key2, target_id):\r\n \"\"\"\r\n Find a value in a list of dictionaries that contain another list of dictionaries.\r\n\r\n Args:\r\n - list_of_dicts: a list of dictionaries that contain another list of dictionaries.\r\n - key1: the key to search for in the first level of dictionaries.\r\n - value1: the value to search for in the first level of dictionaries.\r\n - key2: the key to search for in the second level of dictionaries.\r\n\r\n Returns:\r\n - The value associated with the key2 in the first matching dictionary.\r\n - None if the value1 is not found in any of the dictionaries.\r\n \"\"\"\r\n whosShooting = \"\"\r\n withWhat = \"\"\r\n # update after you send a message each time\r\n # ifYouShootShutUp=[]\r\n for dictionary in list_of_dicts:\r\n # if dictionary.get(key1) != value1:\r\n if dictionary.get(key1) != value1: # and dictionary.get(\"AssetName\") not in self.ifYouShootShutUp:\r\n # print(self.ifYouShootShutUp)\r\n whosShooting = dictionary.get(\"AssetName\")\r\n for nested_dict in dictionary.get(\"weapons\"):\r\n if key2 in nested_dict.keys() and (\r\n dictionary.get(\"AssetName\"), nested_dict.get(\"SystemName\")) not in self.ifYouShootShutUp:\r\n withWhat = nested_dict.get(\"SystemName\")\r\n self.memory.append(target_id)\r\n self.ifYouShootShutUp.append((whosShooting, withWhat))\r\n return withWhat, whosShooting\r\n return \"\", \"\"\r\n\r\n def whoShootsFirst(self, assets, target_id) -> tuple:\r\n # start over because its not working\r\n return self.find_value(assets, \"health\", -1, \"Quantity\", target_id)\r\n # 
'''\r\n def calculateProtectionOrder(self):\r\n executionOrder = []\r\n\r\n #sort by HVU\r\n for target in self.distanceList:\r\n if 'HVU' in target[0]:\r\n executionOrder.append(target)\r\n #self.memory.append(target[1])\r\n #sort by lowest health ship\r\n\r\n self.distanceList = sorted(self.distanceList, key=lambda x: x[3])\r\n\r\n for target in self.distanceList:\r\n if 'HVU' not in self.distanceList:\r\n executionOrder.append(target)\r\n #self.memory.append(target[1])\r\n return executionOrder\r\n def calculateWhoTargeted(self, missile_list, assets):\r\n closestDistance = 100000\r\n trackID = 0\r\n #distanceList = []\r\n for missile in missile_list:\r\n #distanceList = []\r\n for asset in assets:\r\n\r\n if asset['health'] == -1:\r\n pass\r\n else:\r\n dist = math.sqrt((asset['PositionX'] - missile['PositionX']) ** 2 + (\r\n asset['PositionY'] - missile['PositionY']) ** 2)\r\n magnitude = math.sqrt(missile['PositionX'] ** 2 + missile['PositionY'] ** 2)\r\n # print(f\"Missile {missile['TrackId']} Position {missile['TrackId']} X {missile['PositionX']} Missile Y {missile['PositionY']}\")\r\n # print(f\"Missile Velocity {missile['TrackId']} XV {missile['VelocityX']} Missile YV {missile['VelocityY']}\")\r\n # print(f\"Ship {asset['AssetName']} is at {asset['PositionX']}, {asset['PositionY']}\")\r\n\r\n # Given values\r\n missile_x = missile['PositionX']\r\n missile_y = missile['PositionY']\r\n missile_vx = missile['VelocityX']\r\n missile_vy = missile['VelocityY']\r\n\r\n ship_x = asset['PositionX']\r\n ship_y = asset['PositionY']\r\n\r\n # Calculate slope and intercept of missile trajectory\r\n m = missile_vy / missile_vx\r\n b = missile_y - m * missile_x\r\n\r\n # Calculate closest distance between missile trajectory and ship\r\n distance = abs(-1 * m * ship_x + ship_y - b) / math.sqrt(m ** 2 + 1)\r\n\r\n add = True\r\n\r\n for tuple in self.distanceList:\r\n if asset['AssetName']==tuple[0] and missile['TrackId']==tuple[1]:\r\n add = False\r\n\r\n if add == True:\r\n self.distanceList.append((asset['AssetName'], missile['TrackId'], distance, asset['health']))\r\n\r\n self.distanceList = sorted(self.distanceList, key=lambda x: x[2])\r\n\r\n\r\n\r\n @staticmethod\r\n def calculateExecutionOrder(missle_list, origin) -> list: # of tuples\r\n # t = (s/m) * m = s\r\n intercept_times = []\r\n # get position, divide by speed, sort by lowest time to intercept\r\n # exclude elevation for now\r\n for missle in missle_list:\r\n if \">\" in missle.get(\"ThreatId\"):\r\n pass\r\n else:\r\n\r\n # easier to read\r\n x = missle['PositionX']\r\n y = missle['PositionY']\r\n vx = missle['VelocityX']\r\n vy = missle['VelocityY']\r\n\r\n # distance eqn\r\n d = np.sqrt((x - origin[0]) ** 2 + (y - origin[1]) ** 2)\r\n # velocity eqn\r\n dv = np.sqrt(vx ** 2 + vy ** 2)\r\n\r\n # time to intercept\r\n t = d / dv\r\n #print(missle)\r\n intercept_times.append((t, missle['TrackId']))\r\n\r\n return intercept_times#, key=lambda x: x[0]\r\n\r\n @staticmethod\r\n def calculateOrigin(asset_list) -> np.ndarray:\r\n gamma = 1e-6\r\n origin = np.array([0, 0], dtype=float)\r\n for asset in asset_list:\r\n if 'health' in asset.keys():\r\n if asset['health'] == -1:\r\n pass\r\n else:\r\n origin += np.array([asset['PositionX'], asset['PositionY']])\r\n else:\r\n # when no healths just return zero (some bug)\r\n return 0\r\n return origin / (len(asset_list) - 1 + gamma)\r\n\r\n def cleanState(self, msg: StatePb):\r\n # StatePb\r\n message = PlannerProto_pb2.StatePb()\r\n # serialize the message\r\n msg = 
msg.SerializeToString()\r\n message.ParseFromString(msg)\r\n message_dict = MessageToDict(message)\r\n # json_str = json.dumps(message_dict)\r\n # print(json_str)\r\n return message_dict\r\n # print(json_str)\r\n # with open(\"./state.json\", 'w') as f:\r\n # f.write(json_str)\r\n # import sys\r\n # sys.exit()\r\n\r\n # def printStateAsDict(self, msg:StatePb):\r\n # from protobuf_to_dict import protobuf_to_dict, get_field_names_and_options\r\n # for field, field_name, options in get_field_names_and_options(PlannerProto_pb2.StatePb):\r\n # print('name: {}, options: {}'.format(field_name, options))\r\n\r\n def createActions(self, msg: StatePb):\r\n\r\n # ShipActionPb's go into an OutputPb message\r\n output_message: OutputPb = OutputPb()\r\n # print(\"**********************************\")\r\n import time\r\n time.sleep(.25)\r\n # import sys\r\n # sys.exit()\r\n # ShipActionPb's are built using the same sytax as the printStateInfo function\r\n\r\n # sample shooting a ship heuristically\r\n ship_action: ShipActionPb = ShipActionPb()\r\n # for track in msg.Tracks:\r\n # target = track.TrackId\r\n target = 1\r\n ship_action.TargetId = target\r\n # asset name\r\n for asset in msg.assets:\r\n myAsset = str(asset.AssetName)\r\n ship_action.AssetName = myAsset\r\n ship_action.AssetName = \"Galleon HVU\"\r\n ship_action.weapon = \"Chainshot_System\"\r\n # ship_action: ShipActionPb = ShipActionPb()\r\n # ship_action.TargetId = 2\r\n # ship_action.AssetName = \"Galleon HVU\"\r\n # ship_action.weapon = \"Chainshot_System\"\r\n\r\n # As stated, shipActions go into the OutputPb as a list of ShipActionPbs\r\n output_message.actions.append(ship_action)\r\n return output_message\r\n\r\n # Function to print state information and provide syntax examples for accessing protobuf messags\r\n def printStateInfo(self, msg: StatePb):\r\n pass\r\n # print(\"Time: \" + str(msg.time))\r\n # print(\"Score: \" + str(msg.score))\r\n\r\n # # Accessing asset fields. 
Notice that is is simply the exact name as seen\r\n # # In PlannerProto.proto\r\n # print(\"Assets:\")\r\n # for asset in msg.assets:\r\n # print(\"1: \" + str(asset.AssetName))\r\n # print(\"2: \" + str(asset.isHVU))\r\n # print(\"3: \" + str(asset.health))\r\n # print(\"4: \" + str(asset.PositionX))\r\n # print(\"5: \" + str(asset.PositionY))\r\n # print(\"6: \" + str(asset.PositionZ))\r\n # print(\"7: \" + str(asset.Lle))\r\n # print(\"8: \" + str(asset.weapons))\r\n # print(\"--------------------\")\r\n\r\n # # Accessing track information is done the same way.\r\n # print(\"Tracks:\")\r\n # for track in msg.Tracks:\r\n # print(\"1: \" + str(track.TrackId))\r\n # print(\"2: \" + str(track.ThreatId))\r\n # print(\"3 \" + str(track.ThreatRelationship))\r\n # print(\"4: \" + str(track.Lle))\r\n # print(\"5: \" + str(track.PositionX))\r\n # print(\"6: \" + str(track.PositionY))\r\n # print(\"7: \" + str(track.PositionZ))\r\n # print(\"8: \" + str(track.VelocityX))\r\n # print(\"9 \" + str(track.VelocityY))\r\n # print(\"10: \" + str(track.VelocityZ))\r\n # print(\"**********************************\")\r\n\r\n\r\n\r\n","repo_name":"csaben/dod-comp","sub_path":"State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":15117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24035800471","text":"from random import random \nfrom datetime import date\n\ndef pengeccekan_leapYear(tanggal, bulan, tahun) :\n if tahun % 4 == 0 and (tahun % 100 != 0 or tahun % 400 == 0) :\n return \"Leap Year\"\n else :\n return \"Not a Leap year\"\n\ndef pengecekan_Umur(tangal, bulan, tahun) :\n if tahun > currentyear :\n print (\"kamu telah salah memasukan tahun\")\n raise Exception\n elif tahun == currentyear :\n if bulan == currentmonth :\n umurhari = currentdate - tanggal\n print (\"Hallo babe, umur mu {} hari\".format(umurhari))\n if bulan < currentmonth :\n umurbulan = currentmonth - bulan\n umurhari = currentdate - tanggal\n print(\"Hallow babe, umur mu {} bulan dan {} hari\\n\".format(umurbulan,umurhari))\n elif bulan > currentmonth :\n print(\"Tolong masukan bulan yang benar\")\n raise Exception\n elif tahun < currentyear :\n if bulan == currentmonth :\n if tanggal > currentdate :\n umur = (currentyear - tahun) - 1\n if tanggal <= currentdate :\n umur = currentyear - tahun\n if bulan < currentmonth :\n umur = currentyear - tahun\n if bulan > currentmonth :\n umur = (currentyear - tahun) - 1 \n return umur \ncurrentdate = date.today()\nprint (\"\\ntoday's date ..\", currentdate, \"\\n=========================================\\n\")\n\ncurrdate = str(currentdate).split(\"-\")\ncurrentyear = int(currdate[0])\ncurrentmonth = int(currdate[1])\ncurrentdate = int(currdate[2])\n\nwhile True :\n print ('there is a proverbs said that \\n\"your maturity are not defined by your age.\"\\n====================================================\\n')\n print (\"welcome to how young are you?\")\n print(\"1. start liat seberapa mudanya anda?\")\n print(\"2. 
Exit\")\n choose = int(input(\">>_ \"))\n x = False\n if (choose == 2) :\n print(\"Terima kasih telah bermain bersama saya\")\n break\n elif(choose == 1) :\n while True :\n dateinput = input(\"Masukan Tanggal yang ingin anda cek\")\n # print(date.__len__())\n if(len(dateinput) == 8) :\n # print(int(date[0:2]))\n try :\n tanggal = int(dateinput[0:2]) \n bulan = int(dateinput[2:4])\n tahun = int(dateinput[4:8])\n except :\n print(\"Tolong masukan angka saja\")\n # if type(date) is int :\n else :\n print()\n break\n # else :\n # print(\"kamu telah menepati format 8 char but, Tolong masukan angka saja\")\n else :\n print(\"Tolong input panjang 8 angka saja\") \n checkleap = pengeccekan_leapYear(tanggal, bulan, tahun)\n if bulan < 13 and bulan > 0 :\n if checkleap.__eq__(\"Leap Year\") :\n if bulan == 2 :\n if tanggal > 0 and tanggal < 30 :\n x = True\n else :\n print(\"Tolonag masukan tanggal yang benar bulan ini hanya mempunyai 29 hari karena bulan 2 tahun kabisat\")\n elif bulan == 1 or bulan == 3 or bulan == 5 or bulan == 7 or bulan == 8 and bulan == 10 and bulan == 12 :\n if tanggal > 0 and tanggal <= 31 :\n x = True\n else :\n print(\"Tolonag masukan tanggal yang benar bulan ini hanya mempunyai 31 hari\")\n elif bulan == 4 or bulan == 6 or bulan == 9 or bulan == 11 :\n if tanggal > 0 and tanggal <= 30 :\n x = True\n else :\n print(\"Tolonag masukan tanggal yang benar bulan ini hanya mempunyai 30 hari\")\n elif checkleap.__eq__(\"Not a Leap year\") :\n if bulan == 2 :\n if tanggal > 0 and tanggal < 29 :\n x = True\n else :\n print(\"Tolonag masukan tanggal yang benar bulan ini hanya mempunyai 28 hari karena bulan 2 tahun non-kabisat\")\n elif bulan == 1 or bulan == 3 or bulan == 5 or bulan == 7 or bulan == 8 and bulan == 10 and bulan == 12 :\n if tanggal > 0 and tanggal <= 31 :\n x = True\n else :\n print(\"Tolonag masukan tanggal yang benar bulan ini hanya mempunyai 31 hari\")\n elif bulan == 4 or bulan == 6 or bulan == 9 or bulan == 11 :\n if tanggal > 0 and tanggal <= 30 :\n x = True\n else :\n print(\"Tolong masukan tanggal yang benar bulan ini hanya mempunyai 30 hari\")\n print (\"anda lahir pada tahun \", tahun) \n print (\"anda lahir pada bulan \", bulan) \n print (\"anda lahir pada tanggal {}\".format(tanggal))\n\n \n else : \n print(\"Tolong format masukan bulan yang benar\")\n umur = pengecekan_Umur(tanggal, bulan, tahun)\n if (umur != None) :\n print(\"\\numur anda adalah {} \\n\".format(umur))\n input(\"Press enter to continue..\")\n\n \n\n \n\n\n\n\n","repo_name":"VinzZ00/how-young-are-you","sub_path":"how-young-are-you.py","file_name":"how-young-are-you.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3363258160","text":"from zad6testy import runtests\n\n\ndef dfs(v, R, T, G, vis):\n for u in G[v]:\n if T[u] == -1:\n R[v] = u\n T[u] = v\n return True\n\n for u in G[v]:\n if not vis[u]:\n vis[u] = True\n res = dfs(T[u], R, T, G, vis)\n vis[u] = False\n if res:\n R[v] = u\n T[u] = v\n return True\n return False\n\n\ndef poszerz(v, R, T, G):\n vis = [False]*len(G)\n dfs(v, R, T, G, vis)\n\n\ndef binworker(G):\n n = len(G)\n R = [-1]*n\n T = [-1]*n\n for i in range(n):\n poszerz(i, R, T, G)\n s = 0\n for el in R:\n if el != -1:\n s += 1\n return s\n\n\n# zmien all_tests na True zeby uruchomic wszystkie testy\nruntests(binworker, 
all_tests=False)\n","repo_name":"ReptilianEye/ASD","sub_path":"offline/zadanie6/zad6V2.py","file_name":"zad6V2.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35373512855","text":"from rest_framework import mixins\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom src.apps.base.api.mixins import SerializerPerActionMixin\nfrom src.apps.users.models.student_groups import StudentGroup\nfrom src.apps.users.models.profiles import StudentProfile\nfrom src.apps.users.api.serializers.students_groups import (\n StudentGroupSerializer\n)\n\n\nclass StudentGroupViewSet(\n SerializerPerActionMixin,\n mixins.ListModelMixin,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n GenericViewSet,\n):\n queryset = StudentGroup.objects.all()\n action_serializer = {\n \"default\": StudentGroupSerializer,\n # \"list\": StudentGroupListSerializer,\n }\n\n @action(methods=['PUT'], detail=True, url_name='add-student',\n url_path='add-student//',\n permission_classes=(IsAuthenticated,))\n def add_student(self, request, **kwargs):\n student_group = self.get_object()\n student = StudentProfile.objects.get(pk=kwargs['student_pk'])\n student.group = student_group\n student.save()\n return Response\n","repo_name":"bezzyd/student_diary","sub_path":"src/apps/users/api/views/students_groups.py","file_name":"students_groups.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1523752592","text":"from turtle import Turtle, register_shape\n\n\nclass Shot(Turtle):\n\n #register_shape(\"alien.gif\")\n\n def __init__(self, xpos):\n super().__init__()\n self.shape(\"circle\")\n self.color(\"yellow\")\n self.shapesize(stretch_wid=0.3, stretch_len=1)\n self.penup()\n self.active = 1\n self.setpos(xpos, -250)\n self.setheading(90)\n\n def move(self):\n if self.active == 1:\n self.forward(5)\n if self.ycor() > 300:\n self.active = 0\n self.color(\"black\")","repo_name":"arjunbrara123/space_invaders","sub_path":"shot.py","file_name":"shot.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21931340841","text":"from collections import namedtuple\nfrom pathlib import Path\n\nimport fire\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, \\\n MaxPool2d, AdaptiveAvgPool2d, Sequential, Module\nfrom torchvision.datasets.folder import is_image_file\nfrom torchvision.transforms.functional import to_tensor, normalize\nfrom tqdm import tqdm\n\n\n################################## Original Arcface Model #############################################################\n\nclass Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\ndef l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n return output\n\n\nclass SEModule(Module):\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = Conv2d(\n channels, channels // reduction, kernel_size=1, padding=0, bias=False)\n self.relu = ReLU(inplace=True)\n self.fc2 = Conv2d(\n 
channels // reduction, channels, kernel_size=1, padding=0, bias=False)\n self.sigmoid = Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\n\nclass bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut\n\n\nclass bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR_SE, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16)\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut\n\n\nclass Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):\n '''A named tuple describing a ResNet block.'''\n\n\ndef get_block(in_channel, depth, num_units, stride=2):\n return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]\n\n\ndef get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n return blocks\n\n\nclass Backbone(Module):\n def __init__(self, num_layers, drop_ratio, mode='ir'):\n super(Backbone, self).__init__()\n assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'\n assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'\n blocks = get_blocks(num_layers)\n if mode == 'ir':\n unit_module = bottleneck_IR\n elif mode == 'ir_se':\n unit_module = bottleneck_IR_SE\n self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),\n BatchNorm2d(64),\n PReLU(64))\n self.output_layer = Sequential(BatchNorm2d(512),\n Dropout(drop_ratio),\n Flatten(),\n Linear(512 * 7 * 7, 512),\n BatchNorm1d(512))\n modules = []\n for block in blocks:\n for bottleneck in block:\n modules.append(\n unit_module(bottleneck.in_channel,\n bottleneck.depth,\n bottleneck.stride))\n self.body = Sequential(*modules)\n\n def forward(self, x):\n x = 
self.input_layer(x)\n x = self.body(x)\n x = self.output_layer(x)\n return l2_norm(x)\n\n\n################################## MobileFaceNet #############################################################\n\nclass Conv_block(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(Conv_block, self).__init__()\n self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding,\n bias=False)\n self.bn = BatchNorm2d(out_c)\n self.prelu = PReLU(out_c)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.prelu(x)\n return x\n\n\nclass Linear_block(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(Linear_block, self).__init__()\n self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding,\n bias=False)\n self.bn = BatchNorm2d(out_c)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\nclass Depth_Wise(Module):\n def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):\n super(Depth_Wise, self).__init__()\n self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1))\n self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride)\n self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))\n self.residual = residual\n\n def forward(self, x):\n if self.residual:\n short_cut = x\n x = self.conv(x)\n x = self.conv_dw(x)\n x = self.project(x)\n if self.residual:\n output = short_cut + x\n else:\n output = x\n return output\n\n\nclass Residual(Module):\n def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):\n super(Residual, self).__init__()\n modules = []\n for _ in range(num_block):\n modules.append(\n Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups))\n self.model = Sequential(*modules)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass MobileFaceNet(Module):\n def __init__(self, embedding_size):\n super(MobileFaceNet, self).__init__()\n self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))\n self.conv2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)\n self.conv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128)\n self.conv_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256)\n self.conv_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512)\n self.conv_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv_6_sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))\n self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0))\n self.conv_6_flatten = Flatten()\n self.linear = Linear(512, embedding_size, bias=False)\n self.bn = BatchNorm1d(embedding_size)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2_dw(out)\n out = self.conv_23(out)\n out = self.conv_3(out)\n out = self.conv_34(out)\n out = self.conv_4(out)\n out = self.conv_45(out)\n out = self.conv_5(out)\n out = 
self.conv_6_sep(out)\n out = self.conv_6_dw(out)\n out = self.conv_6_flatten(out)\n out = self.linear(out)\n out = self.bn(out)\n return l2_norm(out)\n\n\nclass FaceFeatures(object):\n def __init__(self, weights_path, device=\"cuda\"):\n self.device = device\n self.model = MobileFaceNet(512).to(device)\n self.model.load_state_dict(torch.load(weights_path))\n self.model.eval()\n\n @staticmethod\n def simple_crop(images: torch.Tensor):\n h, w = images.size(-2), images.size(-1)\n top = int(h / 2.1 * (0.8 - 0.33))\n bottom = int(h - (h / 2.1 * 0.3))\n size = bottom - top\n left = int(w / 2 - size / 2)\n right = left + size\n batch_tensor = images[:, :, top: bottom, left: right]\n\n return F.interpolate(batch_tensor, size=[112, 112], mode='bilinear', align_corners=True)\n\n def infer(self, batch_tensor):\n features = self.model(self.simple_crop(batch_tensor))\n return features\n\n def cosine_similarity(self, batch_tensor1, batch_tensor2):\n feature1 = self.infer(batch_tensor1)\n feature2 = self.infer(batch_tensor2)\n return torch.cosine_similarity(feature1, feature2)\n\n\ndef load_image(image_path, device=\"cuda\"):\n img = to_tensor(Image.open(image_path))\n img = img.unsqueeze_(dim=0).to(device)\n img = normalize(img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n return img\n\n\ndef main(\n image_folder,\n original_image_folder=None,\n weights_path=\"./pretrained_models/model_mobilefacenet.pth\",\n device=\"cuda\"\n):\n \"\"\"\n calculate cosine similarity with ArcFace\n :param image_folder: folder contains faces.\n :param original_image_folder: if specified, use the same name file in this folder as original faces.\n :param weights_path:\n :param device:\n :return:\n \"\"\"\n image_folder = Path(image_folder)\n assert image_folder.exists()\n if original_image_folder is not None:\n original_image_folder = Path(original_image_folder)\n assert original_image_folder.exists()\n\n fv = FaceFeatures(weights_path, device=device)\n results = []\n pbar = tqdm(filter(lambda path: is_image_file(path.name), image_folder.glob(\"*\")))\n for image_file in pbar:\n if original_image_folder is None:\n vcs = fv.cosine_similarity(*torch.chunk(load_image(image_file), 2, dim=-1))\n else:\n cur_image = load_image(image_file, device=device)\n zero_image = load_image(original_image_folder / image_file.name, device=device)\n vcs = fv.cosine_similarity(cur_image, zero_image)\n pbar.set_description(f\"cosine_similarity: {vcs.item():.4f}\")\n results.append(vcs)\n\n print(f\"results: {torch.FloatTensor(results).mean().item():.4f}\")\n\n\nif __name__ == '__main__':\n torch.set_grad_enabled(False)\n fire.Fire(main)\n","repo_name":"budui/Control-Units-in-StyleGAN2","sub_path":"tools/evaluate/arcface_cosine_similarity.py","file_name":"arcface_cosine_similarity.py","file_ext":"py","file_size_in_byte":12432,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"73962834505","text":"import warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom imaginaire.layers import Conv2dBlock, Res2dBlock\nfrom imaginaire.third_party.upfirdn2d import BlurDownsample\n\n\nclass ResDiscriminator(nn.Module):\n r\"\"\"Global residual discriminator.\n\n Args:\n image_channels (int): Num. of channels in the real/fake image.\n num_filters (int): Num. of base filters in a layer.\n max_num_filters (int): Maximum num. of filters in a layer.\n first_kernel_size (int): Kernel size in the first layer.\n num_layers (int): Num. 
of layers in discriminator.\n        padding_mode (str): Padding mode.\n        activation_norm_type (str): Type of activation normalization.\n            ``'none'``, ``'instance'``, ``'batch'``, ``'sync_batch'``.\n        weight_norm_type (str): Type of weight normalization.\n            ``'none'``, ``'spectral'``, or ``'weight'``.\n        aggregation (str): Method to aggregate features across different\n            locations in the final layer. ``'conv'``, or ``'pool'``.\n        order (str): Order of operations in the residual link.\n        anti_aliased (bool): If ``True``, uses anti-aliased pooling.\n    \"\"\"\n\n    def __init__(self,\n                 image_channels=3,\n                 num_filters=64,\n                 max_num_filters=512,\n                 first_kernel_size=1,\n                 num_layers=4,\n                 padding_mode='zeros',\n                 activation_norm_type='',\n                 weight_norm_type='',\n                 aggregation='conv',\n                 order='pre_act',\n                 anti_aliased=False,\n                 **kwargs):\n        super().__init__()\n        for key in kwargs:\n            if key != 'type' and key != 'patch_wise':\n                warnings.warn(\n                    \"Discriminator argument {} is not used\".format(key))\n\n        conv_params = dict(padding_mode=padding_mode,\n                           activation_norm_type=activation_norm_type,\n                           weight_norm_type=weight_norm_type,\n                           nonlinearity='leakyrelu')\n\n        first_padding = (first_kernel_size - 1) // 2\n        model = [Conv2dBlock(image_channels, num_filters,\n                             first_kernel_size, 1, first_padding,\n                             **conv_params)]\n        for _ in range(num_layers):\n            num_filters_prev = num_filters\n            num_filters = min(num_filters * 2, max_num_filters)\n            model.append(Res2dBlock(num_filters_prev, num_filters, order=order,\n                                    **conv_params))\n            if anti_aliased:\n                model.append(BlurDownsample())\n            else:\n                model.append(nn.AvgPool2d(2, stride=2))\n        if aggregation == 'pool':\n            model += [torch.nn.AdaptiveAvgPool2d(1)]\n        elif aggregation == 'conv':\n            model += [Conv2dBlock(num_filters, num_filters, 4, 1, 0,\n                                  nonlinearity='leakyrelu')]\n        else:\n            raise ValueError('The aggregation mode %s is not recognized.'\n                             % aggregation)\n        self.model = nn.Sequential(*model)\n        self.classifier = nn.Linear(num_filters, 1)\n\n    def forward(self, images):\n        r\"\"\"Global residual discriminator forward.\n\n        Args:\n            images (tensor) : Input images.\n        Returns:\n            (tuple):\n                - outputs (tensor): Output of the discriminator.\n                - features (tensor): Intermediate features of the discriminator.\n                - images (tensor): Input images.\n        \"\"\"\n        batch_size = images.size(0)\n        features = self.model(images)\n        outputs = self.classifier(features.view(batch_size, -1))\n        return outputs, features, images\n","repo_name":"NVlabs/imaginaire","sub_path":"imaginaire/discriminators/residual.py","file_name":"residual.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":3891,"dataset":"github-code","pt":"81"} +{"seq_id":"26891101057","text":"import numpy as np\nfrom glob import glob\nimport pandas as pd\nimport pyvoro\nimport sys, h5py\n\nsys.path.append('/cosma/home/dp004/dc-armi2/pywrap_HOD_fitting/src/')\nfrom hod import *\nfrom tpcf_obs import *\nfrom chi2 import *\nfrom astropy.cosmology import Planck15 as cosmo\n\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\n\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nvoro_dir = '/cosma7/data/dp004/dc-armi2/HOD_mocks/galaxy_catalogues/boxes/family/marks/'\n\nM = sys.argv[1]\nredshift = sys.argv[2]\nLbox=768\nNslices = 20\nls = Lbox/float(Nslices)\nOm0 = cosmo.Om0\nOl0 = 1 - cosmo.Om0\nz_snap = 0.3\n\nHz = 100.0*np.sqrt(Om0*(1.0+z_snap)**3 + Ol0)\nHz_i = (1+z_snap)/Hz\n\nlist_i = np.sort(glob('/cosma7/data/dp004/dc-armi2/HOD_mocks/galaxy_catalogues/boxes/family/mocks/Galaxy_'+M+'_'+redshift+'_*'))\n\nlist_all 
= np.array_split(list_i[:112],size)\nlist_chunk = list_all[rank]\n\nfor l in list_chunk:\n cat_name = l.split('/')[-1]\n G1 = np.load(l)\n #G0 = HOD_mock_subhaloes(theta,haloes_table,Lbox=Lbox,weights_haloes=None) \n vz = G1[:,5]\n z_obs = G1[:,2] + vz*Hz_i\n z_obs[z_obs < 0] += Lbox\n z_obs[z_obs > Lbox] -= Lbox\n G1_obs = np.array([G1[:,0],G1[:,1],z_obs]).T\n \n G1_all = []\n vols_all= []\n for c in range(Nslices):\n zi = c*ls\n zf = (c+1)*ls\n G1_chunk = G1_obs[:,(0,1,2)][(G1_obs[:,2]>zi)&(G1_obs[:,2]<=zf)]\n\n G1_unique, unique_index, unique_inverse = np.unique(G1_chunk,axis=0,return_index=True,return_inverse=True)\n #is_unique = np.zeros(len(G1_chunk),dtype=int)\n #is_unique[unique_index] = 1 \n\n V2D = pyvoro.compute_2d_voronoi(points=G1_unique[:,(0,1)],limits=[[0.,Lbox],[0.,Lbox]],dispersion=50.0,periodic=[True,True],)\n #\n voronoi_df = pd.DataFrame(data=V2D)\n #voronoi_df.to_csv(voro_dir+l+'.df.csv')\n vol2D = voronoi_df['volume'].values\n del V2D\n\n vol2D_chunk = vol2D[unique_inverse]\n\n# comm.barrier()\n\n #vols_all = comm.gather(vol2D_chunk,root=0)\n #G1_all = comm.gather(G1_chunk,root=0)\n vols_all.append(vol2D_chunk)\n G1_all.append(G1_chunk)\n #if rank == 0:\n # print('gathering chunks from ranks...')\n vols_all = np.concatenate(vols_all)\n G1_all = np.concatenate(G1_all,axis=0)\n \n G1_vol = np.vstack([G1_all.T,vols_all]).T\n np.save(voro_dir+'V2D_'+cat_name.split('.npy')[0]+'_%d_slices_ssize%.1lf_sspace.npy'%(Nslices,ls),G1_vol)","repo_name":"jarmijotorres/mock_project","sub_path":"Final_pipelines/mock_marks_boxes.py","file_name":"mock_marks_boxes.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25242225118","text":"#\n# @lc app=leetcode.cn id=392 lang=python3\n#\n# [392] 判断子序列\n#\n\n# @lc code=start\n# class Solution:\n# def isSubsequence(self, s: str, t: str) -> bool:\n# if not s:\n# return True\n\n# p1 = 0\n# p2 = 0\n# while p1 < len(s) and p2 < len(t):\n# if s[p1] == t[p2]:\n# p1 += 1\n# if p1 == len(s):\n# return True\n# p2 += 1\n# return False\n\n\nclass Solution:\n def isSubsequence(self, s: str, t: str) -> bool:\n if not s:\n return True\n if not t:\n return False\n n = len(t)\n dp = [[-1] * 26 for i in range(n)]\n dp[-1][ord(t[-1]) - ord('a')] = n - 1\n for i in range(n - 2, -1, -1):\n for j in range(26):\n dp[i][j] = i if ord(t[i]) - ord('a') == j else dp[i + 1][j]\n cur = 0\n for c in s:\n if cur >= n:\n return False\n new_cur = dp[cur][ord(c) - ord('a')]\n if new_cur == -1:\n return False\n cur = new_cur + 1\n return True\n# @lc code=end\n","repo_name":"LeungLoh/algorithm","sub_path":"LeetCode/392.判断子序列.py","file_name":"392.判断子序列.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39708843453","text":"import itertools\n\nimport lrs\n\nimport logging, logging_config\nlogger = logging.getLogger(__name__)\n\n\nclass CrissCross(lrs.Lrs):\n def __init__(self, hyperplane_matrix, m, d, bounding_box=None):\n super().__init__(hyperplane_matrix, m, d, bounding_box=bounding_box)\n\n def select_pivot(self):\n basis_index = self.d\n cobasis_index = 0\n for i in range(self.d, self.d + self.m):\n if basis_index <= self.m and self.B[basis_index] == i:\n if self.matrix[self.B.order[basis_index]][0] < 0:\n # Bi is primal infeasible\n for cobasis_index, c in enumerate(self.C[:-1]):\n if self.boxed and not self.pivot_stays_in_box(basis_index, cobasis_index):\n continue\n 
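# Descriptive note (added comment, not in the upstream source): the leaving row\n                        # B[basis_index] is primal infeasible (negative right-hand side), so the\n                        # least-index criss-cross rule only accepts an entering column whose entry\n                        # in that row is strictly positive, restoring feasibility after the pivot.\n                        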
if self.matrix[self.B.order[basis_index]][self.C.order[cobasis_index]] > 0:\n logger.detailed_debug('Primal infeasible! pivot i={}, j={}'.format(\n basis_index, cobasis_index)\n )\n return basis_index, cobasis_index\n if not self.boxed:\n raise ValueError\n basis_index += 1\n elif cobasis_index < self.d and self.C[cobasis_index] == i:\n if self.matrix[0][self.C.order[cobasis_index]] > 0:\n # Ci is dual infeasible\n for basis_index, b in enumerate(self.B):\n if b < self.d:\n continue\n if self.boxed and not self.pivot_stays_in_box(basis_index, cobasis_index):\n continue\n if self.matrix[self.B.order[basis_index]][self.C.order[cobasis_index]] < 0:\n logger.detailed_debug('Dual infeasible! pivot i={}, j={}'.format(\n basis_index, cobasis_index)\n )\n return basis_index, cobasis_index\n if not self.boxed:\n # todo Is it possible to have dual infeasible solutions with no valid pivot?\n raise ValueError\n cobasis_index += 1\n return 0, 0\n\n def necessary_condition_for_reverse(self):\n def lower_index_pivot(k, k_in_cobasis=False):\n i = self.i if k_in_cobasis else k\n j = k if k_in_cobasis else self.j\n m_i = self.B.order[self.i] if k_in_cobasis else self.B.order[k]\n m_j = self.C.order[k] if k_in_cobasis else self.C.order[self.j]\n pivot_element = self.matrix[m_i][m_j]\n if (k_in_cobasis and pivot_element < 0) or (not k_in_cobasis and pivot_element > 0):\n if not self.boxed or self.pivot_stays_in_box(i, j):\n return True\n return False\n\n if self.boxed and not self.pivot_stays_in_box(self.i, self.j):\n return False\n\n if self.matrix[self.B.order[self.i]][0] > 0:\n if (\n self.matrix[self.B.order[self.i]][self.C.order[self.j]] > 0 and\n not any(lower_index_pivot(k, k_in_cobasis=True)\n for k in\n range(0, self.max_index_of_smaller_number(self.C, self.B[self.i]) + 1)\n )\n ):\n return True\n if self.matrix[0][self.C.order[self.j]] < 0:\n if (self.matrix[self.B.order[self.i]][self.C.order[self.j]] < 0 and\n not any(lower_index_pivot(k, k_in_cobasis=False)\n for k in\n range(self.d, self.max_index_of_smaller_number(self.C, self.C[self.j]) + 1)\n )):\n return True\n logger.detailed_debug('Necessary Condition for reverse not fulfilled!')\n return False\n\n def forest_search(self):\n degenerated_basis_vars = self.get_degenericies()\n search_status = lrs.SearchStatus.NONE\n if len(degenerated_basis_vars) == 0:\n search = self.search()\n while search_status != lrs.SearchStatus.DONE:\n search_status = search.__next__()\n yield search_status\n else:\n logging.info('Degenerated start basis: Start forest search!')\n degeneracy = self.C[:-1] + degenerated_basis_vars\n degeneracy_hyperplanes = [v.hyperplane_index for v in degeneracy]\n start_bases_by_hyperplanes = itertools.combinations(degeneracy_hyperplanes, self.d - 1)\n self.append_solution(check_lexicographic_order=False)\n for basis_hyperplanes in start_bases_by_hyperplanes:\n logging.info(f'Start tree search with start hyperplanes {basis_hyperplanes}')\n self.move_to_hyperplanes(basis_hyperplanes)\n search_status = lrs.SearchStatus.NEWTREE\n yield search_status\n search = self.search()\n while True:\n search_status = search.__next__()\n if search_status == lrs.SearchStatus.DONE:\n break\n yield search_status\n yield lrs.SearchStatus.DONE\n","repo_name":"LovisAnderson/lrspy","sub_path":"crisscross.py","file_name":"crisscross.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14698100398","text":"# Name : Andrew Devito Aryo\n# NPM : 2306152494\n# TA 
Code : GAN\n\n# Take input for Student's First and Last Name\nname = input(\"Enter name: \").title()\n\n# Take input for three exams\nexam1 = int(input(\"Enter the score for Exam 1: \"))\nexam2 = int(input(\"Enter the score for Exam 2: \"))\nexam3 = int(input(\"Enter the score for Exam 3: \"))\n\n# Calculate the average and total grade\n# Using string formatting to display only 2 digits after decimal point\naverage_exam = f\"{((exam1 + exam2 + exam3) / 3):.2f}\"\n\ntotal_exam = str(exam1 + exam2 + exam3)\n\n# Take the total seconds as input\ntotal_seconds = int(input(\"Enter the total seconds taken for the exams: \"))\n\n# Calculate the hours, minutes, and remaining seconds\n# Use the operators // and %\nhours = str(total_seconds // 3600) # Get Hours by divide-floor total_seconds by 3600 (1 hour = 3600 seconds)\nminutes = str((total_seconds % 3600) // 60) # Get Minutes by get the remainder of total_seconds divided by 3600, and then divide-floor it by 60 (1 Minutes = 60 seconds)\nseconds = str((total_seconds % 3600) % 60) # Get Seconds by get the remainder of total_seconds divided by 3600, and then get the remainder of it divided by 60\n\n# Format and print the feedback messages\n# Message 1\nprint(\"\\n---\", name, \"---\")\nprint(\"Exam Scores:\", str(exam1) + \", \" + str(exam2) + \", \" + str(exam3))\nprint(\"Total Scores:\", total_exam)\nprint(\"Average Score:\", average_exam)\nprint(\"Time taken:\", hours, \"Hours\", minutes, \"Minutes\", seconds, \"Seconds\")\n\n# Message 2\nprint(\"\\n--- Message for\", name, \"---\")\nprint(\"Hey, \" + name + \". You got exam scores of \" \n + str(exam1) + \", \" + str(exam2) + \", and \" + str(exam3) + \" with total of \"\n + total_exam + \" and average of \" + average_exam + \". The total time taken is \" \n + hours + \" Hours \" + minutes + \" Minutes \" + seconds + \" Seconds.\")","repo_name":"Andrew4Coding/ddp1-workspace","sub_path":"Lab/LAB1/GAN_AndrewDevitoAryo_2306152494_Lab01.py","file_name":"GAN_AndrewDevitoAryo_2306152494_Lab01.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39109249957","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nlearning_rates = [0.1, 0.01, 0.0001, 0.005, 0.001]\nfor r in learning_rates:\n plt.figure(figsize=(8,6), dpi=80)\n plt.subplot(1,1,1)\n l = np.load(\"log_q1_{}_loss.npy\".format(r))\n plt.plot(l, linewidth=1.0, linestyle='-', label=\"Cross-Entropy Loss\")\n plt.title(\"Weight-Decay λ=0.01, Mini-Batch Size B=500, Training Rate η = \"+str(r), fontsize=10)\n plt.xlabel(\"# of Epochs\")\n plt.ylabel(\"Training Loss (Cross-Entropy Loss)\")\n plt.legend(title=\"Training Rate\", loc=\"upper right\")\n plt.grid('on', linestyle='-', linewidth=0.5)\n plt.savefig(\"learning_{}_loss.pdf\".format(r), format=\"pdf\")\n\n plt.figure(figsize=(8,6), dpi=80)\n plt.subplot(1,1,1)\n v = np.load(\"log_q1_{}_valid_acc.npy\".format(r))\n t = np.load(\"log_q1_{}_train_acc.npy\".format(r))\n plt.plot(v, linewidth=1.0, linestyle='-', label=\"Validation Data Accuracy\")\n plt.plot(t, linewidth=1.0, linestyle='-', label=\"Training Data Accuracy\")\n plt.title(\"Weight-Decay λ=0.01, Mini-Batch Size B=500, Training Rate η = \"+str(r), fontsize=10)\n plt.xlabel(\"# of Epochs\")\n plt.ylabel(\"Accuracy (Percentage Correct)\")\n plt.axhline(linewidth=2, color='r', y=1)\n plt.legend(title=\"Data Set\", loc=\"lower right\")\n plt.grid('on', linestyle='-', linewidth=0.5)\n 
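# Note (added comment): savefig must run before the next plt.figure() call opens a\n    # fresh canvas, otherwise this learning rate's accuracy curves would be discarded.\n    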
plt.savefig(\"learning_{}_accuracy.pdf\".format(r), format=\"pdf\")\n\nplt.figure(figsize=(8,6), dpi=80)\nplt.subplot(1,1,1)\nl01 = np.load(\"log_q1_0.1_loss.npy\")\nl001 = np.load(\"log_q1_0.01_loss.npy\")\nl0001 = np.load(\"log_q1_0.001_loss.npy\")\nl0005 = np.load(\"log_q1_0.005_loss.npy\")\nl00001 = np.load(\"log_q1_0.0001_loss.npy\")\nplt.plot(l01, linewidth=1.0, linestyle='-', label=\"0.1\")\nplt.plot(l001, linewidth=1.0, linestyle='-', label=\"0.01\")\nplt.plot(l0001, linewidth=1.0, linestyle='-', label=\"0.001\")\nplt.plot(l0005, linewidth=1.0, linestyle='-', label=\"0.005\")\nplt.plot(l00001, linewidth=1.0, linestyle='-', label=\"0.0001\")\nplt.title(\"Effect of Learning Rate on Training Loss vs. Epoch\")\nplt.xlabel(\"# of Epochs\")\nplt.ylabel(\"Training Loss (Cross-Entropy Loss)\")\nplt.legend(title=\"Training Rate\", loc=\"upper right\")\nplt.grid('on', linestyle='-', linewidth=0.5)\nplt.savefig(\"learning_comparison_loss.pdf\", format=\"pdf\")\n\n\nplt.figure(figsize=(8,6), dpi=80)\nplt.subplot(1,1,1)\nl01 = np.load(\"log_q1_0.1_loss.npy\")\nv01 = np.load(\"log_q1_0.1_v_loss.npy\")\nplt.plot(l01, linewidth=1.0, linestyle='-', label=\"Training Set Loss\")\nplt.plot(v01, linewidth=1.0, linestyle='-', label=\"Validation Set Loss\")\nplt.title(\"Optimal Training and Validation Loss Curves\")\nplt.xlabel(\"# of Epochs\")\nplt.ylabel(\"Training Loss (Cross-Entropy Loss)\")\nplt.legend(title=\"Data Set Cross-Entropy Loss\", loc=\"upper right\")\nplt.grid('on', linestyle='-', linewidth=0.5)\nplt.savefig(\"learning_0.1_optimal_loss.pdf\", format=\"pdf\")\n\n\nplt.figure(figsize=(8,6), dpi=80)\nplt.subplot(1,1,1)\nva01 = np.load(\"log_q1_0.1_valid_acc.npy\")\nta01 = np.load(\"log_q1_0.1_train_acc.npy\")\nplt.plot(va01, linewidth=1.0, linestyle='-', label=\"Training Set Accuracy\")\nplt.plot(ta01, linewidth=1.0, linestyle='-', label=\"Validation Set Accuracy\")\nplt.title(\"Optimal Training and Validation Accuracy Curves\")\nplt.xlabel(\"# of Epochs\")\nplt.ylabel(\"Accuracy (Percentage Correctly Classified)\")\nplt.legend(title=\"Data Set Accuracy\", loc=\"lower right\")\nplt.grid('on', linestyle='-', linewidth=0.5)\nplt.savefig(\"learning_0.1_optimal_acc.pdf\", format=\"pdf\")\n\n","repo_name":"connorjsmith/ece521","sub_path":"a2/q2.1.1/graph_this.py","file_name":"graph_this.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12334757124","text":"from characteristic import Attribute, attributes\n\nfrom cycy import include\n\n\n@attributes(\n [Attribute(name=\"includers\")],\n apply_with_init=False,\n)\nclass Preprocessor(object):\n def __init__(self, includers=None):\n if includers is None:\n includers = []\n self.includers = includers + _DEFAULT_INCLUDE\n\n def preprocessed(self, tokens, parser):\n \"\"\"\n Preprocess a stream of tokens.\n\n \"\"\"\n\n for token in tokens:\n if token.name == \"INCLUDE\":\n name = next(tokens).value.strip('\"')\n included = self.include(name=name, parser=parser)\n for token in included.tokens:\n yield token\n else:\n yield token\n\n def include(self, name, parser):\n for includer in self.includers:\n try:\n return includer.include(name=name, parser=parser)\n except include.NotFound:\n pass\n raise include.NotFound(path=name, searched=self.includers)\n\n\ndef with_directories(directories):\n return Preprocessor(\n includers=[\n include.DirectoryIncluder(path=directory)\n for directory in directories\n ],\n )\n\n\n_DEFAULT_INCLUDE = [\n 
include.DirectoryIncluder(path=\"/usr/local/include/\"),\n include.DirectoryIncluder(path=\"/usr/include/\"),\n]\n","repo_name":"Python3pkg/cycy","sub_path":"cycy/parser/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"5928750873","text":"# data_processory.py\n# Purpose: Parse data from csv to use for ML\n# Notes: CSV file must have header row, and contain only float type metrics\n\nimport csv\nimport numpy as np\n\n'''\ndef main():\n\tFEATURE_INDECIES = [3, 5, 6, 7, 10, 12, 13, 14, 15]\n\tFILENAME = \"raw_data.csv\"\n\tcolumn_names = get_column_names(FILENAME, FEATURE_INDECIES)\n\tprint(column_names)\n\tdataset = csv_to_array(FILENAME, FEATURE_INDECIES)\n\tprint(dataset)\n'''\n\n\n# Function:\tget_column_names\n# Parameters: filename - string name of csv file to be parsed\n#\t\t\t column_indecies - array of indecies we want to parse\n# Returns: column_names - 1D array of strings holding the names of columns we parsed\ndef get_column_names(filename, column_indecies):\n\tdata_f = open(filename)\n\tdata_csv = csv.reader(data_f)\n\tcolumn_names = []\n\tfor row in data_csv:\n\t\tfor i in column_indecies:\n\t\t\tcolumn_names.append(row[i])\n\t\tbreak\n\treturn column_names\n\n\n\n# Function:\tcsv_to_array\n# Parameters: filename - string name of csv file to be parsed\n#\t\t\t column_indecies - array of indecies we want to parse\n# Returns: dataset - 2D array of floats holding the data from columns we parsed\ndef csv_to_array(filename, column_indecies):\n\tdata_f = open(filename)\n\tdata_csv = csv.reader(data_f)\n\tdataset = []\n\tfloat_row = []\n\tnext(data_csv)\n\tfor row in data_csv:\n\t\tformatted_row = []\n\t\tfor i in column_indecies:\n\t\t\tformatted_row.append(row[i])\n\t\tfloat_row = list(map(float, formatted_row))\t\n\t\tdataset.append(float_row)\n\treturn dataset\n\ndef string_conversion(filename, column_indecies):\n\tdata_f = open(filename)\n\tdata_csv = csv.reader(data_f)\n\tdataset = []\n\tfloat_row = []\n\tnext(data_csv)\n\tfor row in data_csv:\n\t\tformatted_row = []\n\t\tfor i in column_indecies:\n\t\t\tformatted_row.append(row[i])\n\t\tformatted_row[1] = float(formatted_row[1])\n\t\tformatted_row[2] = float(formatted_row[2])\n\t\tformatted_row[3] = float(formatted_row[3])\n\t\tformatted_row[5] = float(formatted_row[5])\n\t\tformatted_row[6] = float(formatted_row[6])\n\t\tformatted_row[7] = float(formatted_row[7])\n\t\tdataset.append(formatted_row)\n\treturn dataset\n\n\n'''\nif __name__ == \"__main__\":\n\tmain()\n'''\n\n","repo_name":"jamie-weiss/MLB-betting-AI","sub_path":"betting_DNN/data_processor_2.py","file_name":"data_processor_2.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"72342635396","text":"def parse_second(data: str) -> ([{int}], {int}, [[int]], [int]):\r\n [req, our, nearby] = data.split(\"\\n\\n\")\r\n\r\n oksets = []\r\n okset = set()\r\n for line in req.split('\\n'):\r\n words = line.split(' ')\r\n\r\n temp_set = set()\r\n for r in [-1, -3]:\r\n [start, stop] = map(int, words[r].split('-'))\r\n\r\n okset |= set(range(start, stop+1))\r\n temp_set |= set(range(start, stop+1))\r\n\r\n oksets.append(temp_set)\r\n\r\n tickets = [[int(x) for x in line.split(',')] for line in nearby.split('\\n')[1:]]\r\n\r\n our = list(map(int, our.split(\"\\n\")[1].split(\",\")))\r\n\r\n return oksets, okset, tickets, our\r\n\r\n\r\ndef 
valid_ticket(ticket: [int], okset: {int}) -> bool:\r\n    return all(map(lambda x: x in okset, ticket))\r\n\r\n\r\ndef second():\r\n    with open('input', 'r') as f:\r\n        oksets, okset, tickets, our = parse_second(f.read())\r\n\r\n    tickets = [ticket for ticket in tickets if valid_ticket(ticket, okset)]\r\n\r\n    possible = [set(range(len(oksets))) for i in oksets]\r\n\r\n    print(possible)\r\n\r\n    for ticket in tickets:\r\n        for posset, el in zip(possible, ticket):  # posset: field indices still possible for this column\r\n            remove = []\r\n            for pos in posset:\r\n                if el not in oksets[pos]:\r\n                    remove.append(pos)\r\n\r\n            for pos in remove:\r\n                posset.remove(pos)\r\n\r\n    fields = [-1] * len(possible)\r\n\r\n    for col, poss in sorted(enumerate(possible), key=lambda x: len(x[1])):\r\n        for field in poss:\r\n            if fields[field] == -1:\r\n                fields[field] = col\r\n                break\r\n\r\n    cols = fields[0:6]\r\n    print(cols)\r\n\r\n    prod = 1\r\n    for col in cols:\r\n        prod *= our[col]\r\n\r\n    print(prod)\r\n\r\n\r\nsecond()","repo_name":"KvGeijer/Advent_of_Code_2020","sub_path":"Day16/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8983027982","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Spider, Request\nfrom lagou.items import LagouItem\nfrom lagou.atlogin import aotulogin\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nimport lxml\nimport json\nimport time\nimport requests\nimport random\n\nclass LgSpider(scrapy.Spider):\n\n    # pages whose data was fetched successfully\n    success_pages = []\n\n    name = 'lg'\n    allowed_domains = ['lagou.com']\n    start_urls = ['http://lagou.com/',\n                  'https://www.lagou.com/jobs/positionAjax.json?']\n\n    # addrrKey = input(\"Enter a location: \")\n    # carrerKey = input(\"Enter a job title: \")\n\n    addrrKey = \"广州\"\n    carrerKey = \"前端\"\n\n    # log in automatically and obtain a selenium driver to work with\n    driver = aotulogin()\n\n    # URL templates\n    page_url = \"https://www.lagou.com/jobs/list_{}?city={}&cl=false&fromSearch=true&labelWords=&suginput=\".format(carrerKey, addrrKey)\n    base_url = \"https://www.lagou.com/jobs/positionAjax.json?\"\n\n    # request headers\n    headers = {\n        'Host': 'www.lagou.com',\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:60.0) Gecko/20100101 Firefox/60.0',\n        'Referer': 'https://www.lagou.com/jobs/list_python?oquery=android&fromSearch=true&labelWords=relative&city=%E6%B7%B1%E5%9C%B3',\n    }\n\n    # request parameters\n    params = {\n        'city': addrrKey.encode('utf-8'),\n        'kd': carrerKey.encode('utf-8'),\n        'pn': '1',\n        'px': 'new',  # sort by newest postings first\n    }\n\n    def fetch_page(self):\n        self.driver.get(self.page_url)\n\n        # wait until the pager has rendered before reading the page count\n        WebDriverWait(self.driver, 10).until(\n            EC.presence_of_element_located((By.CLASS_NAME, 'pager_is_current'))\n        )\n        elements = self.driver.find_elements_by_xpath(\"//span[@class='pager_not_current']\")\n\n        return elements[-1].get_attribute('innerHTML')\n\n    def start_requests(self):\n        page_count = int(self.fetch_page())\n\n        print('-'*80)\n        print('total pages: ', page_count)\n        print('-'*80)\n\n        for page in range(1, page_count+1):\n            print('start request ...... page: ', self.params['pn'])\n            yield scrapy.FormRequest(self.base_url, headers=self.headers, formdata=self.params, callback=self.parse)\n            self.params['pn'] = str(page+1)\n\n    def parse(self, response):\n        text = json.loads(response.text)\n\n        try:\n            content = text['content']\n        except Exception:\n            # unexpected payload (e.g. an anti-crawler page); log it and skip this page\n            print(text)\n            return\n\n        page = content['pageNo']\n        self.success_pages.append(page)\n\n        print('success page: ', page)\n\n        # parse the result list and extract the fields we need\n        results = content['positionResult']['result']\n        for result in results:\n            job = LagouItem()\n            job['job_name'] = result['positionName']\n            job['job_addr'] = result['district']\n            job['job_time'] = result['createTime']\n            job['job_limit'] = result['education'] + '、' + result['workYear']\n            job['job_salary'] = result['salary']\n            job['job_company'] = result['companyFullName']\n            job['job_company_type'] = result['financeStage'] + '、' + result['industryField']\n            job['job_vip'] = result['positionAdvantage']\n            job['page'] = page\n            job['positionId'] = result['positionId']\n            yield job\n\n","repo_name":"zhouanqi/Python-practice-questions","sub_path":"lagou/spiders/lg.py","file_name":"lg.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"42749438497","text":"import random\nwin_or_lose_count_not_changed = 0 # Win counter when the gate is not changed\nwin_or_lose_count_when_changed = 0 # Win counter when the gate is changed\ntries_range = 1000 # Number of repetitions\nnumb_gates = 3 # Number of gates in each repetition\n\n\nfor i in range(1,tries_range+1): # For loop for number of repetitions\n\n\n    x = {}\n\n    for i in range(1,numb_gates+1): # Creation of dictionary with gate numbers and their initial False bool value\n        x[i] = False\n\n    winning_gate = random.randint(1,numb_gates) # Choosing of the winning gate with random\n    x[winning_gate] = True # Altering the dictionary with the winning gate value to True bool value\n    picked_gate = random.randint(1,numb_gates) # Randomly picking up the gate by the contestant\n    unreleaved_gate = winning_gate # Creating an unrevealed gate which the player does not know the content of\n\n    if unreleaved_gate == picked_gate: # The unrevealed gate cannot be the player's picked gate, hence the while loop in the conditional\n        while unreleaved_gate == picked_gate:\n            unreleaved_gate = random.randint(1,numb_gates)\n# Not changing the gate scenario\n    if x[picked_gate]:\n        win_or_lose_count_not_changed += 1\n# Changing the gate scenario\n    picked_gate = unreleaved_gate\n    if x[picked_gate]:\n        win_or_lose_count_when_changed += 1\n\n# Final percentage calculations and print commands with the results\nwin_or_lose_count_not_changed = win_or_lose_count_not_changed /tries_range * 100\nwin_or_lose_count_when_changed = win_or_lose_count_when_changed /tries_range * 100\nprint(\"After\", tries_range, \"tries.\")\nprint(\"When gate is not changed\", win_or_lose_count_not_changed, \" %.\")\nprint(\"When gate is changed\", win_or_lose_count_when_changed, \" %.\")\n\n","repo_name":"rapnyt/Python-Learning","sub_path":"monty_hall.py","file_name":"monty_hall.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23948732760","text":"import torch\nimport cv2\nimport os\nfrom PIL import Image\nfrom torch.nn import functional\nimport numpy as np\nfrom model.coco_dataset import get_test_coco_dataset_iter\n\nfrom 
data_handler.coco_api import CocoCam\nfrom pycocotools import mask\nfrom collections import defaultdict\nimport activation.config as cf\nfrom model.coco_dataset import load_mscoco_metadata\nfrom torchvision import transforms\n# import warnings\n# warnings.filterwarnings(\"error\")\n\nimg_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\ngray_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n\n\ndef extract_features_and_pred_label_from_nn(model, data):\n \"\"\"predict the label for an image.\"\"\"\n last_conv_layer = \"layer4\"\n avg_layer = \"avgpool\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n model.eval()\n features_blobs = []\n\n def conv_layer_hook(module, grad_input, grad_output):\n features_blobs.append(grad_output.data.cpu().numpy())\n\n model._modules.get(last_conv_layer).register_forward_hook(conv_layer_hook)\n model._modules.get(avg_layer).register_forward_hook(conv_layer_hook)\n result = model(data.to(device))\n result = functional.softmax(result, dim=1).data.squeeze()\n result = torch.topk(result, k=1, dim=1)\n return result, features_blobs\n\n\ndef extract_activation_maps(model, features, pred_label, num_of_cams):\n \"\"\" class activation map.\"\"\"\n last_layer_weights = list(model.parameters())[-2]\n size_upsample = (224, 224) # verify input img size\n avg_pool_features = features[1]\n\n cams = []\n for id, each_sample_class_idx in enumerate(np.squeeze(pred_label)):\n top_activation_maps = torch.topk(last_layer_weights[each_sample_class_idx] * torch.Tensor(np.squeeze(avg_pool_features[id])),\n k=num_of_cams)\n top_activation_map_ids = top_activation_maps.indices.numpy()\n top_activation_map_weights = top_activation_maps.values.detach().numpy()\n\n each_img_cams = list()\n for cam_id, cam_weigh in zip(top_activation_map_ids, top_activation_map_weights):\n cam = features[0][id][cam_id]\n cam = np.maximum(cam, 0)\n cam -= np.min(cam)\n cam /= np.max(cam)\n each_img_cams.append((cam_id, cam, cam_weigh))\n cams.append(each_img_cams)\n return cams\n\n\ndef get_coco_samples_per_class(number_of_classes, num_of_sample_per_class):\n \"\"\" Fetch samples for each class and arrange docs in an order with the class_id\n \"\"\"\n images, labels, img_id = defaultdict(list), defaultdict(list), defaultdict(list)\n images_2, labels_2, img_id_2 = defaultdict(list), defaultdict(list), defaultdict(list)\n\n test_data_iter = get_test_coco_dataset_iter(cf.class_ids, cf.val_data_dir, cf.batch_size, cf.num_workers)\n data_ob = load_mscoco_metadata(data_type=\"val\")\n visited_classes = defaultdict(int)\n\n for data_batch, label_batch, data_id in test_data_iter:\n for data, label, id in zip(data_batch, label_batch, data_id):\n segmentation = [each_data[\"segmentation\"] for each_data in data_ob if each_data[\"file_name\"] == id]\n if not segmentation:\n continue\n label = label.numpy().item()\n if visited_classes[label] < num_of_sample_per_class:\n images[label].append(data)\n visited_classes[label] += 1\n labels[label].append(label)\n img_id[label].append(id)\n else:\n images_2[label].append(data)\n labels_2[label].append(label)\n img_id_2[label].append(id)\n\n # if (len(visited_classes) == number_of_classes) and (len(set(visited_classes.values())) == 1):\n # break\n # combine all class docs\n imgs, labels_, img_names = [], 
[], []\n for key in labels.keys():\n imgs.extend(images[key])\n labels_.extend(labels[key])\n img_names.extend(img_id[key])\n\n imgs_2, labels_2_, img_names_2 = [], [], []\n for key in labels_2.keys():\n imgs_2.extend(images_2[key])\n labels_2_.extend(labels_2[key])\n img_names_2.extend(img_id_2[key])\n\n return (torch.stack(imgs), torch.Tensor(labels_), img_names), (torch.stack(imgs_2), torch.Tensor(labels_2_), img_names_2)\n\n\nclass ResultsData:\n\n def __init__(self, model, data_to_visualize_func, num_of_cams, class_ids, val_data_dir):\n # test data\n self.t_images, self.t_labels, self.img_names = data_to_visualize_func\n # extract features and predicted label from the neural network\n self.t_topk, self.features = extract_features_and_pred_label_from_nn(model, self.t_images)\n self.probs, self.pred_label = self.t_topk\n self.probs, self.pred_label = self.probs.detach().numpy(), np.squeeze(self.pred_label.detach().numpy())\n # fetched activation maps for the predicated labels.\n self.cams = extract_activation_maps(model, self.features, self.pred_label, num_of_cams)\n self.nn_labels = extract_class_names(class_ids, cf.val_ann_file)\n # load test data\n self.data_ob = load_mscoco_metadata(data_type=\"val\")\n self.val_data_dir = val_data_dir\n\n def construct_visualization_data(self):\n \"\"\" Pre-Process Visualization data. input_image, cam1, cam2 .\"\"\"\n data_to_visualize, labels_for_vis_data, polygon_intersection = [], [], []\n for each_img, each_label, img_name, img_cams, each_pred_label in \\\n zip(self.t_images.numpy(), self.t_labels.numpy(), self.img_names, self.cams, self.pred_label):\n # input image\n img_binary_masks = []\n for i_data in self.data_ob:\n if i_data[\"file_name\"] == img_name:\n img_binary_masks = [mask.decode(i_data[\"mask\"][mask_ind]) for mask_ind in\n range(len(i_data[\"mask\"]))]\n break\n\n each_img = Image.open(os.path.join(self.val_data_dir, img_name)).convert('RGB')\n\n obj_over_img = project_object_mask(img_binary_masks, each_img, color=1)\n\n data_to_visualize.append(obj_over_img)\n labels_for_vis_data.append(self.nn_labels[each_label])\n polygon_intersection.append(img_name)\n for _, each_cam, _ in img_cams:\n # activation map\n each_cam = apply_mask_threshold(each_cam, cf.threshold_cam)\n q_measure_bin, common_mask = compute_intersection_area_using_binary_mask(each_cam, img_binary_masks)\n cam_with_img = activation_map_over_img(obj_over_img, each_cam, alpha=0.5)\n\n # polygon of activation map\n\n cam_with_polygon, heatmap_polygons = draw_heatmap_polygon(cam_with_img, each_cam)\n cam_with_polygon = cam_with_polygon.astype(int)\n\n # draw common area\n common_over_img = project_object_mask(common_mask, cam_with_polygon, color=2)\n\n data_to_visualize.append(common_over_img)\n labels_for_vis_data.append(self.nn_labels[each_pred_label])\n polygon_intersection.append(q_measure_bin)\n\n return data_to_visualize, labels_for_vis_data, polygon_intersection\n\n def construct_eval_matrix_data(self):\n \"\"\"Naive omega matrix \"\"\"\n ground_truth, prediction, q_measure = [], [], []\n for each_label, img_name, img_cams, each_pred_label in \\\n zip(self.t_labels.numpy(), self.img_names, self.cams, self.pred_label):\n\n # input image\n img_binary_masks = []\n for i_data in self.data_ob:\n if i_data[\"file_name\"] == img_name:\n img_binary_masks = [mask.decode(i_data[\"mask\"][mask_ind]) for mask_ind in\n range(len(i_data[\"mask\"]))]\n # ground truth\n ground_truth.append(each_label)\n prediction.append(each_pred_label)\n cam_q_data = list()\n for cam_id, 
each_cam, cam_weigh in img_cams:\n # threshold on activation map\n each_cam = apply_mask_threshold(each_cam, cf.threshold_cam)\n # intersection area\n q_measure_bin, common_mask = compute_intersection_area_using_binary_mask(each_cam, img_binary_masks)\n cam_q_data.append((cam_id, q_measure_bin, cam_weigh))\n\n q_measure.append(cam_q_data)\n\n return ground_truth, prediction, q_measure\n\n\ndef project_object_mask(img_binary_masks, image, color=1):\n\n alpha = 0.6\n image = np.asarray(image)\n img2 = image.copy()\n if isinstance(img_binary_masks, list):\n\n for img_binary_mask in img_binary_masks:\n bin_mask_ind = np.where(img_binary_mask > 0)\n img2[bin_mask_ind[0], bin_mask_ind[1], color] = 255\n\n else:\n bin_mask_ind = np.where(img_binary_masks > 0)\n img2[bin_mask_ind[0], bin_mask_ind[1], color] = 255\n obj_over_img = (img2 * alpha) + image * (1 - alpha)\n\n # todo: cropping is not perfect.\n image = Image.fromarray(np.uint8(obj_over_img))\n img = img_transform(image).data.numpy().transpose((1, 2, 0))\n image = normalize_image(img)\n return image\n\n\ndef compute_intersection_area_using_binary_mask(cam_mask, img_binary_masks):\n # img_binary_mask = cv2.resize(img_binary_mask, (224, 224, 3))\n img_binary_mask_0 = np.squeeze(gray_transform(Image.fromarray(img_binary_masks[0])).data.numpy().transpose((1, 2, 0)))\n img_binary_mask_union = np.where(img_binary_mask_0 > 0, 1, 0)\n for img_binary_mask in img_binary_masks[1:]:\n img_binary_mask = np.squeeze(gray_transform(Image.fromarray(img_binary_mask)).data.numpy().transpose((1, 2, 0)))\n img_binary_mask = np.where(img_binary_mask > 0, 1, 0)\n img_binary_mask_union = np.bitwise_or(img_binary_mask_union, img_binary_mask)\n\n cam_mask = np.where(cam_mask > 0, 1, 0)\n\n common_mask = np.bitwise_and(img_binary_mask_union, cam_mask)\n common_mask = mask.encode(np.asfortranarray(common_mask).astype('uint8'))\n common_area = mask.area(common_mask)\n fortran_arr = np.asfortranarray(cam_mask).astype('uint8')\n if np.max(fortran_arr) == 0:\n q_measure = 0.0\n else:\n q_measure = common_area/mask.area(mask.encode(fortran_arr))\n\n return q_measure, mask.decode(common_mask)\n\n\ndef normalize_image(image):\n \"\"\"normalize image.\"\"\"\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n image = np.clip(image, 0, 1)\n return image\n\n\ndef apply_mask_threshold(cam, threshold_cam):\n cam_img = cv2.resize(cam, (224, 224))\n cam = np.where(cam_img < np.percentile(cam_img, threshold_cam), 0, cam_img)\n return np.uint8(cam*255)\n\n\ndef activation_map_over_img(image, cam, alpha=0.7):\n \"\"\"Overlay activation map on image\"\"\"\n\n heatmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)\n image = np.round(image*255.0).astype(int)\n cam_over_img = (heatmap*alpha) + image*(1-alpha)\n return cam_over_img.astype(int)\n\n\ndef draw_heatmap_polygon(image, cam):\n image = cv2.cvtColor(np.float32(image), cv2.COLOR_RGB2BGR)\n edged = cv2.Canny(cam, 30, 200)\n contours, hierarchy = cv2.findContours(edged,\n cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n _ = cv2.drawContours(image, contours, -1, (0, 255, 0), 1)\n heatmap_polygons = []\n for i_contours in range(len(contours)):\n heatmap_polygons.append(np.squeeze(contours[i_contours]).ravel().tolist())\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), heatmap_polygons\n\n\ndef extract_class_names(class_ids, val_ann_file):\n \"\"\"Maps nerual networks class ids with dataset class id and return maps of class_id and 
class name.\"\"\"\n class_ids_map_with_nn = {key: ind for ind, key in enumerate(class_ids)}\n coco = CocoCam(val_ann_file)\n labels = coco.get_cat_labels(catIds=class_ids)\n labels_with_nn_id = {class_ids_map_with_nn[key]: value for key, value in labels.items()}\n\n return labels_with_nn_id\n","repo_name":"pawneshg/XAI-CFAMs","sub_path":"activation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40314412929","text":"# 4 classes\n# image size 28*28*1\n# pixel value [0,1]\n\n##### set specific gpu #####\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras import datasets, layers, models\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\nimport shutil\nimport pandas as pd\n\n##### \nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\nimage_size = 28\ndivide_num = 2\nimage_dir = \"datasets\"\nclasses = [0,1,2,3]\nimage_amount = 2000\ncomplexity_level = 4\n\ndef generate_base_image(label):\n image = np.zeros((image_size,image_size),dtype=np.float64)\n if label == 0:\n for i in range(image_size):\n for j in range(image_size):\n if i < image_size/divide_num and j < image_size/divide_num:\n image[i][j] = 1.\n else:\n image[i][j] = random.random()\n elif label == 1:\n for i in range(image_size):\n for j in range(image_size):\n if i < image_size/divide_num and j >= image_size - image_size/divide_num:\n image[i][j] = 1.\n else:\n image[i][j] = random.random()\n elif label == 2:\n for i in range(image_size):\n for j in range(image_size):\n if i >= image_size - image_size/divide_num and j < image_size/divide_num:\n image[i][j] = 1.\n else:\n image[i][j] = random.random()\n elif label == 3:\n for i in range(image_size):\n for j in range(image_size):\n if i >= image_size - image_size/divide_num and j >= image_size - image_size/divide_num:\n image[i][j] = 1.\n else:\n image[i][j] = random.random()\n else:\n assert False, \"this class does not exist\"\n\n return image\n\n\ndef image_transfer_model(complexity_level):\n model = models.Sequential()\n model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same',kernel_initializer='random_uniform',bias_initializer='random_normal',input_shape=(image_size, image_size,1)))\n model.add(layers.BatchNormalization())\n for _ in range(complexity_level):\n model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',kernel_initializer='random_uniform',bias_initializer='random_normal',input_shape=(image_size, image_size,1)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(1, (3, 3), activation='relu', padding='same',kernel_initializer='random_uniform',bias_initializer='random_normal'))\n return model\n\n\n\ndef image_creator(image_num,complexity_level):\n generator = image_transfer_model(complexity_level)\n for a_class in classes:\n generated_image = []\n for num in range(image_num):\n generated_image.append(generate_base_image(a_class))\n generated_image = np.array(generated_image)\n generated_image = generated_image.reshape((image_num,image_size,image_size,1))\n generated_image = tf.convert_to_tensor(generated_image)\n generated_image = generator(generated_image,training=False)\n generated_image = np.array(generated_image)\n np.save(\"npy_datasets/\"+str(a_class)+'.npy', 
generated_image)\n\nimage_creator(image_amount,complexity_level)\n","repo_name":"RichardChangCA/An-Exploration-of-Universal-Adversarial-Perturbation-in-Deep-Learning","sub_path":"generator_and_training_models_v1/11_24_update/data_generator_npy.py","file_name":"data_generator_npy.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71387952518","text":"\nM = 1234567891\n\ndef hashing(s):\n result = 0\n for i in range(len(s)):\n num = ord(s[i]) - ord('a') + 1\n result += num * pow(31,i,M)\n\n return result % M\n\n_ = input()\ns = input()\n\nprint(hashing(s))\n","repo_name":"kangmyoungseok/baekjoon","sub_path":"해시/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6124397328","text":"# -*- coding: utf-8 -*-\n# © 2017 Savoir-faire Linux\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\nfrom odoo import api, models\n\n\nclass AccountReportContextFollowup(models.TransientModel):\n\n _inherit = 'account.report.context.followup'\n\n @api.depends('partner_id')\n def _get_invoice_address(self):\n for partner in self:\n if partner.partner_id:\n partner.invoice_address_id = (\n partner.partner_id.get_preferred_address(\n ['customer_payment', 'invoice']))\n else:\n partner.invoice_address_id = False\n","repo_name":"savoirfairelinux/account-addons","sub_path":"payment_workflow_enterprise/models/account_report_context_followup.py","file_name":"account_report_context_followup.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11533088874","text":"from typing import Any, Callable, Dict\n\nfrom docutils.parsers.rst import directives # type: ignore\n\n\nclass SphinxSpecBase:\n aliases: Dict[str, str]\n defaults: Dict[str, str]\n\n def __init__(self, hide: bool, linenos: bool, caption: str, language: str):\n # flags\n self.hide = hide\n self.linenos = linenos\n # values\n self.caption = caption\n self.language = language\n\n @classmethod\n def from_options(cls, options: Dict[str, Any]) -> 'SphinxSpecBase':\n opts = {}\n for alias, name in cls.aliases.items():\n if name not in cls.defaults:\n # is a flag\n opts[name] = alias in options\n else:\n # is a value\n val = options.get(alias, None)\n if not val:\n val = cls.defaults[name]\n opts[name] = val\n return cls(**opts)\n\n @classmethod\n def update_spec(cls, spec: Dict[str, Callable[[Any], Any]]):\n for alias, name in cls.aliases.items():\n # Flags don't have a default\n spec[alias] = directives.flag if name not in cls.defaults else directives.unchanged\n\n\ndef build_spec() -> Dict[str, Callable[[Any], Any]]:\n spec = {}\n SpecCode.update_spec(spec)\n SpecOutput.update_spec(spec)\n return spec\n\n\nclass SpecCode(SphinxSpecBase):\n aliases = {\n 'hide_code': 'hide',\n 'caption': 'caption',\n 'language': 'language',\n 'linenos': 'linenos',\n 'filename': 'filename',\n }\n defaults = {\n 'caption': '',\n 'language': 'python',\n 'filename': '',\n }\n\n def __init__(self, hide: bool, linenos: bool, caption: str, language: str, filename: str):\n super().__init__(hide, linenos, caption, language)\n self.filename: str = filename\n\n\nclass SpecOutput(SphinxSpecBase):\n aliases = {\n 'hide_output': 'hide',\n 'caption_output': 'caption',\n 'language_output': 'language',\n 'linenos_output': 'linenos',\n }\n 
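# Added note: valued options fall back to these defaults; aliases whose names are\n    # absent here (e.g. 'hide', 'linenos') are registered as flags by update_spec().\n    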
defaults = {\n 'caption': '',\n 'language': 'none',\n }\n","repo_name":"spacemanspiff2007/sphinx-exec-code","sub_path":"src/sphinx_exec_code/sphinx_spec.py","file_name":"sphinx_spec.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"62"} +{"seq_id":"13611226854","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Ida Lunde Naalsund\"\n__email__ = \"idna@nmbu.no\"\n\n\nclass LCGRand:\n \"\"\"Implementation of a linear congruential generator\n that returns random numbers.\n \"\"\"\n\n def __init__(self, seed):\n \"\"\"Initialises the class with given constants a and m.\n\n Parameters\n ----------\n seed : int\n The seed that the generating is based on.\n \"\"\"\n self._hidden_state = seed\n self.slope = 7 ** 5\n self.congruence_class = 2 ** 31 - 1\n\n def rand(self):\n \"\"\"Generates a single random number based on a seed given by the user.\n\n Returns\n -------\n random_number : int\n The generated number.\n \"\"\"\n self._hidden_state *= self.slope\n self._hidden_state %= self.congruence_class\n\n return self._hidden_state\n\n def random_sequence(self, length):\n\n return RandIter(self, length)\n\n def infinite_random_sequence(self):\n \"\"\"\n Generate an infinite sequence of random numbers.\n\n Yields\n ------\n self.rand() : int\n A random number.\n\n \"\"\"\n while True:\n yield self.rand()\n\n\nclass RandIter:\n def __init__(self, random_number_generator, length):\n \"\"\"\n\n Arguments\n ---------\n random_number_generator :\n A random number generator with a ``rand`` method that\n takes no arguments and returns a random number.\n length : int\n The number of random numbers to generate\n \"\"\"\n\n self.generator = random_number_generator.rand\n self.length = length\n self.num_generated_numbers = None\n\n def __iter__(self):\n \"\"\"\n Initialise the iterator.\n\n Returns\n -------\n self : RandIter\n\n Raises\n ------\n RuntimeError\n If iter is called twice on the same RandIter object.\n \"\"\"\n if self.num_generated_numbers is not None:\n raise RuntimeError('iter can only be called once on same '\n 'RandIter object')\n\n self.num_generated_numbers = 0\n return self\n\n def __next__(self):\n \"\"\"\n Generate the next random number.\n\n Returns\n -------\n int\n A random number.\n\n Raises\n ------\n RuntimeError\n If the ``__next__`` method is called before ``__iter__``.\n StopIteration\n If ``self.length`` random numbers are generated.\n \"\"\"\n if self.num_generated_numbers is None:\n raise RuntimeError('__next__ method is called before __iter__')\n\n if self.num_generated_numbers == self.length:\n raise StopIteration('All random numbers have been'\n 'generated')\n\n self.num_generated_numbers += 1\n\n return self.generator()\n\n\nif __name__ == '__main__':\n rand_generator = LCGRand(1)\n for rand in rand_generator.random_sequence(10):\n print(rand)\n\n for i, rand in enumerate(rand_generator.infinite_random_sequence()):\n print(f'The {i}-th random number is {rand}')\n if i > 100:\n break\n","repo_name":"idaln/INF200-2019-Exercises","sub_path":"src/ida_lunde_naalsund_ex/ex05/myrand.py","file_name":"myrand.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37673642529","text":"#-----------------------------------------------------------------\n# first-user-story\n#-----------------------------------------------------------------\n\nimport psycopg2\nimport sys\n\ndef heading(str):\n 
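\"\"\"Print str framed by horizontal rules as a section heading. (Added docstring.)\"\"\"\n    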
print('-'*60)\n print(\"** %s:\" % (str,))\n print('-'*60, '\\n') \n\nSHOW_CMD = True\ndef print_cmd(cmd):\n if SHOW_CMD:\n print(cmd.decode('utf-8'))\n\ndef print_rows(rows):\n for row in rows:\n print(row)\n\n#------------------------------------------------------------\n# Show the item of top most sold within a category\n#------------------------------------------------------------\ndef show_top_sold(cate):\n tmpl = '''\n SELECT item_id, item_name, sold_count\n FROM items\n WHERE category LIKE %s\n ORDER BY sold_count DESC\n LIMIT 1;\n '''\n cmd = cur.mogrify(tmpl, [('%'+cate+'%')])\n print_cmd(cmd)\n cur.execute(cmd)\n rows = cur.fetchall()\n print_rows(rows)\n print()\n for row in rows:\n item_id,item_name,sold_count = row\n print(\"%s. (%s) %s.\" % (item_id,item_name,sold_count))\n\nif __name__ == '__main__':\n try:\n db, user = 'abcdebay', 'isdb'\n if len(sys.argv) >= 2:\n db = sys.argv[1]\n if len(sys.argv) >= 3:\n user = sys.argv[2]\n conn = psycopg2.connect(database=db, user=user)\n conn.autocommit = True\n cur = conn.cursor()\n print(\"--------------------------------------------------------------\")\n print(\"User Story #3\")\n print(\"return the item of top most sold within a category\")\n print(\"Testing Appliances category\")\n print(\"Should return: Proctor Silex Plate sold 23\")\n show_top_sold(\"Appliances\")\n print(\"--------------------------------------------------------------\")\n except psycopg2.Error as e:\n print(\"Unable to open connection: %s\" % (e,))","repo_name":"SenF1/eBay-Database-Development-Simulation","sub_path":"Queries/simple_query_3.py","file_name":"simple_query_3.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5947801361","text":"''' Main file to execute all micropython codes '''\n\nimport _thread\nimport machine\nimport utime\nimport mqtt\n\n# Simple implementation for logging\nlogfile = 'mainlog.txt'\n\n\ntry:\n _thread.start_new_thread(mqtt.start, ())\nexcept Exception as exc:\n with open(logfile, 'a+') as f:\n print(str(exc))\n f.write(str(exc))\n f.write('\\n')\n utime.sleep(5)\n machine.reset()\n","repo_name":"siriuspal/UniversalRemote","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"7121176832","text":"import pygame, sys, os\n\nglobal animation_frames\nanimation_frames = {}\n\n# player animation\ndef load_animation(path,frame_durations):\n global animation_frames\n animation_name = path.split('/')[-1]\n animation_frame_data = []\n n = 1\n for frame in frame_durations:\n animation_frame_id = animation_name + '_' + str(n)\n img_loc = path + '/' + animation_frame_id + '.png'\n animation_image = pygame.image.load(img_loc).convert()\n animation_image.set_colorkey((255,255,255))\n animation_frames[animation_frame_id] = animation_image.copy()\n for i in range(frame):\n animation_frame_data.append(animation_frame_id)\n n += 1\n return animation_frame_data\n\ndef change_action(action_var,frame,new_value):\n if action_var != new_value:\n action_var = new_value\n frame = 0\n return action_var, frame\n\nanimation_db = {}\nanimation_db['idle'] = load_animation('images/sheets/player/idle',[7,7])\nanimation_db['walk'] = load_animation('images/sheets/player/walk',[7,7,40])\n\nplayer_action = 'idle'\nplayer_frame = 0\nplayer_flip = False\n\nground_sound_timer = 0\n\n# player variables\nplayer_rect = 
pygame.Rect(100,100,5,13)\n\nmove_right = False\nmove_left = False\nvertical_momentum = 0 # gravity\nair_timer = 0\n\ntrue_scroll = [0,0]\n\n","repo_name":"CptnReef/pyGame","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9858082287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom collections import defaultdict\n\nfrom configargparse import ArgumentParser\nfrom time import gmtime, strftime\nfrom xlsxwriter import Workbook\n\nfrom SMACB.Constants import POSICIONES\nfrom SMACB.PartidoACB import PartidoACB\nfrom SMACB.SuperManager import SuperManagerACB\nfrom SMACB.TemporadaACB import TemporadaACB\nfrom Utils.Misc import CuentaClaves, FORMATOfecha, FORMATOtimestamp\n\n\ndef jugadoresMezclaStatus(datos):\n resultado = defaultdict(set)\n\n for jug in datos:\n datosJug = datos[jug]\n if 'I-activo' not in datosJug:\n (resultado[None]).add(jug)\n continue\n\n statusJug = datosJug['I-activo']\n (resultado[statusJug]).add(jug)\n\n return resultado\n\n\ndef CuentaClavesPartido(x):\n if type(x) is not dict:\n raise ValueError(\"CuentaClaves: necesita un diccionario\")\n\n resultado = defaultdict(int)\n\n for clave in x:\n valor = x[clave]\n\n if type(valor) is not PartidoACB:\n print(\"CuentaClaves: objeto de clave '%s' no es un PartidoACB, %s\" % (clave, type(valor)))\n continue\n\n for subclave in valor.__dict__.keys():\n resultado[subclave] += 1\n\n return resultado\n\n\ndef mezclaJugadores(jugTemporada, jugSuperManager):\n resultado = defaultdict(dict)\n\n for claveSM in jugSuperManager:\n for jug in jugSuperManager[claveSM]:\n resultado[jug][claveSM] = jugSuperManager[claveSM][jug]\n\n for claveTM in jugTemporada:\n for jug in jugTemporada[claveTM]:\n resultado[jug][claveTM] = jugTemporada[claveTM][jug]\n\n return resultado\n\n\ndef preparaDatosComunes(datosMezclados):\n resultado = dict()\n datosCabecera = dict()\n\n titularCabecera = ['Pos', 'Cupo', 'Lesion', 'Nombre', 'Equipo', 'Promedio Val', 'Precio',\n 'Proximo Rival', 'Precio punto']\n\n jugadoresActivos = jugadoresMezclaStatus(datosMezclados)[True]\n # jugadoresInactivos = jugPorStatus[False]\n jugDataActivos = {x: datosMezclados[x] for x in jugadoresActivos}\n\n for jug in jugDataActivos:\n cabecJug = list()\n datosJug = jugDataActivos[jug]\n\n for campo in ['I-pos', 'I-cupo', 'I-lesion', 'I-nombre', 'I-equipo', 'I-promVal', 'I-precio']:\n if campo in datosJug:\n if campo == 'I-pos':\n cabecJug.append(POSICIONES[datosJug[campo]])\n continue\n elif campo == 'I-lesion':\n salud = \"Lesionado\" if datosJug[campo] else \"\"\n cabecJug.append(salud)\n continue\n\n cabecJug.append(datosJug[campo])\n else:\n print(\"Falla clave:\", campo, datosJug)\n exit(1)\n\n proxPartido = (\"@\" if datosJug['I-proxFuera'] else \"\") + datosJug['I-rival']\n cabecJug.append(proxPartido)\n costePunto = (datosJug['I-precio'] / datosJug['I-promVal']) if (datosJug['I-promVal']) > 0 else \"-\"\n cabecJug.append(costePunto)\n datosCabecera[jug] = cabecJug\n\n claves = list(map(lambda x: x[0], sorted(list(map(lambda x: (x, jugDataActivos[x]['I-precio']), jugDataActivos)),\n reverse=True,\n key=lambda x: x[1])))\n\n resultado['claves'] = claves\n resultado['cabeceraLinea'] = datosCabecera\n resultado['titularCabecera'] = titularCabecera\n\n return resultado\n\n\ndef preparaExcel(supermanager, temporada, nomFichero=\"/tmp/SM.xlsx\", ):\n jugSM = supermanager.extraeDatosJugadores()\n jugTM = 
temporada.extraeDatosJugadores()\n jugData = mezclaJugadores(jugTM, jugSM)\n numJornadas = temporada.maxJornada()\n nombreJornadas = {False: temporada.Calendario.nombresJornada()[:numJornadas],\n True: ['J 0'] + temporada.Calendario.nombresJornada()[:numJornadas]}\n\n def preparaFormatos(workbook):\n resultado = dict()\n\n resultado['cabecera'] = workbook.add_format({'bold': True, 'align': 'center'})\n resultado['nulo'] = workbook.add_format()\n\n resultado['VL'] = workbook.add_format({'bold': True, 'bg_color': 'green'})\n resultado['DL'] = workbook.add_format({'bg_color': 'green'})\n resultado['VF'] = workbook.add_format({'bold': True, 'bg_color': 'blue'})\n resultado['DF'] = workbook.add_format({'bg_color': 'blue'})\n\n resultado['VLd'] = workbook.add_format({'bold': True, 'bg_color': 'green', 'num_format': '#,##0_;[Red]-#,##0'})\n resultado['DLd'] = workbook.add_format({'bg_color': 'green', 'num_format': '#,##0_;[Red]-#,##0'})\n resultado['VFd'] = workbook.add_format({'bold': True, 'bg_color': 'blue', 'num_format': '#,##0_;[Red]-#,##0'})\n resultado['DFd'] = workbook.add_format({'bg_color': 'blue', 'num_format': '#,##0_;[Red]-#,##0'})\n\n return resultado\n\n def calculaFormato(victoria, local, vdecimal):\n resultado = \"\"\n resultado += \"V\" if victoria else \"D\"\n resultado += \"L\" if local else \"F\"\n if vdecimal:\n resultado += \"d\"\n\n return resultado\n\n def creaHoja(workbook, nombre, clave, datosJugadores, datosComunes, formatos,\n nombreJornadas, valorDecimal=False, claveSM=True):\n clavesExistentes = CuentaClaves(datosJugadores)\n print(clavesExistentes)\n\n if clave not in clavesExistentes:\n return\n\n seqDatos = list(range(numJornadas + (1 if claveSM else 0)))\n cabJornadas = nombreJornadas[claveSM]\n ot = -1 if claveSM else 0\n\n # print(ot, seqDatos, cabJornadas)\n\n ws = workbook.add_worksheet(nombre)\n\n fila, columna = 0, 0\n\n ws.write_row(fila, columna, datosComunes['titularCabecera'], formatos['cabecera'])\n columna += len(datosComunes['titularCabecera']) + 1\n ws.write_row(fila, columna, cabJornadas, formatos['cabecera'])\n fila += 1\n columna = 0\n\n print(clave)\n for jug in datosComunes['claves']:\n ws.write_row(fila, columna, datosComunes['cabeceraLinea'][jug])\n columna += len(datosComunes['titularCabecera']) + 1\n datosJugador = datosJugadores[jug]\n\n if clave in datosJugador:\n datosAmostrar = datosJugador[clave]\n print(datosComunes['cabeceraLinea'][jug], datosAmostrar)\n comentarios = datosJugador['ResumenPartido']\n haJugado = datosJugador['haJugado']\n esLocal = datosJugador['esLocal']\n victoria = datosJugador['haGanado']\n jornada = datosJugador['Jornada']\n\n cv = zip(haJugado, esLocal, victoria, jornada, ([] if claveSM else [\"-\"]) + datosAmostrar)\n print(\"\\n\".join(map(str, cv)))\n ordenDatos = seqDatos if claveSM else datosJugador['OrdenPartidos']\n print(ordenDatos)\n\n for i in ordenDatos:\n if datosAmostrar[i] is not None:\n if i + ot >= 0:\n f = calculaFormato(victoria[i + ot], esLocal[i + ot], valorDecimal)\n valor = datosAmostrar[i] if haJugado[i + ot] else \"\"\n fechaAux = strftime(FORMATOfecha, datosJugador['FechaHora'][i + ot]) \\\n if datosJugador['FechaHora'][i + ot] else \"-\"\n print(fila, columna, fechaAux, valor, f)\n if comentarios[i + ot]:\n pass\n # print(comentarios[i + ot], valor)\n # ws.write_comment(fila, columna, comentarios[i + ot])\n\n else:\n valor = datosAmostrar[i]\n f = \"nulo\"\n print(fila, columna, \"-\", valor, f)\n ws.write(fila, columna, valor, formatos[f])\n columna += 1\n\n fila += 1\n columna = 
0\n\n def addMetadata(workbook, datos):\n ws = workbook.add_worksheet(\"Metadata\")\n fila = 0\n columna = 0\n for l in datos:\n ws.write(fila, columna, l)\n fila += 1\n\n metadata = [\"Cargados datos SuperManager de %s\" % strftime(FORMATOtimestamp, supermanager.timestamp),\n \"Cargada información de temporada de %s\" % strftime(FORMATOtimestamp, temporada.timestamp),\n \"Ejecutado en %s\" % strftime(FORMATOtimestamp, gmtime())]\n\n datosComunes = preparaDatosComunes(jugData)\n\n # print(jugData)\n\n # print(datosComunes)\n\n # print(DumpDict(datosComunes['cabeceraLinea'], datosComunes['claves']))\n\n wb = Workbook(filename=nomFichero)\n formatos = preparaFormatos(wb)\n\n # creaHoja(wb, \"ValoracionSM\", \"valJornada\", jugData, datosComunes, formatos, nombreJornadas,\n # valorDecimal=True, claveSM=True)\n creaHoja(wb, \"Valoracion\", \"V\", jugData, datosComunes, formatos, nombreJornadas, valorDecimal=False,\n claveSM=False)\n # creaHoja(wb, \"PrecioSM\", \"precio\", jugData, datosComunes, formatos, nombreJornadas, valorDecimal=True,\n # claveSM=True)\n\n addMetadata(wb, metadata)\n\n wb.close()\n\n # jugOrdenados = [clave[0] for clave in\n # list(map(lambda x:(x,jugData['I-precio']), jugData)).sort(reverse=True,itemgetter=lambda y:y[1])]\n # print(jugOrdenados)\n # print(DumpDict(datosCabecera))\n # print(metadata)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n\n parser.add('-v', dest='verbose', action=\"count\", env_var='SM_VERBOSE', required=False, default=0)\n parser.add('-d', dest='debug', action=\"store_true\", env_var='SM_DEBUG', required=False, default=False)\n\n parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=True)\n parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA', required=True)\n\n parser.add('-o', dest='outfile', type=str, env_var='SM_OUTFILE', required=False)\n\n args = parser.parse_args()\n\n sm = SuperManagerACB()\n\n if 'infile' in args and args.infile:\n sm.loadData(args.infile)\n print(\"Cargados datos SuperManager de %s\" % strftime(FORMATOtimestamp, sm.timestamp))\n\n temporada = None\n if 'temporada' in args and args.temporada:\n temporada = TemporadaACB()\n temporada.cargaTemporada(args.temporada)\n print(\"Cargada información de temporada de %s\" % strftime(FORMATOtimestamp, temporada.timestamp))\n\n preparaExcel(sm, temporada)\n","repo_name":"cesaralba/jimenezIntelligence","sub_path":"InformeSuperManager.py","file_name":"InformeSuperManager.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"4511381875","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields\n\n\nclass MessageWizard(models.TransientModel):\n _name = 'cerp_core.message.wizard'\n\n message = fields.Text('Message', required=True)\n\n def action_ok(self):\n \"\"\" close wizard\"\"\"\n return {'type': 'ir.actions.act_window_close'}\n","repo_name":"clouderp/cerp","sub_path":"addons/cerp_core/models/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29388593254","text":"#5단계: 1차원 배열\n#5. 세준이는 기말고사를 망쳤다. 세준이는 점수를 조작해서 집에 가져가기로 했다. \n# 일단 세준이는 자기 점수 중에 최댓값을 골랐다. 이 값을 M이라고 한다. 
그리고 나서 모든 점수를 점수/M*100으로 고쳤다.\n# 예를 들어, 세준이의 최고점이 70이고, 수학점수가 50이었으면 수학점수는 50/70*100이 되어 71.43점이 된다.\n# 세준이의 성적을 위의 방법대로 새로 계산했을 때, 새로운 평균을 구하는 프로그램을 작성하시오.\n\na = int(input())\n\nlist_a = list(map(int,input().split()))\n\nlist_max = max(list_a)\n\nsum = 0\navg = 0\n\nfor i in range(a):\n list_a[i] = list_a[i] / list_max * 100\n sum += list_a[i]\n\navg = sum / a\n\nprint(avg)\n","repo_name":"geunu97/Algorithm_Python","sub_path":"백준/Step5/(Baekjoon)Step5_Q5.py","file_name":"(Baekjoon)Step5_Q5.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"69968915718","text":"# !/usr/bin/env Python3\n# -*- coding: utf-8 -*-\n# @FILE : day11_28.py\n# @Author : Pluto.\n# @Time : 2020/11/28 10:27\ndef sxh():\n sxh = []\n for i in range(1, 1000):\n s = 0\n for j in str(i):\n s += int(j) ** 3\n # Armstrong (narcissistic) number: i equals the sum of the cubes of its digits\n if i == s:\n sxh.append(i)\n return sxh\nif __name__ == '__main__':\n print(sxh())","repo_name":"cyg2695249540/generatewework","sub_path":"leetcode/day11_28.py","file_name":"day11_28.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"7391704752","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\n\n# read sources\nhun_sources = pd.read_csv(\"data/validation/hungarian_media_slant.csv\")[\"source\"]\n\nsources = hun_sources.unique().tolist()\nsources = sorted(sources, reverse=True)\n\n# read raw data\ndata = pd.read_csv(\"data/clean/citation_data.csv\")\n\ndata = data.dropna(subset=[\"source_url\"])\n\nfor i in [\n (\"'\", \"\"),\n ('\"', \"\"),\n (\"\\\\\", \"\"),\n (\"www.investor.hu\", \"www.origo.hu\"),\n (\"magyarnemzet.hu\", \"mno.hu\"),\n (\"fn.hu\", \"24.hu\"),\n (\"napigazdasag.hu\", \"magyaridok.hu\"),\n (\"/valasz.hu\", \"/hetivalasz.hu\"),\n]:\n data[\"source_url\"] = (\n data[\"source_url\"].str.strip().str.replace(i[0], i[1], regex=False)\n )\n\n# search in only the first part of the URL, as sites may refer to other sites later in the URL\ndata[\"source_url_short\"] = data[\"source_url\"].str[:33]\n\n# create source variable\ndata[\"source\"] = None\nfor source in sources:\n data.loc[lambda x: x[\"source_url_short\"].str.contains(source), \"source\"] = source\n\n# drop helper columns\ndata = data.drop(\"source_url_short\", axis=1)\n\n# drop if source not found\ndata = data.loc[lambda x: x[\"source\"].notnull()]\n\n# unify hard-news sections\ndata[\"section\"] = np.where(data[\"section\"] == \"itthon\", \"belfold\", data[\"section\"])\ndata[\"section\"] = np.where(data[\"section\"] == \"nagyvilag\", \"kulfold\", data[\"section\"])\ndata[\"section\"] = np.where(data[\"section\"] == \"kozelet\", \"belfold\", data[\"section\"])\ndata[\"section\"] = np.where(\n data[\"section\"] == \"uzleti-tippek\", \"gazdasag\", data[\"section\"]\n)\ndata[\"section\"] = np.where(data[\"section\"] == \"penzugy\", \"gazdasag\", data[\"section\"])\n\n# drop if there is more than one citation from one source in an article\ndata = data.drop_duplicates([\"url\", \"source\"])\n\ndata.to_csv(\"data/clean/citation_data_sample.csv\", index=False)\n","repo_name":"adamvig96/news-sharing","sub_path":"code/data-processing/citation_data_sample.py","file_name":"citation_data_sample.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33303159269","text":"import RPi.GPIO as GPIO\nimport 
time\n\nclass ManualController:\n def __init__(self):\n\n self.magnetron_pin = 18\n self.fan_pin = 23\n self.max_power = 10\n self.control_time = 20 # 10 -> 한 루프가 1/10초 100 -> 1/100\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.magnetron_pin, GPIO.OUT)\n GPIO.setup(self.fan_pin, GPIO.OUT)\n GPIO.output(self.magnetron_pin, True)\n GPIO.output(self.fan_pin, True)\n\n\n\n # t = int(input(\"enter time (s): \"))\n # power = int(input(\"enter power(1-{}): \".format(max_power)))\n\n self.start_time = time.time()\n self.duration = 0\n self.power = 0\n self.stop_flag = False\n\n def reset_param(self, power, duration):\n self.start_time = time.time()\n self.duration = duration\n self.power = power\n self.stop_flag = False\n GPIO.output(self.magnetron_pin, True)\n GPIO.output(self.fan_pin, True)\n print(\"target duration:\", duration)\n\n def run(self):\n\n if self.stop_flag:\n return True\n\n current_time = time.time()\n operation_time = current_time - self.start_time\n operation_range = operation_time % 10\n\n if operation_range < self.power:\n GPIO.output(self.fan_pin, False)\n GPIO.output(self.magnetron_pin, False)\n else:\n GPIO.output(self.fan_pin, False)\n GPIO.output(self.magnetron_pin, True)\n\n if operation_time > self.duration:\n GPIO.output(self.magnetron_pin, True)\n GPIO.output(self.fan_pin, True)\n self.stop_flag = True\n print(\"micro_wave_is_done\")\n\n return False\n","repo_name":"kwakdonghwan/Yonsei_deum","sub_path":"PythonCodes/FastDeum/control/microwave/manual_controller.py","file_name":"manual_controller.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"14591527414","text":"import csv \nimport psycopg2\n\ndef deleting_phone(username):\n cursor.execute(f\"\"\"DELETE FROM phone_book WHERE first_name = '{username}';\"\"\")\n\n\ndef insert_data():\n with open('data.csv', 'r') as file:\n data = csv.reader(file)\n for line in data:\n name = (\"\"\"INSERT INTO phone_book VALUES \n (DEFAULT, %s, %s, %s);\n \"\"\")\n cursor.execute(name, line)\n\ndef query_filtering_id():\n cursor.execute(\"\"\"SELECT * FROM phone_book WHERE id%2 = 0;\"\"\")\n for row in cursor.fetchall():\n print(row)\n\ndef query_filtering_range_and_in():\n cursor.execute(\"\"\"SELECT * FROM phone_book WHERE (id between 4 and 6) AND (id in (3, 5, 7));\"\"\")\n for row in cursor.fetchall():\n print(row)\n\ndef query_order():\n cursor.execute(\"\"\"SELECT first_name FROM phone_book ORDER BY first_name ASC;\"\"\")\n for row in cursor.fetchall():\n print(row)\n\n\nconnection = None\n\ntry:\n connection = psycopg2.connect(\n host=\"localhost\",\n dbname=\"phone_book\",\n user=\"postgres\", \n password=\"i_believe\",\n port=5432\n )\n\n connection.autocommit = True\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"SELECT version();\"\"\")\n print(f\"Server version: {cursor.fetchone()}\")\n\n\n with connection.cursor() as cursor:\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS phone_book(\n id serial PRIMARY KEY,\n first_name VARCHAR(255) NOT NULL,\n phone_number VARCHAR(13) NOT NULL,\n status VARCHAR(15) NOT NULL\n );\"\"\"\n )\n # connection.commit()\n print(\"[INFO] Table is created succesfully\") \n \n with connection.cursor() as cursor:\n cursor.execute(\"\"\"INSERT INTO phone_book (id, first_name, phone_number, status) \n VALUES\n (DEFAULT,'Nursat', '87474589586', 'family'),\n (DEFAULT,'Zhanserik', '87474574558', 'friend'),\n (DEFAULT, 'Nurzat', '87073071821', 'family'),\n (DEFAULT, 'Alinur', 
'87054540072', 'KBTU'),\n (DEFAULT, 'Mama', '87473610235', 'family'),\n (DEFAULT, 'Stranger', '87056384545', 'KBTU');\n \"\"\"\n )\n # connection.commit()\n print(\"[INFO] Data was successfully inserted\") \n \n \n with connection.cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT first_name FROM phone_book WHERE id = 1\"\"\"\n )\n print(cursor.fetchone()) \n \n \n # with connection.cursor() as cursor:\n # cursor.execute(\n # \"\"\"DROP TABLE phone_book\"\"\"\n # )\n # print(\"[INFO] Table is deleted\") \n\n cursor = connection.cursor()\n \n update_script = \"\"\"UPDATE phone_book SET first_name = 'Brother' WHERE phone_number = '87073071821';\"\"\"\n cursor.execute(update_script)\n\n print( ''' Do you want to add new contact?!\\n (0 - No / 1- Yes)''')\n wish = input()\n if wish == '1':\n contact = []\n print(f\"Name:\")\n name = input()\n contact.append(name)\n print(\"Phone_number:\\n\")\n phone_number = input()\n contact.append(phone_number)\n status = input(\"Status:\\n\")\n contact.append(status)\n\n\n new_contact = (\"\"\"INSERT INTO phone_book VALUES \n (DEFAULT, %s, %s, %s);\n \"\"\")\n cursor.execute(new_contact, contact)\n del contact\n else:\n print(\"Lets continue!\")\n\n\n cursor.execute(\"\"\"SELECT first_name FROM phone_book WHERE phone_number = '87073071821';\"\"\")\n print(cursor.fetchone())\n\n deleting_phone('Brother')\n\n insert_data()\n\n query_filtering_id()\n\n query_filtering_range_and_in()\n\n query_order()\n\n cursor.close()\nexcept Exception as _ex:\n print(\"[INFO] Error while working with PostgreSQL\\n\", _ex)\nfinally:\n if connection is not None: \n connection.close() \n print(\"[INFO] PostgreSQL connection closed\")","repo_name":"Nursat22B030486/pp2-22B030486","sub_path":"TSIS_10/Phone_book/phone_book.py","file_name":"phone_book.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43306376258","text":"from InitialData import InitialData\nfrom ResourceManager import ResourceManager\nfrom Customer import Customer\nfrom HelperFunctions import HelperFunctions\n\n\ndef displayResourceLevels(resourceList):\n for r in resourceList:\n print(f\"{r.Name} has {r.Quantity} available.\") \n\n\n# Building my Data class\ndata = InitialData()\n\n# Building a resource manager and passing it my two dictionaries\nresourceManager = ResourceManager(data.INGREDIENTS, data.RECIPES)\n\n# Uses the Initial Ingredients to put the coffee machine at its initial values for water, milk and coffee \nresourceList = resourceManager.getResourcesList()\n\nallRecipes = resourceManager.getAllRecipes()\n\nfor currentRecipe in allRecipes:\n recipe = resourceManager.getRecipe(currentRecipe.getName())\n\ncustomer = Customer(\"Jason\", 0)\n\nisProductFinished = False\nwhile isProductFinished == False:\n coffeeMade = resourceManager.CreateProduct(recipe, customer)\n if coffeeMade == True:\n isProductFinished = True\n\n","repo_name":"UnderPaidMathematician/Continuing_Education","sub_path":"100 Days of Code/Day 15-58 Intermediate Projects/New Day 15 Coffee Maker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31533435172","text":"\"\"\"\nTokenizers\nUsage: python tokenizer.py < corpus.txt\n\"\"\"\n__author__ = \"Pierre Nugues\"\n\nimport sys\nimport regex as re\n\ntext = \"\"\"Tell me, O muse, of that ingenious hero who\ntravelled far and wide after he had sacked the 
famous\ntown of Troy.\"\"\"\n\n\ndef tokenize(text):\n \"\"\"uses the nonletters to break the text into words\n returns a list of words\"\"\"\n # words = re.split(r'[\\s\\-,;:!?.’\\'«»()–...&‘’“”*—]+', text)\n # words = re.split(r'[^a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+', text)\n # words = re.split(r'\\W+', text)\n words = re.split(r'\\P{L}+', text)\n words.remove('')\n return words\n\n\ndef tokenize2(text):\n \"\"\"uses the letters to break the text into words\n returns a list of words\"\"\"\n # words = re.findall(r'[a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+', text)\n # words = re.findall(r'\\w+', text)\n words = re.findall(r'\\p{L}+', text)\n return words\n\n\ndef tokenize3(text):\n \"\"\"uses the punctuation and nonletters to break the text into words\n returns a list of words\"\"\"\n # text = re.sub(r'[^a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’'()\\-,.?!:;]+', '\\n', text)\n # text = re.sub(r'([,.?!:;)('-])', r'\\n\\1\\n', text)\n text = re.sub(r'[^\\p{L}\\p{P}]+', '\\n', text)\n text = re.sub(r'(\\p{P})', r'\\n\\1\\n', text)\n text = re.sub(r'\\n+', '\\n', text)\n return text.split()\n\n\ndef tokenize4(text):\n \"\"\"uses the punctuation and symbols to break the text into words\n returns a list of words\"\"\"\n spaced_tokens = re.sub(r'([\\p{S}\\p{P}])', r' \\1 ', text)\n one_token_per_line = re.sub(r'\\s+', '\\n', spaced_tokens)\n tokens = one_token_per_line.split()\n return tokens\n\n\nif __name__ == '__main__':\n text = sys.stdin.read()\n \"\"\"words = tokenize(text)\n for word in words:\n print(word)\n words = tokenize2(text)\n print(words)\"\"\"\n words = tokenize4(text)\n print(words)\n","repo_name":"pnugues/ilppp","sub_path":"programs/ch05/python/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"62"} +{"seq_id":"36379679991","text":"import pickle\nimport random\nimport sys\nimport csv\n\ndef print_menu():\n print(\"-------------------------------------------------------------------------------------\")\n print(\"Welcome to the Python Booklist program!\\n\")\n print(\"This program allows you to write the title, author, genre and a summary of a book to a file.\\nYou can then view your booklist and search the booklist by title!\")\n print(\"-------------------------------------------------------------------------------------\")\n print(\"1. Add to booklist\")\n print(\"2. Search for an author in the booklist\")\n print(\"3. Print the full booklist\")\n print(\"4. Delete a book off the list by author\")\n print(\"5. 
Quit\")\n\n\n \n \ndef get_menu_choice():\n # for a choice and check it\n while (True):\n try:\n choice = int(input(\"\\nEnter your choice: \"))\n if (choice < 1 or choice > 5):\n print(\"Please select a valid option.\")\n continue\n except:\n print(\"Please enter a numeric value.\")\n else:\n break\n\n return choice\n\ndef main():\n print_menu()\n choice = get_menu_choice()\n bookRank = 0\n i = 0\n fullBookList = []\n filename =open('booklist.csv')\n for i in filename:\n fullBookList.append(i.split(\",\")) \n \n #if the user picks 1 in the menu they are going to add a line in the csv file that has the title, author and genre of the book\n if choice == 1:\n bookToAdd = input(\"Enter the name of the book: \")\n authorName = input(\"Enter the author of the book: \")\n genreOfBook = input(\"Enter the genre of the book: \")\n summaryOfBook = input(\"Write a summary of the book so you will remeber what it's about: \")\n\n \n #add the book to the list and file\n with open('booklist.csv','a') as fileName:\n fullBookList.append([bookToAdd,authorName,genreOfBook,summaryOfBook])\n fileName.write(str(len(fullBookList))+\",\")\n fileName.write(str(bookToAdd)+\",\")\n fileName.write(str(authorName)+\",\")\n fileName.write(str(genreOfBook)+\",\")\n fileName.write(str(summaryOfBook)+\",\" +\"\\n\")\n\n \n print(f\"Your book called {bookToAdd} written by {authorName} was added to your list!\")\n fileName.close()\n\n\n\n\n\n \n #option 2 allows the user to enter author of the book and print the book\n elif choice == 2:\n number = input(\"Enter the author you are looking for: \")\n\n \n\n #read csv, and split on \",\" the line\n filename = csv.reader(open('booklist.csv', \"r\"), delimiter=\",\")\n\n print(f\"\\nBooks by {number}\")\n print(\"--------------\")\n #loop through the csv list\n for row in filename:\n #if current rows 2nd value is equal to input, print that row\n\n if number == row[2]:\n print (row)\n \"\"\"\n this is not grabbing the right row\n filename = open('booklist.csv')\n bookNumber = 0\n test = False\n for i in filename:\n if test:\n #if you find the author print the books\n if searchKey in i:\n print(\"Books Found: \"+ str(i))\n else:\n print(\"That author is not on your list\")\n \n bookNumber +=1\n \"\"\"\n \n\n \n elif choice == 3:\n #print the full booklist.csv file\n filename =open('booklist.csv')\n for i in filename: \n print(i)\n\n \n elif choice == 4:\n #want to delete an entire row based off of the number/ author\n delKey = input(\"Enter author of the book you want to delete: \")\n for i in fullBookList:\n if i[2] == delKey:\n fullBookList.remove(i)\n #index won't work\n #if delKey.isdigit():\n # fullBookList.pop(int(delKey)-1)\n # else:\n \n #read csv, and split on \",\" the line\n with open('booklist.csv', \"w\") as filenameDel:\n \n #loop through the csv list\n index = 1\n for row in fullBookList:\n #fix index when you delete a value \n fullBookList[index -1][0] = str(index)\n\n #write the new values after something is del\n filenameDel.write(fullBookList[index - 1][0]+\",\"+ fullBookList[index-1][1]+\",\"+ fullBookList[index-1][2]+\",\"+ fullBookList[index-1][3]+\",\"+ fullBookList[index-1][4]+\"\\n\")\n index +=1 \n \n print(\"Book was deleted from the list\")\n\n elif choice == 5:\n sys.exit()\n \n play_again = input(\"\\n Add another book? 
(y/n): \")\n if play_again.lower() == \"y\":\n main()\n\n# Call main\nmain()\n","repo_name":"Chelseyb/Python-Book-List-Program","sub_path":"main_finalProject.py","file_name":"main_finalProject.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"37608786629","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport pickle\nimport json\nfrom tqdm import tqdm\nimport jieba\nimport numpy as np\nimport os\n\ndef seg_line(line):\n return list(jieba.cut(line))\n\n\ndef seg_data(path):\n print ('\\nstart process {}'.format(path))\n data = []\n with open(path, 'r') as f:\n for line in tqdm(f):\n dic = json.loads(line, encoding='utf-8')\n if len(dic[\"alternatives\"].split(\"|\")) != 3:\n continue\n question = dic['query']\n doc = dic['passage']\n alternatives = dic['alternatives']\n data.append([seg_line(question), seg_line(doc), alternatives.split('|'), dic['query_id']])\n return data\n\n\ndef build_word_count(data):\n wordCount = {}\n\n def add_count(lst):\n for word in lst:\n if word not in wordCount:\n wordCount[word] = 0\n wordCount[word] += 1\n\n for one in data:\n [add_count(x) for x in one[0:3]]\n print ('word type size ', len(wordCount))\n return wordCount\n\n\ndef build_word2id(wordCount, threshold=10):\n word2id = {'': 0, '': 1}\n for word in wordCount:\n if wordCount[word] >= threshold:\n if word not in word2id:\n word2id[word] = len(word2id)\n else:\n chars = list(word)\n for char in chars:\n if char not in word2id:\n word2id[char] = len(word2id)\n print ('processed word size ', len(word2id))\n return word2id\n\n\ndef transform_data_to_id(raw_data, word2id):\n data = []\n\n def map_word_to_id(word):\n output = []\n if word in word2id:\n output.append(word2id[word])\n else:\n chars = list(word)\n for char in chars:\n if char in word2id:\n output.append(word2id[char])\n else:\n output.append(1)\n return output\n\n def map_sent_to_id(sent):\n output = []\n for word in sent:\n output.extend(map_word_to_id(word))\n return output\n\n for one in raw_data:\n question = map_sent_to_id(one[0])\n doc = map_sent_to_id(one[1])\n candidates = [map_word_to_id(x) for x in one[2]]\n length = [len(x) for x in candidates]\n max_length = max(length)\n if max_length > 1:\n pad_len = [max_length - x for x in length]\n candidates = [x[0] + [0] * x[1] for x in zip(candidates, pad_len)]\n data.append([question, doc, candidates, one[-1]])\n return data\n\n\ndef process_data(data_path, threshold, embedding_path=None, wordvec_path=None, embed_size=200):\n train_file_path = data_path + 'trainingset/train.json'\n dev_file_path = data_path + 'validationset/valid.json'\n test_a_file_path = data_path + 'testa/testa.json'\n # test_b_file_path = data_path + 'ai_challenger_oqmrc_testb_20180816/ai_challenger_oqmrc_testb.json'\n path_lst = [train_file_path, dev_file_path, test_a_file_path] #, test_b_file_path]\n output_path = [data_path + x for x in ['train.pickle', 'dev.pickle','testa.pickle']] # , 'testb.pickle']]\n return _process_data(path_lst, threshold, output_path, embedding_path, wordvec_path, embed_size)\n\n\ndef _process_data(path_lst, word_min_count=5, output_file_path=[],\n embedding_path=None, wordvec_path=None, embed_size=200):\n\n raw_data = []\n for path in path_lst:\n raw_data.append(seg_data(path))\n\n if os.path.exists(data_path + \"word-count.pickle\"):\n print(\"load word-count from {}\".format(data_path + \"word-count.pickle\"))\n word_count = pickle.load(data_path + \"word-count.pickle\")\n else:\n 
word_count = build_word_count([y for x in raw_data for y in x])\n with open(data_path + 'word-count.pickle', 'wb') as f:\n pickle.dump(word_count, f)\n\n if os.path.exists(data_path + \"word2id.pickle\"):\n print(\"load word2id from {}\".format(data_path + \"word2id.pickle\"))\n word2id = pickle.load(open(data_path + \"word2id.pickle\", 'rb'))\n else:\n word2id = build_word2id(word_count, word_min_count)\n with open(data_path + 'word2id.pickle', 'wb') as f:\n pickle.dump(word2id, f)\n for one_raw_data, one_output_file_path in zip(raw_data, output_file_path):\n with open(one_output_file_path, 'wb') as f:\n one_data = transform_data_to_id(one_raw_data, word2id)\n pickle.dump(one_data, f)\n\n embedding = np.random.normal(0, 0.1, (len(word2id), embed_size))\n embedding[0] = 0\n embedding[1] = 0\n if not os.path.exists(wordvec_path):\n print(\"load embedding vector from {}\".format(embedding_path))\n with open(embedding_path, \"r\") as f:\n count = 0\n for i, line in tqdm(enumerate(f)):\n if i > 800000:\n break\n content = line.strip().split()\n tokens = content[0]\n if tokens not in word2id:\n continue\n count += 1\n embedding[word2id[tokens]] = np.asarray(list(map(float, content[1:])))\n print(\"pre-trained words {}\".format(count))\n np.save(wordvec_path, embedding)\n else:\n print(\"load wordvec from the npy file {}\".format(wordvec_path))\n embedding = np.load(wordvec_path)\n return len(word2id), embedding\n\nif __name__ == \"__main__\":\n data_path = \"/home/zhengyinhe/xie_data/data/\"\n embedding_path = \"/home/zhengyinhe/xie_data/data/Tencent_AILab_ChineseEmbedding.txt\"\n wordvec_path = \"/home/zhengyinhe/xie_data/data/wordvec_200.npy\"\n process_data(data_path, 5, embedding_path, wordvec_path, embed_size=200)\n\n\"\"\"\nword type size 332265\nprocessed word size 96973\nload embedding vector from /home/zhengyinhe/xie_data/data/Tencent_AILab_ChineseEmbedding.txt\n796219it [00:16, 47926.30it/s]pre-trained words 86944\n\"\"\"\n","repo_name":"ooooooooe/torch_MwAN_AIChallenger","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12740535720","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\n\n\nmetadata = pd.read_csv('./the-movies-dataset/movies_metadata.csv', low_memory=False)\n\n# Calculation based on the IMDB formula\n# Weighted Rating (WR) = (v/(v+m) * R) + (m/(m+v) * C)\n# where,\n\n# v is the number of votes for the movie;\n# m is the minimum votes required to be listed in the chart;\n# R is the average rating of the movie; And\n# C is the mean vote across the whole report\nC = metadata['vote_average'].mean()\n\nm = metadata['vote_count'].quantile(0.90)\n\nvalid_movies = metadata.copy().loc[metadata['vote_count'] >= m]\n\ndef weighted_rating(x, m=m, C=C):\n v = x['vote_count']\n R = x['vote_average']\n return (v/(v+m) * R) + (m/(m+v) * C)\n\nvalid_movies['score'] = valid_movies.apply(weighted_rating, axis=1)\nvalid_movies = valid_movies.sort_values('score', ascending=False)\n\nprint(valid_movies[['title', 'vote_count', 'vote_average', 'score']].head(15))\n\n# titles = np.array(valid_movies[['title']].head(15)).reshape(15,1)\n# score = np.array(valid_movies[['score']].head(15))\n\n# from pylab import rcParams\n# rcParams['figure.figsize'] = 35,20\n\n# titles = [ '\\n'.join(wrap(l, 10)) for l in titles ]\n# # print(titles.shape)\n# # print(score.shape)\n\n# # ind = 
np.arange(1,len(score)+1)\n\n# ind = np.arange(1,len(score)+1)\n# print(titles)\n# print(ind)\n# plt.title(\"\")\n# plt.ylabel(\"Score\")\n# plt.xlabel(\"Movie Title\")\n# plt.bar(ind, score, align = 'center')\n# plt.legend()\n# plt.show()","repo_name":"SaadatRizvi/Movie-Recommendation-System","sub_path":"simpleIMDB.py","file_name":"simpleIMDB.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"62"} +{"seq_id":"28261999813","text":"import machine\nimport time\n\nclass LedBlink:\n def __init__(self, pin=2):\n self.pin = machine.Pin(pin, machine.Pin.OUT)\n\n def loop(self, test=False):\n while True:\n # there is a bug in nodemcu\n # due to which pin.on() actually works opposite\n print(\"led value => off\")\n self.pin.on()\n time.sleep(0.5)\n\n print(\"led value => on\")\n self.pin.off()\n time.sleep(0.5)\n \n\n if test:\n # turn off the led\n # and exit program\n self.pin.on()\n\n break\n\nprint(\"Begin Led toogle program\")\nLedBlink().loop()","repo_name":"imneonizer/MicroPython-NodeMCU","sub_path":"samples/led.py","file_name":"led.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3086582859","text":"from flask import Flask, request\r\nfrom flask_restful import Resource, Api\r\nfrom flask_cors import CORS\r\nimport pandas as pd\r\nimport json\r\nimport re\r\nimport csv \r\n\r\nimport array as a\r\nimport json\r\n\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import LSTM\r\nimport numpy \r\nfrom numpy import array\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\napi = Api(app)\r\n\r\ndef find_pred(employee):\r\n\r\n cols = ['Date', 'ID', 'Name','Score'] \r\n\r\n pdObj = pd.read_json('https://timeseries-emp.herokuapp.com/readdata')\r\n\r\n data = pdObj.Data\r\n\r\n cvsfile=\"\"\r\n data_file = open('./file.csv', 'w')\r\n csv_writer = csv.writer(data_file)\r\n count = 0\r\n for d in data:\r\n if count == 0:\r\n \r\n # Writing headers of CSV file\r\n header = d.keys()\r\n csv_writer.writerow(header)\r\n count += 1\r\n \r\n # Writing data of CSV file\r\n csv_writer.writerow(d.values())\r\n \r\n data_file.close()\r\n\r\n df = pd.read_csv('./file.csv')\r\n\r\n\r\n booleans = []\r\n\r\n\r\n\r\n for result in df.id:\r\n if re.search(employee, result):\r\n booleans.append(True)\r\n else:\r\n booleans.append(False)\r\n\r\n filter = pd.Series(booleans)\r\n\r\n df = df[filter]\r\n\r\n\r\n df1=df.reset_index()[\"score\"]\r\n\r\n from sklearn.preprocessing import MinMaxScaler\r\n scaler=MinMaxScaler(feature_range=(0,1))\r\n df1=scaler.fit_transform(numpy.array(df1).reshape(-1,1))\r\n\r\n\r\n\r\n def create_dataset(dataset, time_step=1):\r\n dataX, dataY = [], []\r\n for i in range(len(dataset)-time_step-1):\r\n a = dataset[i:(i+time_step), 0]\r\n dataX.append(a)\r\n dataY.append(dataset[i + time_step, 0])\r\n return numpy.array(dataX), numpy.array(dataY)\r\n\r\n time_step = 7\r\n X_train, y_train = create_dataset(df1, time_step)\r\n\r\n X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)\r\n\r\n model=Sequential()\r\n model.add(LSTM(50,return_sequences=True,input_shape=(7,1)))\r\n model.add(LSTM(50,return_sequences=True))\r\n model.add(LSTM(50))\r\n model.add(Dense(1))\r\n model.compile(loss=\"mean_squared_error\", optimizer=\"adam\")\r\n\r\n model.fit(X_train, y_train, validation_data=(X_train,y_train), epochs=5, 
batch_size=64, verbose=1)\r\n\r\n to_predict=df1[len(df1)-7:len(df1)]\r\n to_predict=to_predict.reshape(1,7,1)\r\n\r\n train_predict=model.predict(to_predict)\r\n\r\n train_predict = scaler.inverse_transform(train_predict)\r\n\r\n\r\n train_predict=train_predict.reshape(1)\r\n\r\n # d = dict(enumerate(train_predict.flatten(), 1))\r\n jsonString=json.dumps(str(train_predict))+employee\r\n\r\n return jsonString\r\n\r\n\r\n\r\n\r\n\r\nclass lstm(Resource):\r\n\r\n def get(self):\r\n query = request.args.get('query')\r\n\r\n pred_value= find_pred(query)\r\n\r\n return pred_value\r\n\r\napi.add_resource(lstm, '/')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"akhilr2505/WELLTRACK","sub_path":"MachineLearningFiles/LSTM/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"30180747219","text":"import numpy as np\r\n\r\n\r\ndef get_centers(X, C, K):\r\n last_distance = distance(C[0], X[0]) #Storage for the distance of the last cluster checked\r\n new_c = np.zeros((K, X[0].size)) #output center\r\n closest_cluster = 0 #index of the cluster closest to the sample\r\n num_per_cluster = np.zeros(K) #Number of samples in each cluster, as a 1D array\r\n\r\n '''Iterate over all of the clusters. Find the closest cluster. Increase the\r\n number of samples in that cluster, and then add the sample to the cluster'''\r\n for i in range(0, X[...,0].size):\r\n for j in range(0, K):\r\n if last_distance > distance(C[j], X[i]):\r\n closest_cluster = j\r\n last_distance = distance(C[j], X[i])\r\n new_c[closest_cluster] = new_c[closest_cluster] + X[i]\r\n num_per_cluster[closest_cluster] = num_per_cluster[closest_cluster] + 1\r\n #Take each summed cluster, and then divide it by the number of samples in that cluster\r\n for cluster in range(0, K):\r\n if not(num_per_cluster[cluster] == 0):\r\n new_c[cluster] = new_c[cluster] / num_per_cluster[cluster]\r\n\r\n return new_c\r\n\r\n\r\ndef distance(P1, P2):\r\n #Takes the 2 norm\r\n return np.linalg.norm(P1-P2)\r\n\r\n\r\ndef K_Means_recur(X, C, K, depth):\r\n #Get the centers\r\n new_c = get_centers(X, C, K)\r\n cont = False\r\n #if the centers are no longer moving, don't continue. Else, continue\r\n #Caps out at a recursive depth of 600, otherwise it hits python's recursion limit\r\n for cluster in range(0, K):\r\n while np.array_equal(new_c[cluster], np.zeros(2)):\r\n new_c[cluster] = np.random.random_integers(np.amin(X), np.amax(X), X[0].size)\r\n for i in range(0, K):\r\n\r\n if not(distance(C[i], new_c[i]) == 0) and not(depth >= 100):\r\n cont = True\r\n if not cont:\r\n return new_c\r\n return K_Means_recur(X, new_c, K, depth + 1)\r\n\r\n\r\ndef K_Means(X, K):\r\n #randomly generate centers\r\n c_out = np.zeros((K, X[0].size)) #initialize output cluster\r\n C = np.zeros((K, X[0].size)) #initialize clusters to zero\r\n\r\n for cluster in range(0, K):\r\n C[cluster] = np.random.random_integers(np.amin(X), np.amax(X), X[0].size) #randomize starting clusters along max and min\r\n c_out = K_Means_recur(X, C, K, 0) # recursively generate the cluster\r\n\r\n while np.unique(c_out, axis=0)[..., 0].size != K: #if it contains duplicate clusters, regenerate\r\n c_out = K_Means(X, K)\r\n\r\n return c_out\r\n\r\n\r\ndef K_Means_better(X, K): #inefficient. Please wait. 
It will output\r\n #dictionary to hold the number of times a cluster appears\r\n num_dict = dict()\r\n #dictionary to hold the percentage of total times the cluster appears\r\n percent_dict = dict()\r\n\r\n percent_largest = 0.0\r\n times_performed = 1\r\n cluster = np.zeros((K, X[0].size))\r\n\r\n while (percent_largest < .50 or times_performed < 50) and times_performed < 100:\r\n #Lists do not work as keys. Convert to string to use as key\r\n new_mean = np.array2string(K_Means(X, K))\r\n #Add or increment cluster centers to dictionary\r\n if (new_mean in num_dict):\r\n num_dict[new_mean] = num_dict[new_mean] + 1\r\n else:\r\n num_dict[new_mean] = 1\r\n #Update all percentages\r\n for state in num_dict:\r\n percent_dict[state] = num_dict[state] / times_performed\r\n\r\n cluster_string = max(num_dict, key=num_dict.get)\r\n #Lines 70-75 convert the string back into an np array\r\n cluster_string_array = cluster_string[1:len(cluster_string) - 1].splitlines()\r\n\r\n for i in range(0, K):\r\n stre = cluster_string_array[i][cluster_string_array[i].index('[') + 1:cluster_string_array[i].index(']')]\r\n array = stre.split()\r\n for j in range(0, len(array)):\r\n cluster[i][j] = float(array[j])\r\n percent_largest = percent_dict[new_mean]\r\n times_performed = times_performed + 1\r\n\r\n return cluster\r\n","repo_name":"tfinnegan937/Machine-Learning---Clustering-Decision-Trees","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5737967994","text":"import os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport scipy as sci\nimport math\nimport torch\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nfrom sklearn import preprocessing\nimport imblearn.over_sampling as ib\nfrom sklearn.metrics import accuracy_score\n# Plot style\nsns.set(style=\"whitegrid\")\n\n\n\n\ndef visualize_relevance(labels, relevance, target_label):\n # Visualize feature-wise relevance scores as barchart\n plt.figure(figsize=(8, 8))\n # print([(labels[i], relevance[i]) for i in range(len(labels))])\n plt.barh(labels, relevance, height=1)\n plt.title(target_label)\n plt.xlabel(\"Relevance\")\n plt.show()\n\ndef _get_model_name_dict():\n dic = {\n 'Risk of Violence_decile_score/10': 'viol_dec10',\n 'Risk of Violence_decile_score': 'viol_dec',\n 'Risk of Violence_raw_score': 'viol_raw',\n 'Risk of Recidivism_decile_score/10': 'rec_dec10',\n 'Risk of Recidivism_decile_score': 'rec_dec',\n 'Risk of Recidivism_raw_score': 'rec_raw',\n }\n return dic\n\n\ndef save_model(model, base_path, name):\n try:\n name = _get_model_name_dict()[name]\n except:\n pass\n print(\"Saving %s model.\" % name)\n\n path = os.path.join(base_path, \"saves\")\n os.makedirs(path, exist_ok=True)\n torch.save(model, os.path.join(path, name + \".model\"))\n\n\ndef load_model(name, base_path):\n try:\n name = _get_model_name_dict()[name]\n except:\n pass\n print(\"Loading %s model.\" % name)\n\n path = os.path.join(base_path, \"saves\")\n model = torch.load(os.path.join(path, name + \".model\"))\n return model\n\n\ndef make_weight_table(labels):\n \n labels = list(labels)\n labels = [float(l) for l in labels]\n c = Counter(labels)\n l = len(labels)\n frac = {}\n for key, count in c.items():\n frac[key] = 1/(count/l)\n \n del c\n \n return frac\n\n\ndef oversample(X_train, y_train, target, weighting= 
['random'], random_state = 12345):\n matching_dic = {'Risk of Recidivism_decile_score': 'Risk of Recidivism_decile_score',\n 'Risk of Recidivism_decile_score/10': 'Risk of Recidivism_decile_score',\n 'Risk of Recidivism_raw_score': 'Risk of Recidivism_decile_score',\n 'recid': 'recid',\n 'recid_proPub': 'recid_proPub',\n 'Recidivism_decile': 'Recidivism_decile',\n 'Recidivism_decile/10': 'Recidivism_decile',\n 'Risk of Violence_decile_score': 'Risk of Violence_decile_score',\n 'Risk of Violence_decile_score/10': 'Risk of Violence_decile_score',\n 'Risk of Violence_raw_score': 'Risk of Violence_decile_score',\n 'recid_violent': 'recid_violent',\n 'recid_violent_proPub': 'recid_violent_proPub',\n }\n \n cat_mask = ['p_current_on_probation', 'is_misdem','race_black', 'race_white', 'race_hispanic', 'race_asian',\n 'race_native', 'is_male']\n weighting += [5,5]\n colnames = list(X_train.columns)\n #x_index = X_train.index.values\n #y_index = y_train.index.values\n cat_mask = np.array([True if col in cat_mask else False for col in colnames ] + [True])\n \n oversampler = {'random': ib.RandomOverSampler(sampling_strategy='auto', random_state=random_state),\n 'ADASYN': ib.ADASYN(sampling_strategy='not majority', random_state=random_state,\n n_neighbors=weighting[1], n_jobs=1),\n 'SMOTE': ib.SMOTE(sampling_strategy='not majority', random_state=random_state, k_neighbors=weighting[1]),\n 'SMOTENC': ib.SMOTENC(categorical_features=cat_mask, sampling_strategy='not majority',\n random_state=random_state,k_neighbors=weighting[1], n_jobs=1),\n 'BorderlineSMOTE': ib.BorderlineSMOTE(sampling_strategy='not majority', random_state=random_state,\n k_neighbors=weighting[1],\n n_jobs=1,m_neighbors=10, kind='borderline-1'),\n 'SVMSMOTE': ib.SVMSMOTE(sampling_strategy='not majority', random_state=random_state,\n k_neighbors=weighting[1], n_jobs=1,\n m_neighbors=weighting[2], svm_estimator=None, out_step=0.5),\n 'KMeansSMOTE':ib.KMeansSMOTE(sampling_strategy='not majority', random_state=random_state, \n k_neighbors=weighting[1], n_jobs=1, kmeans_estimator=None,\n cluster_balance_threshold='auto',\n density_exponent='auto'),\n }\n \n strategy = weighting[0]\n \n X_train['target'] = y_train[target]\n X_os, y_os = oversampler[strategy].fit_resample(X_train, y_train[matching_dic[target]])\n X_train.drop(labels=['target'], inplace = True, axis=1)\n if strategy == 'random':\n \n i_os = list(oversampler[strategy].sample_indices_)\n \n y_os = y_train.iloc[i_os]\n X_os = X_train.iloc[i_os]\n else:\n #label score - mostly decile\n y_os = np.array([list(y_os)])\n #adding the actual target score\n y_os = np.transpose(np.concatenate((y_os[0:,], np.array([list(X_os['target'])])), axis=0))\n\n y_os = pd.DataFrame(data= y_os[0:,0:],\n index = [i for i in range(y_os.shape[0])],\n columns = [matching_dic[target], target])\n\n #remove generated target score from x\n X_os.drop(labels=['target'], inplace=True, axis=1)\n\n return X_os, y_os\n\n\ndef rescale_df(df, min_val=0, max_val=1, variable_dependent=False):\n # Scale data to [0,1]\n min_max_scaler = preprocessing.MinMaxScaler((min_val, max_val))\n np_scaled = min_max_scaler.fit_transform(df)\n df_X_norm = pd.DataFrame(np_scaled, columns=df.columns, index=df.index)\n\n if variable_dependent:\n for c in df_X_norm.columns:\n if str(c) in [\"p_current_age\", \"p_age_first_offense\"]:\n # min_max_scaler = preprocessing.MinMaxScaler((-1, 1))\n min_max_scaler = preprocessing.StandardScaler()\n np_scaled = min_max_scaler.fit_transform(pd.DataFrame(df[c]))\n # print(np_scaled)\n df_X_norm[c] 
= np_scaled\n\n return df_X_norm\n\ndef custom_train_test_split(df_X_norm, df_y, matchframe, dataset, test_size=0.25, stratify=None, random_state=0):\n if \"reduced_size\" not in dataset:\n\n df_X_s = df_X_norm.loc[list(matchframe.is_reduced)]\n df_y_s = df_y.loc[list(matchframe.is_reduced)]\n df_X_e = df_X_norm.loc[[not i for i in list(matchframe.is_reduced)]]\n df_y_e = df_y.loc[[not i for i in list(matchframe.is_reduced)]]\n if stratify:\n stratl = df_y_s[stratify]\n else:\n stratl = None\n X_train, X_val, y_train, y_val = train_test_split(df_X_s, df_y_s, test_size=test_size,\n random_state=random_state, stratify=stratl)\n\n if stratify:\n stratl = y_train[stratify]\n # split into train and validation; TODO possibly stratify\n X_val, X_test, y_val, y_test = train_test_split(X_train, y_train, test_size=test_size,\n random_state=random_state, stratify=stratl)\n\n ##add those inds to train set which are not in the reduced set\n X_train = pd.concat([X_train, df_X_e], axis=0)\n y_train = pd.concat([y_train, df_y_e], axis=0)\n\n\n\n else:\n if stratify:\n stratl = df_y[stratify]\n else:\n stratl = None\n X_train, X_test, y_train, y_test = train_test_split(df_X_norm, df_y, test_size=test_size,\n random_state=random_state, stratify=stratl)\n if stratify:\n stratl = y_train[stratify]\n else:\n stratl = None\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=test_size,\n random_state=random_state, stratify=stratl)\n\n return X_train, X_val, X_test, y_train, y_val, y_test\n\ndef marker(cat, recid, group = ['medium','high']):\n \n if cat.lower() in group and recid == 1:\n return(0.5)\n elif cat.lower() in group and recid !=1:\n return(1)\n elif cat.lower() not in group and recid == 1:\n return(-1)\n elif cat.lower() not in group and recid !=1:\n return(-0.5)\n \n\ndef iterative_marker(dec, recid, iterative_cutoff=4):\n if dec >= iterative_cutoff and recid ==1:\n return(0.5)\n elif dec >= iterative_cutoff and recid !=1:\n return(1)\n elif dec < iterative_cutoff and recid == 1:\n return(-1)\n elif dec < iterative_cutoff and recid !=1:\n return(-0.5)\n\n\ndef calc_rates(rates_list, compas_pred, true_recid, cutoff, abso=False):\n tp = [1 if rates_list[i] < 1 and compas_pred[i] >= cutoff and true_recid[i] == 1 else 0 for i in\n range(len(rates_list))]\n fp = [1 if rates_list[i] < 1 and compas_pred[i] >= cutoff and true_recid[i] != 1 else 0 for i in\n range(len(rates_list))]\n tn = [1 if (rates_list[i] >= 1 and compas_pred[i] >= cutoff and true_recid[i] != 1) or (\n compas_pred[i] < cutoff and true_recid[i] != 1) else 0 for i in range(len(rates_list))]\n fn = [1 if (rates_list[i] >= 1 and compas_pred[i] >= cutoff and true_recid[i] == 1) or (\n compas_pred[i] < cutoff and true_recid[i] == 1) else 0 for i in range(len(rates_list))]\n\n try:\n fd = sum(fp)/sum(fp+tp)\n except:\n fd = 0\n try:\n fo = sum(fn)/sum(fn+tn)\n except:\n fo = 0\n try:\n omr = sum(fp+fn)/sum(tp+fp+tn+fn)\n except:\n omr = 0\n if abso:\n return sum(tp), sum(fp), sum(tn), sum(fn), fd, fo, omr\n\n if sum(true_recid) > 0:\n tp = sum(tp) / sum(true_recid)\n else:\n tp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n fp = sum(fp) / (len(true_recid) - sum(true_recid))\n else:\n fp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n tn = sum(tn) / (len(true_recid) - sum(true_recid))\n\n else:\n tn = 0\n if sum(true_recid) > 0:\n fn = sum(fn) / sum(true_recid)\n\n else:\n fn = 0\n\n return tp, fp, tn, fn, fd, fo, omr\n\n\ndef calc_rates_two_models(rates_list, true_recid, abso=False):\n tp = [1 if rates_list[i] 
== 1 and true_recid[i] == 1 else 0 for i in range(len(rates_list))]\n fp = [1 if rates_list[i] == 1 and true_recid[i] != 1 else 0 for i in range(len(rates_list))]\n tn = [1 if rates_list[i] != 1 and true_recid[i] != 1 else 0 for i in range(len(rates_list))]\n fn = [1 if rates_list[i] != 1 and true_recid[i] == 1 else 0 for i in range(len(rates_list))]\n\n try:\n fd = sum(fp) / sum(fp + tp)\n except:\n fd = 0\n try:\n fo = sum(fn) / sum(fn + tn)\n except:\n fo = 0\n\n try:\n omr = sum(fp + fn) / sum(tp + fp + tn + fn)\n except:\n omr = 0\n\n if abso:\n return sum(tp), sum(fp), sum(tn), sum(fn), fd, fo, omr\n\n if sum(true_recid) > 0:\n tp = sum(tp) / sum(true_recid)\n else:\n tp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n fp = sum(fp) / (len(true_recid) - sum(true_recid))\n else:\n fp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n tn = sum(tn) / (len(true_recid) - sum(true_recid))\n\n else:\n tn = 0\n if sum(true_recid) > 0:\n fn = sum(fn) / sum(true_recid)\n\n else:\n fn = 0\n\n return tp, fp, tn, fn, fd, fo, omr\n\n\n\ndef calc_compas_proportions(compas_pred, true_recid, cutoff, abso=False):\n\n\n tp = [1 if compas_pred[i] >= cutoff and true_recid[i] == 1 else 0 for i in range(len(compas_pred))]\n fp = [1 if compas_pred[i] >= cutoff and true_recid[i] != 1 else 0 for i in range(len(compas_pred))]\n tn = [1 if compas_pred[i] < cutoff and true_recid[i] != 1 else 0 for i in range(len(compas_pred))]\n fn = [1 if compas_pred[i] < cutoff and true_recid[i] == 1 else 0 for i in range(len(compas_pred))]\n\n try:\n fd = sum(fp) / sum(fp + tp)\n except:\n fd = 0\n try:\n fo = sum(fn) / sum(fn + tn)\n except:\n fo = 0\n try:\n omr = sum(fp + fn) / sum(tp + fp + tn + fn)\n except:\n omr = 0\n\n c = tp + tn\n if abso:\n return sum(tp), sum(fp), sum(tn), sum(fn), sum(c), fd, fo, omr\n\n if sum(true_recid) > 0:\n tp = sum(tp) / sum(true_recid)\n else:\n tp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n fp = sum(fp) / (len(true_recid) - sum(true_recid))\n else:\n fp = 0\n if (len(true_recid) - sum(true_recid)) > 0:\n tn = sum(tn) / (len(true_recid) - sum(true_recid))\n\n else:\n tn = 0\n if sum(true_recid) > 0:\n fn = sum(fn) / sum(true_recid)\n\n else:\n fn = 0\n\n\n return tp, fp, tn, fn, round(sum(c)/len(c), 2), fd, fo, omr\n\n\ndef use_compas_pred_for_missing(y_val, idc, pred, cutoff, col):\n tmp = y_val.copy(deep=True)\n # get keys (pos and negative)\n\n for key in idc.keys():\n if '_FP' in str(key):\n posk = str(key)\n else:\n negk = str(key)\n posi = [elem for elem in list(idc[posk][cutoff[0]])]\n negi = [elem for elem in list(idc[negk][cutoff[0]])]\n\n conc = [posi[i] or negi[i] for i in range(len(posi))]\n n_idx = [not elem for elem in [posi[i] or negi[i] for i in range(len(posi))]]\n\n ##add the COMPAS cutoff jailing, if the individ. 
does not appear in FP or FN list\n # print('Amount of positive indices for cutoff {}'.format(cutoff[1]))\n # print(Counter(posi))\n # print('Amount of negative indices for cutoff {}'.format(cutoff[1]))\n # print(Counter(negi))\n # print('{} FP predictions of DT for cutoff {}'.format(sum(pred[posk][cutoff[0]]), cutoff[1]))\n # print('{} FN predictions of DT for cutoff {}'.format(sum(pred[negk][cutoff[0]]), cutoff[1]))\n\n # with two models there should be nothing happening here\n tmp.loc[n_idx, 'pred'] = [1 if el >= cutoff[1] else 0 for el in tmp.loc[n_idx, col]]\n\n # only add predictions if there are acutally predictions made\n if True in posi:\n # invert FP as it would mean that DT assumes them to be negative\n tmp.loc[posi, 'pred'] = [abs(el - 1) for el in pred[posk][cutoff[0]]]\n if True in negi:\n tmp.loc[negi, 'pred'] = pred[negk][cutoff[0]]\n\n return list(tmp.pred)\n\n\n##fix calculation and divsion\ndef calc_prorportions(y_val, rates_list_l, bool_select, compas_pred, true_recid,\n rn=range(1, 10), mask=None, abso=False, zafar=False, index_gini = False):\n if type(mask) == type(None):\n mask = [True for _ in range(0, y_val.shape[0])]\n tp_p = []\n fp_p = []\n tn_p = []\n fn_p = []\n fd_p = []\n fo_p = []\n omr_p = []\n tp_c = []\n fp_c = []\n tn_c = []\n fn_c = []\n fd_c = []\n fo_c = []\n omr_c = []\n corr = []\n corr_c = []\n gini = []\n gini_c = []\n\n #Zafar et al. 2017 , false discovery, false omission, overall misclassification rate\n\n for i, r in enumerate(rn):\n\n if index_gini:\n tp_t, fp_t, tn_t, fn_t, cc, fd, fo, omr = calc_compas_proportions(y_val.loc[mask, compas_pred],\n y_val.loc[mask, true_recid], r, abso=True)\n\n gini_c.append(gini_index_from_contingency(tp_t, fp_t, tn_t, fn_t))\n\n else:\n\n tp_t, fp_t, tn_t, fn_t, cc, fd, fo, omr = calc_compas_proportions(y_val.loc[mask, compas_pred],\n y_val.loc[mask, true_recid], r, abso=abso)\n\n\n\n tp_c.append(tp_t)\n fp_c.append(fp_t)\n tn_c.append(tn_t)\n fn_c.append(fn_t)\n fd_c.append(fd)\n fo_c.append(fo)\n omr_c.append(omr)\n corr_c.append(cc)\n pred = use_compas_pred_for_missing(y_val, bool_select, rates_list_l, (i, r), compas_pred)\n\n #calc number of correct predictions\n c = np.sum(y_val.loc[mask, true_recid].to_numpy() == np.array(pred)[mask])\n if not abso:\n c = c/y_val.loc[mask, true_recid].shape[0]\n corr.append(c)\n\n # print(Counter(pred))\n # print('\\n')\n # tp, fp, tn, fn = calc_rates(list(np.array(pred)[mask]), y_val.loc[mask, compas_pred],\n # y_val.loc[mask, true_recid], cutoff=r, abso = abso)\n\n if index_gini:\n tp, fp, tn, fn, fd, fo, omr = calc_rates_two_models(list(np.array(pred)[mask]),\n y_val.loc[mask, true_recid], abso=True)\n\n gini.append(gini_index_from_contingency(tp, fp, tn, fn))\n else:\n tp, fp, tn, fn, fd, fo, omr = calc_rates_two_models(list(np.array(pred)[mask]),\n y_val.loc[mask, true_recid], abso=abso)\n tp_p.append(tp)\n fp_p.append(fp)\n tn_p.append(tn)\n fn_p.append(fn)\n fd_p.append(fd)\n fo_p.append(fo)\n omr_p.append(omr)\n\n if index_gini:\n return gini, gini_c\n elif zafar:\n return fd_p, fp_p, fo_p, fn_p, fd_c, fp_c, fo_c, fn_c, corr, corr_c\n else:\n return tp_p, fp_p, tn_p, fn_p, tp_c, fp_c, tn_c, fn_c, corr, corr_c\n\n\n\ndef transform_model_results(results_dic, result_key, mask = None, abso=False, zafar=False):\n\n tpc = []\n fpc = []\n fnc = []\n tnc = []\n tpm = []\n fpm = []\n fnm = []\n tnm = []\n corrm = []\n corrc = []\n\n y_true = np.array(results_dic[result_key][0]['true']).flatten()\n if type(mask) == type(None):\n mask = [True for _ in range(0, 
y_true.shape[0])]\n n_divisorm = 1\n p_divisorm = 1\n n_divisorc = 1\n p_divisorc = 1\n fractor = 1\n\n\n for i in results_dic[result_key].keys():\n preds = np.array([1 if el >=0.5 else 0 for el in results_dic[result_key][i]['y_hat']])\n compas = np.array(results_dic[result_key][i]['compas']).flatten()\n corrected = np.abs(compas-preds)\n if not abso or zafar:\n ## TP + FN\n n_divisorm = np.sum((corrected[mask] == 1) & (corrected[mask] == y_true[mask])) + np.sum(corrected[mask] < y_true[mask])\n ## TN + FP\n p_divisorm = np.sum((corrected[mask] == 0) & (corrected[mask] == y_true[mask])) +np.sum(corrected[mask] > y_true[mask])\n\n n_divisorc = np.sum((compas[mask] == 1) & (compas[mask] == y_true[mask])) + np.sum(compas[mask] < y_true[mask])\n p_divisorc = np.sum((compas[mask] == 0) & (compas[mask] == y_true[mask])) + np.sum(compas[mask] > y_true[mask])\n fractor = y_true.shape[0]\n\n ##false positive rate FPR = FP/N = FP/(FP+TN)\n fpm.append(np.sum(corrected[mask] > y_true[mask]) / p_divisorm)\n ##true negative rate FNR = FN/P = FN/(FN+TP)\n fnm.append(np.sum(corrected[mask] < y_true[mask]) / n_divisorm)\n\n fpc.append(np.sum(compas[mask] > y_true[mask]) / p_divisorc)\n fnc.append(np.sum(compas[mask] < y_true[mask]) / n_divisorc)\n corrm.append(np.sum(corrected==y_true)/fractor)\n corrc.append(np.sum(compas==y_true)/fractor)\n if not zafar:\n\n # true positive rate TPR = TP/P = TP/(TP+FN)\n tpm.append(np.sum((corrected[mask] == np.array(1)) & (y_true[mask] == np.array(1)))/n_divisorm)\n #true negative rate TPN = TN/N = TN/(TN+FP)\n tnm.append(np.sum((corrected[mask] == np.array(0)) & (y_true[mask] == np.array(0)))/p_divisorm)\n\n tpc.append(np.sum((compas[mask] == np.array(1)) & (y_true[mask] == np.array(1)))/n_divisorm)\n tnc.append(np.sum((compas[mask] == np.array(0)) & (y_true[mask] == np.array(0)))/p_divisorm)\n\n else:\n\n ##False Discovery FDR = FP/PP = FP/(TP+FP)\n tpm.append(np.sum(corrected[mask] > y_true[mask] )/(np.sum(corrected[mask] > y_true[mask]) + np.sum((corrected[mask] == 1) & (corrected[mask] == y_true[mask]))))\n\n ##False Omission FOR = FN/PN = FN/(FN+TN)\n tnm.append(np.sum(corrected[mask] < y_true[mask])/(np.sum(corrected[mask] < y_true[mask]) + np.sum((corrected[mask] == 0) & (corrected[mask] == y_true[mask]))))\n\n tpc.append(np.sum(compas[mask] > y_true[mask] )/(np.sum(compas[mask] > y_true[mask]) + np.sum((compas[mask] == 1) & (compas[mask] == y_true[mask]))))\n tnc.append(np.sum(compas[mask] < y_true[mask])/(np.sum(compas[mask] < y_true[mask]) + np.sum((compas[mask] == 0) & (compas[mask] == y_true[mask]))))\n\n\n\n\n\n return tpm, fpm, tnm, fnm, tpc, fpc, tnc, fnc, corrm, corrc\n\n\n\n\ndef bracketing_age(pds_age):\n ret = []\n for i in list(pds_age):\n if i <= 21:\n ret.append(str(0))\n elif i <= 30:\n ret.append(str(1))\n elif i <= 40:\n ret.append(str(2))\n elif i <= 50:\n ret.append(str(3))\n elif i <= 65:\n ret.append(str(4))\n elif i > 65:\n ret.append(str(5))\n else:\n raise SystemExit('value error')\n\n return ret\n\n\ndef gini_index_from_contingency(tp, fp, tn, fn):\n\n # accept either lists of per-sample indicator flags or scalar counts\n if isinstance(tp, list):\n tp, fp, tn, fn = len(tp), len(fp), len(tn), len(fn)\n total = tp + fp + tn + fn\n pos = tp + fp\n neg = tn + fn\n\n weight_p = pos/total\n weight_n = neg/total\n return (1- (fp/pos)**2 - (tp/pos)**2)*weight_p + (1-(fn/neg)**2 - (tn/neg)**2)*weight_n\n\n\ndef get_raw(perc):\n\n return np.log(perc/(1-perc))\n\n\n\ndef get_percentages(raw_scores, typ='logit'):\n\n return 
np.exp(raw_scores)/(np.exp(raw_scores)+1)\n\n\ndef get_cutoffs(raw_scores, deciles):\n\n raw_scores = np.array(raw_scores)\n deciles = np.array(deciles)\n decile_raw = {}\n\n for decile in np.unique(deciles):\n mask = deciles == decile\n un = np.unique(raw_scores[mask])\n maxi = np.max(un)\n decile_raw[decile] = (maxi, round(get_percentages(maxi),3)\n )\n\n return decile_raw\n\n\ndef exp_loss_discrete_estimate(distrib, cutoff_low=0, cutoff_high=10, decile_raw = None, is_percentage = False):\n divid = 0\n divis = 0\n if decile_raw:\n if cutoff_high != 10:\n cutoff_high = decile_raw[cutoff_high][0]\n else:\n cutoff_high = 10\n if cutoff_low != 10:\n cutoff_low = decile_raw[cutoff_low][0]\n else:\n cutoff_low = 10\n else:\n raise NotImplementedError('You have to provide a cutoff mapping')\n\n c = Counter(np.round(distrib, 2))\n for key, item in c.items():\n if key < cutoff_high and key >= cutoff_low:\n perc = key\n if not is_percentage:\n perc = get_percentages(key)\n divid += (perc*item)\n divis += item\n return divid/divis\n\ndef exp_loss_inverse_discrete_estimate(distrib, cutoff_low=0, cutoff_high=10, decile_raw = None, is_percentage = False):\n divid = 0\n divis = 0\n if decile_raw:\n if cutoff_high != 10:\n cutoff_high = decile_raw[cutoff_high][0]\n else:\n cutoff_high = 10\n\n if cutoff_low != 10:\n cutoff_low = decile_raw[cutoff_low][0]\n else:\n cutoff_low = 10\n else:\n raise NotImplementedError('You have to provide a cutoff mapping')\n\n c = Counter(np.round(distrib, 2))\n for key, item in c.items():\n if key < cutoff_high and key >= cutoff_low:\n perc = 1-key\n if not is_percentage:\n perc = 1-get_percentages(key)\n divid += (perc*item)\n divis += item\n return divid/divis\n\n\ndef exp_loss(cutoff_low, cutoff_high, distrib, decile_raw = None, is_percentage = False):\n if decile_raw:\n cutoff_high = decile_raw[cutoff_high][0]\n cutoff_low = decile_raw[cutoff_low][0]\n else:\n raise NotImplementedError('You have to provide a cutoff mapping')\n\n if not is_percentage:\n\n perc_low = get_percentages(cutoff_low)\n perc_high = get_percentages(cutoff_high)\n\n perc_values = get_percentages(distrib)\n\n kde = sci.stats.gaussian_kde(perc_values)\n #kde = sci.stats.norm(loc=0.5, scale=0.8)\n\n #res = sci.integrate.quad(lambda x: kde(x), 0.0, perc)[0]/sci.integrate.quad(lambda x: kde(x), 0.0, 1.0)[0]\n res = sci.integrate.quad(lambda x: kde(x), perc_low, perc_high)[0]\n\n return res\n\n\n\ndef calc_errors(pred, compas, y_true):\n # tensor has shape (samples, class) [0 or 1] Positive or Negative\n pred = torch.tensor(pred).reshape((-1,1))\n y_true = y_true.reshape((-1, 1))\n compas = compas.reshape((-1, 1))\n errors = torch.tensor(y_true[:, 0] != compas[:, 0]).reshape(\n (-1, 1)) ##find false positives and false negatives\n error_not_found = torch.lt(pred, 0.5) & errors\n\n err_falsely_found = torch.ge(pred, 0.5).float()*(~errors).float() # this mask finds those where compas did not make an error but our model thought it is an error\n\n\n FN = torch.sum(error_not_found*torch.eq(y_true, 1).float() + err_falsely_found*torch.eq(y_true, 1).float())\n FP = torch.sum(error_not_found*torch.eq(y_true, 0).float() + err_falsely_found*torch.eq(y_true, 0).float())\n\n return FN, FP\n\n\ndef calc_accuracy(pred, compas, y_true):\n pred = [1 if float(el) >= .5 else 0 for el in pred]\n compas = [el for el in compas]\n corrected = [float(np.abs(compas[i] - pred[i])) for i in range(len(pred))]\n\n return accuracy_score(corrected, y_true)\n\n\ndef 
total_correct(pred, compas, y_true):\n pred = [1 if float(el) >= .5 else 0 for el in pred]\n compas = [el for el in compas]\n corrected = [float(np.abs(compas[i] - pred[i])) for i in range(len(pred))]\n return torch.sum(torch.eq(torch.tensor(corrected), torch.tensor(y_true)).float())\n\ndef calc_error_accuracy(pred, compas, y_true):\n pred = np.array([1 if float(el) >= .5 else 0 for el in pred])\n\n errors = np.array(compas != y_true).flatten().astype('float')\n print('Correctly marked errors: {}'.format(np.sum((errors == 1) & (errors == pred))))\n print('Incorrectly marked errors: {}'.format(np.sum((errors == 0) & (errors != pred))))\n return accuracy_score(pred, errors)\n\n\n\ndef calc_fp_fn_rates(diff, ground_truth, errors ):\n pred =errors.float() - diff\n err_not_found = torch.lt(pred, 0.5).float() *errors.float()\n err_found = torch.ge(pred, 0.5).float() * errors.float()\n inv_ground = 1-ground_truth\n err_falsely_found = (~errors).float()*torch.ge(pred, 0.5).float()\n\n\n FN = torch.mean(err_not_found*ground_truth + err_falsely_found*ground_truth)\n FP = torch.mean(err_not_found*inv_ground + err_falsely_found*inv_ground)\n\n return FN, FP\n\n\ndef calc_y_hats_dic(y_hats, y_pred, key, cutoff, y, expected):\n y_pred_t = [float(torch.sigmoid(a)) for a in y_pred]\n y_hats[key][cutoff] = {}\n y_hats[key][cutoff]['expeted_rates'] = expected\n # y_hats[key][cutoff]['model'] = returner[\"model\"]\n y_hats[key][cutoff]['y_hat'] = torch.tensor(y_pred_t)\n y_hats[key][cutoff]['compas'] = y['group_recid_P']\n y_hats[key][cutoff]['true'] = y['recid']\n y_hats[key][cutoff]['compas_acc'] = accuracy_score(y['recid'], y['group_recid_P']) # 'group_recid_P' equals compas predictions interacted with cutoff\n y_hats[key][cutoff]['compas_fp'] = np.sum(y['recid'] < y['group_recid_P'])\n y_hats[key][cutoff]['compas_fn'] = np.sum(y['recid'] > y['group_recid_P'])\n # y_sanity = torch.tensor([0 for el in y_hats[key][cutoff]['y_hat']])\n y_hats[key][cutoff]['model_fp'], y_hats[key][cutoff]['model_fn'] = calc_errors(\n y_hats[key][cutoff]['y_hat'],\n torch.tensor(y['recid']),\n torch.tensor(y['group_recid_P']))\n\n acc = calc_accuracy(y_hats[key][cutoff]['y_hat'], y['group_recid_P'], y['recid'])\n print(acc)\n print('Number of COMPAS correct ones {}'.format(np.sum(y['recid'] == y['group_recid_P'])))\n print('Total number of correct ones: {}'.format(\n total_correct(y_hats[key][cutoff]['y_hat'], y['group_recid_P'], y['recid'])))\n y_hats[key][cutoff]['model_acc'] = acc\n\n return y_hats\n","repo_name":"mhschubert/Portfolio","sub_path":"compas/code/ml_utils/ml_utils.py","file_name":"ml_utils.py","file_ext":"py","file_size_in_byte":28384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12698848622","text":"import os\nimport sys\nfrom pathlib import Path\nimport shutil\nimport subprocess\nfrom git import Repo\nimport distutils.dir_util\n\nrepo_dir = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))\n\ngit_dir = Path(repo_dir) / \"git\"\nif git_dir.exists() and git_dir.is_dir():\n shutil.rmtree(git_dir)\ngit_dir.mkdir(parents=True, exist_ok=True)\n\ndoc_dir = Path(repo_dir) / \"source\"\n#distutils.dir_util.copy_tree(str(Path(repo_dir) / \"source\"), str(doc_dir))\n\n# clone and \"rsync\" to doc_dir\n\nrepos = ['polytope-client', 'polytope-deployment', 'polytope-server']\nfor repo in repos:\n Repo.clone_from(\"https://github.com/ecmwf-projects/\" + repo + \".git\", str(git_dir / repo))\n repo_doc_source = git_dir / repo / \"docs\" / 
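# Side note on the docs-copy loop below: distutils is deprecated (PEP 632) and removed\n# in Python 3.12; on Python 3.8+ the same copy can be done with the standard library:\n#\n#   for x in next(os.walk(str(repo_doc_source)))[1]:\n#       shutil.copytree(str(repo_doc_source / x), str(doc_dir / x), dirs_exist_ok=True)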
\"source\"\n for x in next(os.walk(str(repo_doc_source)))[1]:\n distutils.dir_util.copy_tree(str(repo_doc_source / x), str(doc_dir / x))\n\n#os.chdir(str(doc_dir))\n\n# install polytope-server\n\nsys.path.insert(0, str(git_dir / \"polytope-server\" ))\n\nfrom polytope_server.version import __version__ as polytope_version\n\n# -- Project information\n\nproject = \"Polytope\"\ncopyright = \"2021, ECMWF\"\nauthor = \"ECMWF\"\n\nrelease = polytope_version\n\n# -- General configuration\n\nextensions = [\n \"sphinx.ext.duration\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master/\", None),\n}\nintersphinx_disabled_domains = [\"std\"]\n\ntemplates_path = [\"_templates\"]\n\n# -- Options for HTML output\n\nhtml_theme = \"sphinx_rtd_theme\"\n\n# -- Options for EPUB output\nepub_show_urls = \"footnote\"\n\nhtml_extra_path = [\"schemas\", \"static\"]\n\n\ndef setup(app):\n app.add_css_file(\"../my_theme.css\")\n","repo_name":"ecmwf-projects/polytope-docs","sub_path":"source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9117256198","text":"import re\nfrom collections import defaultdict\n\nimport textx\nfrom textx import metamodel_from_str\n\nfrom clchecker.constants import BASETYPE\nfrom clchecker.errors import CLSemanticError, CLSyntaxError\nfrom clchecker.store import Store\nfrom config import COMMON_COMMANDS\n\n\nclass CLchecker():\n\n def __init__(self, store):\n self.store = store\n self.metamodel_doc_cache = {}\n self.init_metamodel_doc_cache()\n self.new_lines_start = []\n \n def init_metamodel_doc_cache(self):\n for command_name in COMMON_COMMANDS:\n command_doc = self.store.findcommand(command_name)\n if command_doc:\n command_metamodel = metamodel_from_str(command_doc.tx_syntax +\n BASETYPE,\n autokwd=False)\n self.metamodel_doc_cache[command_name] = (command_metamodel,\n command_doc)\n\n def assign_name_attr_to_actual_value(self, txobj):\n clsname = txobj.__class__.__name__\n if clsname == \"str\":\n return txobj\n if clsname == \"list\":\n name_attr = ''.join(\n [self.assign_name_attr_to_actual_value(obj) for obj in txobj])\n return name_attr\n if not hasattr(txobj, '_tx_attrs'):\n return \"\"\n\n name_attr = \"\".join([\n self.assign_name_attr_to_actual_value(getattr(txobj, att))\n for att in txobj._tx_attrs\n ])\n txobj.name = name_attr\n return name_attr\n\n def check_after_before_always_mutex(self, ref, specs):\n # if ref is an OptionPair_key, it is not clear enough\n if ref in self.occured_OptionPair_to_option_key:\n clearer_ref = self.occured_OptionPair_to_option_key[ref]\n else:\n clearer_ref = ref\n fired_obj = self.ref_to_txobj[clearer_ref]\n start_line, start_col = self.model._tx_parser.pos_to_linecol(\n fired_obj._tx_position)\n end_line, end_col = self.model._tx_parser.pos_to_linecol(\n fired_obj._tx_position_end)\n position = {\n \"start_line\": start_line,\n \"start_col\": start_col,\n \"end_line\": end_line,\n \"end_col\": end_col,\n \"abs_start\": fired_obj._tx_position,\n \"abs_end\": fired_obj._tx_position_end\n }\n for rule_type in ('after', 'before', 'always', 'mutex'):\n if rule_type in specs:\n rule_spec = specs[rule_type]\n if 'all_must_present' in rule_spec:\n if rule_type == 'always':\n excepted_but_not_occur_refs = [\n r for r in 
rule_spec['all_must_present']\n if r not in self.all_occured_refs\n ]\n if excepted_but_not_occur_refs:\n # todo: if the not occured ref is a OptionPair_key, it is not clear enough.\n expected_string = \",\".join(\n excepted_but_not_occur_refs)\n\n raise CLSemanticError(\n f'Expect `{expected_string}` when `{clearer_ref}` occurs',\n **position,\n severity=\"Error\")\n if rule_type == 'mutex':\n not_excepted_but_occur_refs = [\n r for r in rule_spec['all_must_present']\n if r in self.all_occured_refs\n ]\n\n # make not_excepted_but_occur_refs more clear when a OptionPair_key is inside\n if not_excepted_but_occur_refs:\n for i in range(len(not_excepted_but_occur_refs)):\n r = not_excepted_but_occur_refs[i]\n if r in self.occured_OptionPair_to_option_key:\n not_excepted_but_occur_refs[\n i] = self.occured_OptionPair_to_option_key[\n r]\n\n expected_string = \",\".join(\n not_excepted_but_occur_refs)\n raise CLSemanticError(\n f\"`{expected_string}` and `{clearer_ref}` can't occur at the same time\",\n **position,\n severity=\"Error\")\n\n # todo: before, after\n if 'one_must_present' in rule_spec:\n if rule_type == 'always':\n has = False\n for r in self.all_occured_refs:\n if r in rule_spec['one_must_present']:\n has = True\n break\n if not has:\n raise CLSemanticError(\n f'except one of `{\" | \".join(rule_spec[\"one_must_present\"])}` when `{clearer_ref}` occurs',\n **position,\n severity=\"Error\")\n\n # todo before, mutex, after\n def get_abs_position(self, line_num, col_num):\n return self.new_lines_start[line_num - 1] + col_num - 1\n\n\n def get_abs_position_from_commandline(self, commandline, line_num, col_num):\n lines = commandline.split('\\n')\n return sum([0]+[len(l)+1 for l in lines[:line_num-1]]) + col_num - 1\n\n def convert_pos_to_linecol(self, abs_pos):\n '''Use binary search to find the line number.\n don't `have to` use binary search since len(self.new_lines_start) is usually smaller than 10'''\n l, r = 0, len(self.new_lines_start) - 1\n while l <= r:\n if l == r:\n line = l\n break\n mid = l + (r - l) // 2\n if self.new_lines_start[mid] <= abs_pos and self.new_lines_start[\n mid + 1] > abs_pos:\n line = mid\n break\n elif self.new_lines_start[mid] > abs_pos:\n r = mid - 1\n else:\n l = mid + 1\n # line number counts from 1\n return line + 1, abs_pos - self.new_lines_start[line] + 1\n\n def get_position_from_obj(self, obj):\n start_line, start_col = self.convert_pos_to_linecol(obj._tx_position)\n end_line, end_col = self.convert_pos_to_linecol(obj._tx_position_end)\n position = {\n \"start_line\": start_line,\n \"start_col\": start_col,\n \"end_line\": end_line,\n \"end_col\": end_col,\n \"abs_start\": obj._tx_position,\n \"abs_end\": obj._tx_position_end\n }\n return position\n\n def get_position_from_abs(self, abs_start, abs_end):\n start_line, start_col = self.convert_pos_to_linecol(abs_start)\n end_line, end_col = self.convert_pos_to_linecol(abs_end)\n position = {\n \"start_line\": start_line,\n \"start_col\": start_col,\n \"end_line\": end_line,\n \"end_col\": end_col,\n \"abs_start\": abs_start,\n \"abs_end\": abs_end\n }\n return position\n\n def update_ref_to_txobj(self, txobj):\n \"\"\"\n recursively find the ref of any txobj in the model.\n ref can be an option_key or a sub_command's value\n \"\"\"\n if hasattr(txobj, '_tx_attrs'):\n for name, attr in txobj._tx_attrs.items():\n if name != 'content' and attr.cont and getattr(txobj, name):\n obj = getattr(txobj, name)\n clsname = obj.__class__.__name__\n\n # ShortOption_2:\n # option_key=\"-y\"\n # ;\n # here 
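# The hand-rolled binary search in convert_pos_to_linecol can also be expressed with\n# the standard library, assuming new_lines_start is sorted ascending (it is, by\n# construction):\n#\n#   import bisect\n#   line = bisect.bisect_right(self.new_lines_start, abs_pos) - 1\n#   return line + 1, abs_pos - self.new_lines_start[line] + 1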
clsname of option_key is \"str\" which means we hit the leaf.\n # there is no tx_obj anymore and we also not interested in \"str\" class.\n if clsname == \"str\":\n continue\n\n # OneMustPresentCollection_1_Multi:\n # statements+=OneMustPresentCollection_1\n # ;\n # todo: is the following correct? Now I change to check all elements in the list.\n # here the clsname of `statements` is \"list\", all elements in the list belong to the same class\n # so we only need to check the first element. Also, we are not interested in the \"list\" class itself.\n if clsname == \"list\":\n for ob in obj:\n self.update_ref_to_txobj(ob)\n continue\n\n # if clsname is SubCommand or Option, we only allow it happen once\n if clsname.startswith('SubCommand') or clsname.startswith(\n 'ShortOption') or clsname.startswith('LongOption'):\n if clsname.startswith('SubCommand'):\n ref = obj.value\n # when obj is a ShortOption, we need to add '-' to its option_key\n elif clsname.startswith('ShortOption'):\n # there are 2 kinds of ShortOpiton:\n # The first one doesn't have value attribute, but option_key. it's option_key doesn't start with '-'\n # The second one has the value attribute. It's option_key usually start with '-'\n # For example, in 'apt-get', 'y' is the first kind. '-t' is the second kind since it should followed by target release.\n if not hasattr(obj, 'value'):\n ref = '-' + obj.option_key\n else:\n ref = obj.option_key\n # when obj is a LongOption, obj.option_key has already had \"--\" before it\n else:\n ref = obj.option_key\n # the ref now happen multiple time, check whether we need to report it as error\n if ref in self.ref_to_txobj:\n need_to_report = True\n # if it is an option and has value attribute\n if (clsname.startswith('ShortOption') or clsname.startswith('LongOption')):\n OptionPair_key = self.option_keys_to_OptionPair_key[\n ref]\n for option_key, readable_syntax in self.OptionPair_key_to_option_keys_and_readable_syntax[\n OptionPair_key]:\n if option_key == ref and \"+=\" in readable_syntax:\n need_to_report = False\n break\n if need_to_report:\n position = self.get_position_from_obj(obj)\n raise CLSyntaxError(\n f\"{ref} has presented previous in the `{self.command_name}` command\",\n **position,\n severity='Warning')\n self.ref_to_txobj[ref] = obj\n if clsname.startswith('ShortOption') or clsname.startswith(\n 'LongOption'):\n # when obj is a ShortOption, we need to add '-' to its option_key\n if clsname.startswith('ShortOption'):\n if not hasattr(obj, 'value'):\n # since obj doesn't have value attribute, it is the first kind. 
We should add \"-\" to its option_key\n option_key = \"-\" + obj.option_key\n else:\n option_key = obj.option_key\n else:\n option_key = obj.option_key\n OptionPair_key = self.option_keys_to_OptionPair_key[\n option_key]\n # the same OptionPair_key can only occur once except there is a '+=' in the option_key readable syntax\n if OptionPair_key in self.occured_OptionPair_to_option_key:\n need_to_report = True\n for option_key_, readable_syntax in self.OptionPair_key_to_option_keys_and_readable_syntax[\n OptionPair_key]:\n if option_key == option_key_ and \"+=\" in readable_syntax:\n need_to_report = False\n break\n if need_to_report:\n position = self.get_position_from_obj(obj)\n same_option_keys_and_readable_syntaxes = self.OptionPair_key_to_option_keys_and_readable_syntax[\n OptionPair_key]\n readable_syntaxes = [\n i[1] for i in\n same_option_keys_and_readable_syntaxes\n ]\n raise CLSyntaxError(\n F\"Only one of `{' | '.join(readable_syntaxes)}` is enough, since they have the same meaning\",\n **position,\n severity='Warning')\n self.occured_OptionPair_to_option_key[\n OptionPair_key] = option_key\n\n self.update_ref_to_txobj(obj)\n\n def pre_process_commandline(self, commandline):\n\n # replace windows-style newline with unix-style newline\n commandline = commandline.replace('\\r\\n', ' \\n')\n\n # handle escaped newline\n self.new_lines_start = [0] + [\n m.end() for m in re.finditer(r'\\\\?\\n', commandline)\n ]\n commandline = commandline.replace('\\\\\\n', ' ')\n return commandline\n\n def check(self, command_name, commandline, debug=False):\n # print(commandline.encode())\n commandline = self.pre_process_commandline(commandline)\n command_metamodel = None\n if command_name in self.metamodel_doc_cache:\n command_metamodel, command_doc = self.metamodel_doc_cache[command_name]\n else:\n command_doc = self.store.findcommand(command_name)\n if command_doc:\n command_metamodel = metamodel_from_str(command_doc.tx_syntax +\n BASETYPE,\n autokwd=False)\n self.metamodel_doc_cache[command_name] = (command_metamodel,\n command_doc)\n if command_metamodel:\n try:\n model = command_metamodel.model_from_str(commandline)\n except textx.TextXSyntaxError as e:\n abs_start = self.get_abs_position_from_commandline(commandline, e.line, e.col)\n position = self.get_position_from_abs(abs_start, abs_start)\n raise CLSyntaxError(e.message,\n **position,\n expected_rules=e.expected_rules,\n severity=\"Error\")\n except textx.TextXSemanticError as e:\n abs_start = self.get_abs_position_from_commandline(commandline, e.line,\n e.col)\n position = self.get_position_from_abs(abs_start, abs_start)\n raise CLSemanticError(e.message,\n **position,\n err_type=e.err_type,\n expected_obj_cls=e.expected_obj_cls,\n severity=\"Error\")\n if debug:\n self.assign_name_attr_to_actual_value(model)\n self.model = model\n self.command_name = command_name\n self.ref_to_txobj = {}\n self.clsname_to_readable_syntax = command_doc.clsname_to_readable_syntax\n\n self.concrete_specs = command_doc.concrete_specs\n self.OptionPair_key_to_option_keys_and_readable_syntax = self.concrete_specs[\n 'OptionPair_key_to_option_keys_and_readable_syntax']\n self.option_keys_to_OptionPair_key = self.concrete_specs[\n 'option_keys_to_OptionPair_key']\n\n self.occured_OptionPair_to_option_key = {}\n\n self.update_ref_to_txobj(model)\n self.all_occured_refs = list(self.ref_to_txobj.keys()) + list(\n self.occured_OptionPair_to_option_key.keys())\n #TODO: differentiate OptionPair_key and option_key in all_occured_refs\n for ref in 
self.all_occured_refs:\n if ref in self.concrete_specs:\n self.check_after_before_always_mutex(\n ref, self.concrete_specs[ref])\n\n def find_explanation(self, command_name, word):\n command_doc = self.store.findcommand(command_name)\n if command_doc:\n Pair_key = None\n found_key = word\n if word in command_doc.concrete_specs[\n 'explanation_key_to_ExplanationPair_key']:\n Pair_key = command_doc.concrete_specs[\n 'explanation_key_to_ExplanationPair_key'][word]\n elif word in command_doc.concrete_specs[\n 'option_keys_to_OptionPair_key']:\n Pair_key = command_doc.concrete_specs[\n 'option_keys_to_OptionPair_key'][word]\n elif (\"-\"+word) in command_doc.concrete_specs[\n 'option_keys_to_OptionPair_key']:\n found_key = \"-\" + word\n Pair_key = command_doc.concrete_specs[\n 'option_keys_to_OptionPair_key'][found_key]\n if Pair_key:\n return found_key, command_doc.explanation[Pair_key]\n return None, None\n","repo_name":"wangluochao902/clcheck","sub_path":"clchecker/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":18413,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"29749405555","text":"import os\nimport yaml\n\ndef load_inputs(file: str = '.yaml') -> dict:\n PARAMS_YAML = file\n if os.path.isfile(PARAMS_YAML):\n with open(PARAMS_YAML, 'r') as f:\n try:\n inputs = yaml.safe_load(f)\n except yaml.YAMLError as e:\n raise Exception(e)\n else:\n raise Exception('No input yaml found.')\n return inputs","repo_name":"cclark20/nbreporter","sub_path":"nbreporter/nb_helpers.py","file_name":"nb_helpers.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18554537466","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as tvmodels\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logging\nimport time\nimport shutil\n\nimport random\nfrom classify import *\nfrom utils import *\n\n\n\nif __name__ == \"__main__\":\n\n global args\n file = \"./config/classify\" + \".json\"\n args = load_json(json_file=file)\n\n model_name = args['dataset']['model_name']\n model_name_T = \"VGG16\"\n model_name_E = \"FaceNet\"\n dataset_name = \"celeba\"\n \n z_dim = 100\n\n # path_T = '/home/sichen/models/target_model/VGG16/model_latest.pth'\n # path_E = '/home/sichen/models/target_model/FaceNet/model_latest.pth'\n path_T = '/home/sichen/models/target_model/target_ckp/VGG16_88.26.tar'\n path_E = '/home/sichen/models/yuheng/FaceNet.tar'\n\n train_path = args['dataset']['train_file_path']\n val_path = args['dataset']['test_file_path']\n # lr = args[model_name]['lr']\n batch_size = args[model_name]['batch_size']\n\n ###########################################\n ########### load model ##########\n ###########################################\n # no mask\n # G = Generator(z_dim)\n # torch.nn.DataParallel(G).cuda()\n # D = DGWGAN(3)\n # torch.nn.DataParallel(D).cuda()\n # ckp_G = torch.load(path_G)\n # load_my_state_dict(G, ckp_G['state_dict'])\n # ckp_D = torch.load(path_D)\n # load_my_state_dict(D, ckp_D['state_dict'])\n\n if model_name_T.startswith(\"VGG16\"):\n T = VGG16(1000)\n E = FaceNet(1000)\n\n T= torch.nn.DataParallel(T).cuda()\n ckp_T = torch.load(path_T)\n E = 
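# Checkpoints saved from a DataParallel-wrapped model carry a 'module.' prefix on every\n# key (the commented-out check further down hints at this). A common sketch, not the\n# author's method, for normalizing keys when model and checkpoint disagree on the prefix:\n#\n#   state = {k[len('module.'):] if k.startswith('module.') else k: v\n#            for k, v in ckp_T['state_dict'].items()}\n#   T.load_state_dict(state, strict=False)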
torch.nn.DataParallel(E).cuda()\n ckp_E = torch.load(path_E)\n\n if 0:\n print(\"Pre-trained (ckp_E) state_dict:\")\n n = 0\n for k,v in ckp_T['state_dict'].items():\n print ('idx = %d' %n, k, v.shape)\n n += 1\n \n #NOTE: added by CCJ:\n # Print model's state_dict\n print(\"\\n\\nModel state_dict:\")\n n = 0\n for param_tensor in T.state_dict():\n print('idx = ', n, \"\\t\", param_tensor, \"\\t\", T.state_dict()[param_tensor].size())\n n += 1\n # if 'module.' in param_tensor:\n # tmp_k = param_tensor[len('module.'):]\n # else:\n # tmp_k = param_tensor\n tmp_k = param_tensor\n if tmp_k not in ckp_T['state_dict']:\n print (\"not found:\", tmp_k)\n\n T.load_state_dict(ckp_T['state_dict'], strict=False)\n \n E.load_state_dict(ckp_E['state_dict'], strict=False)\n\n # train_set, train_loader = init_dataloader(args, train_path, batch_size, mode=\"train\")\n val_set, val_loader = init_dataloader(args, val_path, batch_size, mode=\"test\")\n\n criterion = nn.CrossEntropyLoss().cuda()\n T.eval()\n E.eval()\n\n \n\n # print(\"---------------------Test [%s] accuracy------------------------------\" % model_name)\n # # train set\n # for i, (imgs, iden) in enumerate(train_loader):\n # # iden = iden.view(-1).long().cuda()\n # x = imgs.cuda()\n # iden = iden.cuda()\n # img_size = x.size(2)\n # bs = x.size(0)\n # # out = T(x)[-1]\n # out = E(low2high(x))[-1]\n\n # eval_iden = torch.argmax(out, dim=1).view(-1)\n # train_acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs\n # loss = criterion(out, iden)\n\n # print(\"training acc:\", train_acc)\n\n # test set\n \n total_acc = 0\n for i, (imgs, iden) in enumerate(val_loader):\n x = imgs.cuda()\n iden = iden.cuda()\n img_size = x.size(2)\n bs = x.size(0)\n out = T(x)[-1]\n # out = E(low2high(x))[-1]\n\n eval_iden = torch.argmax(out, dim=1).view(-1)\n val_acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs\n total_acc += val_acc\n # loss = criterion(out, iden)\n print(\"val acc:\", val_acc)\n\n aver_acc = total_acc / (i+1)\n print(\"average val acc:\", aver_acc)","repo_name":"SCccc21/mi","sub_path":"GMI-code/Celeba/test_acc.py","file_name":"test_acc.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34121521902","text":"import requests\nfrom bs4 import BeautifulSoup\nimport yfinance as yf\n\ndef retornar_acoes_existentes():\n\n page = requests.get(\"https://www.infomoney.com.br/cotacoes/empresas-b3/\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n lista_ativo = soup.find(class_=\"col-md-9 col-lg-8 col-xl-6 m-sm-auto m-lg-0 article-content\")\n codigo_ativo = lista_ativo.find_all(\"td\", class_=\"strong\")\n\n resultados = []\n\n for codigos_ativos in codigo_ativo:\n\n if ( codigos_ativos.text.strip()[-1:] == \"F\" ) or ( codigos_ativos.text.strip() == \"\" ) : continue\n\n resultados.append(codigos_ativos.text.strip()+\".SA\")\n\n return resultados\n\n\ndef retornar_fiis_existentes():\n\n page = requests.get(\"https://www.fundsexplorer.com.br/funds\")\n soup = BeautifulSoup(page.content, \"html.parser\")\n lista_ativo = soup.find(id=\"search-menu-select\")\n codigo_ativo = lista_ativo.find_all(\"option\")\n\n resultados = []\n\n for codigos_ativos in codigo_ativo:\n\n resultados.append(codigos_ativos.text.strip()[:6]+\".SA\")\n \n return resultados\n\n\ndef retornar_detalhes_ativo_antigo(ativo):\n\n nome_empresa, setor, website = \"\", \"\", \"\"\n \n ativo_ = yf.Ticker(ativo)\n\n try:\n ativo_.info[\"longName\"]\n except KeyError:\n 
codigo_ativo = ativo\n else:\n codigo_ativo = ativo\n nome_empresa = ativo_.info[\"longName\"]\n setor = ativo_.info[\"sector\"]\n website = ativo_.info[\"website\"] \n\n return codigo_ativo, nome_empresa, setor, website\n\n\n\ndef retornar_todos_ativos():\n\n ...\n\n return retornar_acoes_existentes(), retornar_fiis_existentes()\n\ndef retornar_detalhes_ativo(ativo):\n\n nome_empresa_longo, nome_empresa_curto, setor, website, tipo_ativo = \"\", \"\", \"\", \"\", \"\"\n \n ativo_ = yf.Ticker(ativo)\n\n try:\n ativo_.info[\"longName\"]\n except KeyError:\n codigo_ativo = ativo\n ativo_existente = 0\n else:\n codigo_ativo = ativo\n nome_empresa_longo = ativo_.info[\"shortName\"]\n nome_empresa_curto = ativo_.info[\"longName\"]\n \n setor = ativo_.info[\"sector\"]\n ativo_existente = 1\n website = ativo_.info[\"website\"]\n tipo_ativo = \"acao\"\n if nome_empresa_longo.find(\"Fundo\") > -1: tipo_ativo = \"fii\"\n\n return codigo_ativo, nome_empresa_longo, nome_empresa_curto, setor, website, tipo_ativo, ativo_existente\n\n\n\n\n\n#teste = retornar_acoes_existentes()\n\n\n\n\n#for i in teste:\n \n #print(retornar_detalhes_ativo(i))","repo_name":"mtcdultra/p.obter_ativos","sub_path":"obter_ativos/retornar_ativos.py","file_name":"retornar_ativos.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"46730770303","text":"from scipy.stats import norm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport click\nimport logging\nfrom utils import *\nimport plotly.express as px\n\n\n@click.command()\n@click.option('--path', '-p', help=\"The path to the file and file name containing the data\")\ndef run(path=None):\n \n #Upload db\n FIXTURE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),'resources')\n params = DataExtraction(FIXTURE_DIR)\n ## Get setting \n if not path:\n run_settings = GetSettings(FIXTURE_DIR, params.db)\n \n \n params.get_params(run_settings.input_path)\n \n annual_revenue_MC = RevenueCalculator(params.cotract_price_range, params.annual_prod, params.degredation, params.years, params.yeild, \\\n params.inflation )\n \n if isinstance(params.cotract_price_range, list):\n if len(params.cotract_price_range)==1:\n price_vec = params.cotract_price_range\n else:\n price_vec = annual_revenue_MC.prep_energy_prices_contract(plot=False) \n else: \n price_vec = [params.cotract_price_range] \n \n \n annual_revenue_mat = annual_revenue_MC.annual_revenue(price_vec, contract='agreement', plot=True)\n \n cash_flow_calculator = CashFlowCalculator(annual_revenue_mat, price_vec, params)\n\n interest_rate_MC, annual_cash_flow, annual_res = cash_flow_calculator.calc_cash_flow()\n\n metrics = Metrics(annual_cash_flow, params)\n \n\n\n irr_vec , npv_vec, payback_vec, no_payback_count = metrics.metrics_calculator()\n # print(irr_vec, npv_vec, payback_vec)\n irr_vec = np.array(irr_vec)\n npv_vec = np.array(npv_vec)\n payback_vec=np.array(payback_vec, dtype=np.float64)\n\n voltality = np.std(npv_vec)/np.sqrt(len(npv_vec))\n print('voltality: ', voltality)\n print('mean IRR: ', np.mean(irr_vec[~np.isnan(irr_vec)]))\n print('mean NPV: ', np.mean(npv_vec[~np.isnan(irr_vec)]))\n print('mean Payback: ', np.mean(payback_vec[~np.isnan(payback_vec)]))\n\n\n treshold = 0.02\n prob_lower_then_threshold = sum(irr1:\n # figure = px.histogram(res, x=\"irr_vec\")\n # figure.show()\n if len(annual_res)==1:\n for key in 
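# The masked means above can use NumPy's NaN-aware reductions where the mask is the\n# array's own NaNs: np.nanmean(irr_vec) == np.mean(irr_vec[~np.isnan(irr_vec)]), and\n# likewise np.nanmean(payback_vec). Note the NPV line is masked by the IRR NaNs, so it\n# is not equivalent to a plain np.nanmean(npv_vec).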
annual_res:\n cur_file = pd.DataFrame(annual_res[key])\n cur_file.to_csv(f'{run_settings.output_path}\\{run_settings.run_name}_annual_calculation.csv')\n\nif __name__ == \"__main__\":\n run()","repo_name":"libyair/MC-REP-Analysis","sub_path":"irr_report/irr_report.py","file_name":"irr_report.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9928146029","text":"def StrSpn(main_string: str, string_chars: str):\r\n main_string = main_string.split(' ')\r\n string_chars.replace(\" \", \"\")\r\n for string in main_string:\r\n good_string = True\r\n for char in string:\r\n if not string_chars.__contains__(char):\r\n # символ зі слова не знаходиться у рядку із символами\r\n good_string = False\r\n if good_string:\r\n return \"Довжина першого слова, усі символи якого містяться у рядочку з символами: \" + str(len(string))\r\n return 'У рядочку s немає таких слів, які б містили тільки символи з рядка s1 '\r\n\r\n\r\nprint(\"Побєдімська Соня Ігорівна\\nЛабораторна робота №4\\nВаріант 17\\nВизначення довжини тієї частини рядка s, \"\r\n \"яка містить тільки символи з рядка s1\\n\")\r\nend = \"\"\r\nwhile end == \"\":\r\n s = input('��ведіть рядок, якій будемо перевіряти: ')\r\n s1 = input(\"Введіть рядок з символами: \")\r\n print(StrSpn(s, s1))\r\n end = input(\"Введіть будь-що для завершення або ENTER для продовження \")\r\n","repo_name":"pobiedimska/python-laboratory","sub_path":"laboratory4/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"40786254972","text":"import numpy\nfrom numba import jit, float64\n\n@jit(\"void(float64[:,:], float64[:,:], float64[:,:])\")\ndef filter2d_core(image, filt, result):\n M, N = image.shape\n Mf, Nf = filt.shape\n Mf2 = Mf // 2\n Nf2 = Nf // 2\n for i in range(Mf2, M - Mf2):\n for j in range(Nf2, N - Nf2):\n num = 0\n for ii in range(Mf):\n for jj in range(Nf):\n num += (filt[Mf - 1 - ii, Nf - 1 - jj] * image[i - Mf2 + ii, j - Nf2 + jj])\n result[i, j] = num\n\ndef filter2d(image, filt):\n result = numpy.zeros_like(image)\n filter2d_core(image, filt, result)\n return result\n","repo_name":"maropu/lljvm-translator","sub_path":"python/src/test/resources/pyfunc/numba_examples/blur_image.py","file_name":"blur_image.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"62"} +{"seq_id":"15516023917","text":"class SingleLinkedListNode(object):\n\tdef __init__(self, value, nxt):\n\t\tself.value = value\n\t\tself.nxt = nxt\n\n\t# Prints debugging output when repr() called on the node object\n\tdef __repr__(self):\n\t\tnval = self.nxt and self.nxt.value or None\n\t\treturn \"{}:{}\".format(self.value, repr(nval))\n\n\nclass SingleLinkedList(object):\n\tdef __init__(self):\n\t\tself.begin = None\n\t\tself.end = None\n\n\tdef push(self, obj):\n\t\t\"\"\"Appends new value to end of list.\"\"\"\n\t\tnode = SingleLinkedListNode(obj, None)\n\t\tif self.begin == None:\n\t\t\tself.begin = node\n\t\t\tself.end = self.begin\n\t\telse:\n\t\t\tself.end.nxt = node\n\t\t\tself.end = node\n\t\t\tassert self.begin != self.end\n\n\t\tassert self.end.nxt == None\n\n\tdef pop(self):\n\t\t\"\"\"Removes last item and returns it.\"\"\"\n\t\tif self.end == None:\n\t\t\treturn None\n\n\t\t# case for only 1 node\n\t\tif self.begin == self.end:\n\t\t\tpopped = 
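# Example call for the numba-jitted filter above (illustrative shapes; float64 as the\n# signature requires). Borders of the result stay zero by construction:\n#\n#   image = np.random.rand(64, 64)\n#   box = np.ones((3, 3)) / 9.0\n#   blurred = filter2d(image, box)  # same shape as image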
self.end\n\t\t\tself.begin = None\n\t\t\tself.end = None\n\t\t\treturn popped.value\n\t\telse:\n\t\t\tpopped = self.end\n\t\t\tnode = self.begin\n\t\t\t# Below takes you up to second last node, before the last node to be removed\n\t\t\twhile node.nxt != self.end:\n\t\t\t\tnode = node.nxt\n\t\t\t# Check that the node is not the last node\n\t\t\tassert self.end != node\n\t\t\tself.end = node\n\t\t\tnode.nxt = None\n\t\t\treturn popped.value\n\n\n\tdef shift(self, obj):\n\t\t\"\"\"Append to beginning of list\"\"\"\n\t\tnode = SingleLinkedListNode(obj, self.begin)\n\t\tif self.count() == 0:\n\t\t\tself.end = node\n\t\tself.begin = node\n\n\n\tdef unshift(self):\n\t\t\"\"\"Removes first item and returns it.\"\"\"\n\t\tif self.begin == None:\n\t\t\treturn None\n\n\t\tif self.begin == self.end:\n\t\t\tpopped = self.begin\n\t\t\tself.begin = None\n\t\t\tself.end = None\n\t\t\treturn popped.value\n\t\telse:\n\t\t\tpopped = self.begin\n\t\t\tnode = popped.nxt\n\t\t\tself.begin = node\n\t\t\treturn popped.value\n\n\n\tdef remove(self, obj):\n\t\t\"\"\"Finds matching item and remove from list. Returns index of item\"\"\"\n\t\tif not self.begin:\n\t\t\treturn None\n\n\t\tnode = self.begin\n\t\tindex = 0\n\t\twhile node.value != obj:\n\t\t\tif node == self.end:\n\t\t\t\treturn None\n\t\t\tnode = node.nxt\n\t\t\tindex += 1\n\n\t\tif index == 0:\n\t\t\tself.unshift()\n\t\t\treturn 0\n\n\t\tif index == self.count() - 1:\n\t\t\tself.pop()\n\t\t\treturn index\n\t\telse:\n\t\t\tprevNode = self.begin\n\t\t\twhile prevNode.nxt != node:\n\t\t\t\tprevNode = prevNode.nxt\n\t\t\tprevNode.nxt = node.nxt\n\t\t\treturn index\n\n\n\tdef first(self):\n\t\t\"\"\"Returns a reference to first item, does not remove it.\"\"\"\n\t\tif not self.begin:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.begin.value\n\n\tdef last(self):\n\t\t\"\"\"Returns a reference to last item, does not remove it.\"\"\"\n\t\tif not self.end:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.end.value\n\n\tdef count(self):\n\t\t\"\"\"Counts number of elements in list.\"\"\"\n\t\tcount = 0\n\t\tnode = self.begin\n\t\twhile node:\n\t\t\tcount += 1\n\t\t\tnode = node.nxt\n\t\treturn count\n\n\tdef get(self, index):\n\t\t\"\"\"Get value at index.\"\"\"\n\t\tif not self.begin:\n\t\t\treturn None\n\n\t\tif index > self.count() - 1:\n\t\t\treturn None\n\n\t\tnode = self.begin\n\t\tcounter = 0\n\t\twhile counter < index:\n\t\t\tnode = node.nxt\n\t\t\tcounter += 1\n\t\treturn node.value\n\n\tdef dump(self, mark):\n\t\t\"\"\"Debugging function that dumps contents of a list.\"\"\"\n\t\tprint(mark)\n\n\t\tif not self.begin:\n\t\t\tprint(\"Empty\")\n\t\telse:\n\t\t\tnode = self.begin\n\t\t\tprint(node)\n\t\t\twhile node != self.end:\n\t\t\t\tnode = node.nxt\n\t\t\t\tprint(node)","repo_name":"cheeyeo/learn_more_python_the_hard_way","sub_path":"chapter13_single_linked_lists/sllist.py","file_name":"sllist.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"33019313162","text":"import logging\nimport os\n\nBOT_HISTORY_LENGTH = os.getenv(\"BOT_HISTORY_LENGTH\", 20)\n\n# Log configuration\nlogger = logging.getLogger(\"gpt_chat\")\nlogger.setLevel(os.getenv(\"LOG_LEVEL\", \"INFO\"))\n\n# Telegram bot configuration\nBOT_TOKEN = os.getenv(\"BOT_TOKEN\")\nif not BOT_TOKEN:\n    logging.error(\n        \"BOT_TOKEN env var is not found, cannot start the bot without it, create it with @BotFather Telegram bot! 
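# Minimal usage sketch for the SingleLinkedList above:\n#\n#   sll = SingleLinkedList()\n#   for v in (1, 2, 3):\n#       sll.push(v)\n#   assert sll.count() == 3 and sll.first() == 1 and sll.last() == 3\n#   assert sll.pop() == 3 and sll.unshift() == 1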
\"\n )\nelse:\n logging.info(\"BOT_TOKEN found, starting the bot\")\n\nDEFAULT_MODEL_NAME = \"gpt-3.5-turbo\"\nMODEL_NAME = os.getenv(\"MODEL_NAME\")\nif not MODEL_NAME:\n MODEL_NAME = DEFAULT_MODEL_NAME\n logging.info(f\"MODEL_NAME env var is not found, using default model {MODEL_NAME}\")\nelse:\n logging.info(f\"MODEL_NAME is {MODEL_NAME}\")\n","repo_name":"galinaalperovich/chatgpt-api-tg-bot","sub_path":"chatgpt_bot/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"62"} +{"seq_id":"27092016429","text":"\"\"\"This module contains a helper function for buidling optimiser.\"\"\"\nfrom typing import Iterator, Tuple\nimport torch\nfrom torch.nn import Parameter\nfrom .model_args import ModelArgs\n\n\ndef build_optimizer(\n args: ModelArgs, params: Iterator[Parameter]\n) -> Tuple[torch.optim.lr_scheduler._LRScheduler, torch.optim.Optimizer]:\n \"\"\"Returns pytorch optimiser for given configurations.\n\n Args:\n args (ModelArgs): ModelArgs object with model's configurations.\n params (Iterator[Parameter]): Iterator with model's parameters, which\n are returned by '.parameters()' function from pytorch model.\n\n Returns:\n torch.optim.Optimizer: Optimiser for given configurations\n and parameters.\n \"\"\"\n weight_decay = args.weight_decay\n filter_fn = filter(lambda p: p.requires_grad & (not p.grad_fn), params)\n if args.optim == \"adam\":\n optimizer = torch.optim.Adam(\n filter_fn, lr=args.lr, weight_decay=weight_decay\n )\n elif args.optim == \"sgd\":\n optimizer = torch.optim.SGD(\n filter_fn,\n lr=args.lr,\n momentum=0.95,\n weight_decay=weight_decay,\n )\n elif args.optim == \"rmsprop\":\n optimizer = torch.optim.RMSprop(\n filter_fn, lr=args.lr, weight_decay=weight_decay\n )\n elif args.optim == \"adagrad\":\n optimizer = torch.optim.SGD(\n filter_fn, lr=args.lr, weight_decay=weight_decay\n )\n if args.opt_scheduler == \"none\":\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate\n )\n elif args.opt_scheduler == \"cos\":\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=args.opt_restart\n )\n return scheduler, optimizer\n","repo_name":"konradmy/ts-transformers","sub_path":"ts_transformers/training_utils/optim_builder.py","file_name":"optim_builder.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70136115717","text":"from functools import partial\r\nfrom collections import OrderedDict\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision\r\nfrom torchvision import datasets,transforms,models\r\n\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nfrom torch.autograd import Variable # torch 中 Variable 模块\r\n\r\n#%matplotlib inline\r\ndata_dir = \"C:/Users/a1510/Desktop/cats_and_dogs_small\"\r\n\r\n\r\n\r\ndata_tansform = { x:transforms.Compose([transforms.Resize([224,224]), # 固定图像大小\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[.5,.5,.5],std=[.5,.5,.5])])\r\n for x in [\"train\",\"valid\"]}\r\nimage_datasets = {x:datasets.ImageFolder(root=os.path.join(data_dir,x),\r\n transform = data_tansform[x])\r\n for x in [\"train\",\"valid\"]}\r\ndataloader = {x:torch.utils.data.DataLoader(dataset=image_datasets[x],\r\n batch_size=16,\r\n shuffle=True)\r\n for x in [\"train\",\"valid\"]}\r\n# 获取一个批次,并进行数据预览和分析\r\nx_example,y_example = 
next(iter(dataloader[\"train\"]))\r\nexample_clasees = image_datasets[\"train\"].classes\r\n\r\nindex_classes = image_datasets[\"train\"].class_to_idx\r\n\r\nimg = torchvision.utils.make_grid(x_example)\r\nimg = img.numpy().transpose([1,2,0])\r\nprint([example_clasees[i] for i in y_example])\r\n\r\n\r\n\r\nimg = img / 2 + 0.5 # 将图像数据归一化到 [0, 1] 的范围内\r\nplt.imshow(img)\r\nplt.show()\r\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\r\n if drop_prob == 0. or not training:\r\n return x\r\n keep_prob = 1 - drop_prob\r\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\r\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\r\n random_tensor.floor_() # binarize\r\n output = x.div(keep_prob) * random_tensor\r\n return output\r\n\r\nclass DropPath(nn.Module):\r\n \"\"\"\r\n Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\r\n \"\"\"\r\n def __init__(self, drop_prob=None):\r\n super(DropPath, self).__init__()\r\n self.drop_prob = drop_prob\r\n\r\n def forward(self, x):\r\n return drop_path(x, self.drop_prob, self.training)\r\nclass PatchEmbed(nn.Module):\r\n \"\"\"\r\n 2D Image to Patch Embedding\r\n \"\"\"\r\n def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):\r\n super().__init__()\r\n img_size = (img_size, img_size)\r\n patch_size = (patch_size, patch_size)\r\n self.img_size = img_size\r\n self.patch_size = patch_size\r\n self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\r\n self.num_patches = self.grid_size[0] * self.grid_size[1]\r\n\r\n self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)\r\n self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\r\n\r\n def forward(self, x):\r\n B, C, H, W = x.shape\r\n assert H == self.img_size[0] and W == self.img_size[1], \\\r\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\r\n\r\n # flatten: [B, C, H, W] -> [B, C, HW]\r\n # transpose: [B, C, HW] -> [B, HW, C]\r\n x = self.proj(x).flatten(2).transpose(1, 2)\r\n x = self.norm(x)\r\n return x\r\nclass Attention(nn.Module):\r\n def __init__(self,\r\n dim, # 输入token的dim\r\n num_heads=8,\r\n qkv_bias=False,\r\n qk_scale=None,\r\n attn_drop_ratio=0.,\r\n proj_drop_ratio=0.):\r\n super(Attention, self).__init__()\r\n self.num_heads = num_heads\r\n head_dim = dim // num_heads\r\n self.scale = qk_scale or head_dim ** -0.5\r\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\r\n self.attn_drop = nn.Dropout(attn_drop_ratio)\r\n self.proj = nn.Linear(dim, dim)\r\n self.proj_drop = nn.Dropout(proj_drop_ratio)\r\n\r\n def forward(self, x):\r\n # [batch_size, num_patches + 1, total_embed_dim]\r\n B, N, C = x.shape\r\n\r\n # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]\r\n # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]\r\n # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]\r\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n # [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\r\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\r\n\r\n # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]\r\n # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]\r\n attn = (q @ k.transpose(-2, -1)) * self.scale\r\n attn = 
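# Shape walk-through for PatchEmbed with the defaults above: 224/16 = 14 patches per\n# side, i.e. 14*14 = 196 tokens of width 768 (illustrative check, assumes torch):\n#\n#   pe = PatchEmbed()\n#   tokens = pe(torch.randn(1, 3, 224, 224))\n#   assert tokens.shape == (1, 196, 768)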
attn.softmax(dim=-1)\r\n attn = self.attn_drop(attn)\r\n\r\n # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\r\n # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]\r\n # reshape: -> [batch_size, num_patches + 1, total_embed_dim]\r\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n x = self.proj(x)\r\n x = self.proj_drop(x)\r\n return x\r\n\r\nclass Mlp(nn.Module):\r\n \"\"\"\r\n MLP as used in Vision Transformer, MLP-Mixer and related networks\r\n \"\"\"\r\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\r\n super().__init__()\r\n out_features = out_features or in_features\r\n hidden_features = hidden_features or in_features\r\n self.fc1 = nn.Linear(in_features, hidden_features)\r\n self.act = act_layer()\r\n self.fc2 = nn.Linear(hidden_features, out_features)\r\n self.drop = nn.Dropout(drop)\r\n\r\n def forward(self, x):\r\n x = self.fc1(x)\r\n x = self.act(x)\r\n x = self.drop(x)\r\n x = self.fc2(x)\r\n x = self.drop(x)\r\n return x\r\nclass Block(nn.Module):\r\n def __init__(self,\r\n dim,\r\n num_heads,\r\n mlp_ratio=4.,\r\n qkv_bias=False,\r\n qk_scale=None,\r\n drop_ratio=0.,\r\n attn_drop_ratio=0.,\r\n drop_path_ratio=0.,\r\n act_layer=nn.GELU,\r\n norm_layer=nn.LayerNorm):\r\n super(Block, self).__init__()\r\n self.norm1 = norm_layer(dim)\r\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)\r\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()\r\n self.norm2 = norm_layer(dim)\r\n mlp_hidden_dim = int(dim * mlp_ratio)\r\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)\r\n\r\n def forward(self, x):\r\n x = x + self.drop_path(self.attn(self.norm1(x)))\r\n x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n return x\r\nimport torch.nn as nn\r\n\r\ndef _init_vit_weights(module):\r\n if isinstance(module, nn.Linear):\r\n nn.init.xavier_uniform_(module.weight)\r\n if module.bias is not None:\r\n nn.init.constant_(module.bias, 0)\r\n elif isinstance(module, nn.LayerNorm):\r\n nn.init.constant_(module.weight, 1)\r\n nn.init.constant_(module.bias, 0)\r\nclass VisionTransformer(nn.Module):\r\n def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,\r\n embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,\r\n qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,\r\n attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None,\r\n act_layer=None):\r\n \"\"\"\r\n Args:\r\n img_size (int, tuple): input image size\r\n patch_size (int, tuple): patch size\r\n in_c (int): number of input channels\r\n num_classes (int): number of classes for classification head\r\n embed_dim (int): embedding dimension\r\n depth (int): depth of transformer\r\n num_heads (int): number of attention heads\r\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n qkv_bias (bool): enable bias for qkv if True\r\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\r\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n distilled (bool): model includes a distillation token and head as in DeiT models\r\n drop_ratio (float): dropout rate\r\n attn_drop_ratio (float): attention 
dropout rate\r\n drop_path_ratio (float): stochastic depth rate\r\n embed_layer (nn.Module): patch embedding layer\r\n norm_layer: (nn.Module): normalization layer\r\n \"\"\"\r\n super(VisionTransformer, self).__init__()\r\n self.num_classes = num_classes\r\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\r\n self.num_tokens = 2 if distilled else 1\r\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n act_layer = act_layer or nn.GELU\r\n\r\n self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)\r\n num_patches = self.patch_embed.num_patches\r\n\r\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n self.pos_drop = nn.Dropout(p=drop_ratio)\r\n\r\n dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule\r\n self.blocks = nn.Sequential(*[\r\n Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\r\n drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],\r\n norm_layer=norm_layer, act_layer=act_layer)\r\n for i in range(depth)\r\n ])\r\n self.norm = norm_layer(embed_dim)\r\n\r\n # Representation layer\r\n if representation_size and not distilled:\r\n self.has_logits = True\r\n self.num_features = representation_size\r\n self.pre_logits = nn.Sequential(OrderedDict([\r\n (\"fc\", nn.Linear(embed_dim, representation_size)),\r\n (\"act\", nn.Tanh())\r\n ]))\r\n else:\r\n self.has_logits = False\r\n self.pre_logits = nn.Identity()\r\n\r\n # Classifier head(s)\r\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\r\n self.head_dist = None\r\n if distilled:\r\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\r\n\r\n # Weight init\r\n nn.init.trunc_normal_(self.pos_embed, std=0.02)\r\n if self.dist_token is not None:\r\n nn.init.trunc_normal_(self.dist_token, std=0.02)\r\n\r\n nn.init.trunc_normal_(self.cls_token, std=0.02)\r\n self.apply(_init_vit_weights)\r\n\r\n def forward_features(self, x):\r\n # [B, C, H, W] -> [B, num_patches, embed_dim]\r\n x = self.patch_embed(x) # [B, 196, 768]\r\n # [1, 1, 768] -> [B, 1, 768]\r\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\r\n if self.dist_token is None:\r\n x = torch.cat((cls_token, x), dim=1) # [B, 197, 768]\r\n else:\r\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\r\n\r\n x = self.pos_drop(x + self.pos_embed)\r\n x = self.blocks(x)\r\n x = self.norm(x)\r\n if self.dist_token is None:\r\n return self.pre_logits(x[:, 0])\r\n else:\r\n return x[:, 0], x[:, 1]\r\n\r\n def forward(self, x):\r\n x = self.forward_features(x)\r\n if self.head_dist is not None:\r\n x, x_dist = self.head(x[0]), self.head_dist(x[1])\r\n if self.training and not torch.jit.is_scripting():\r\n # during inference, return the average of both classifier predictions\r\n return x, x_dist\r\n else:\r\n return (x + x_dist) / 2\r\n else:\r\n x = self.head(x)\r\n return x\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\ndef vit_cat_vs_dog(num_classes: int = 2):\r\n\r\n model = VisionTransformer(img_size=224,\r\n patch_size=16,\r\n embed_dim=192, # 原 768\r\n depth=3, # block 深度 原来12\r\n num_heads=4, # 原来 12\r\n 
representation_size=None,\r\n num_classes=num_classes)\r\n return model\r\nmodel = vit_cat_vs_dog().to(device)\r\nloss_f = torch.nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model.parameters(), lr=.00001)\r\n\r\nepoch_n = 10\r\ntime_open = time.time()\r\n\r\nfor epoch in range(epoch_n):\r\n print(\"Epoch{}/{}\".format(epoch, epoch_n - 1))\r\n print(\"-\" * 10)\r\n\r\n for phase in [\"train\", \"valid\"]:\r\n if phase == \"train\":\r\n print(\"Training...\")\r\n model.train(True)\r\n else:\r\n print(\"Validing...\")\r\n model.train(False)\r\n\r\n running_loss = .0\r\n running_corrects = 0\r\n\r\n for batch, data in enumerate(dataloader[phase], 1):\r\n x, y = data\r\n\r\n # if Use_gpu:\r\n # x,y = Variable(x.cuda()),Variable(y.cuda())\r\n # else:\r\n # x,y = Variable(X),Variable(y)\r\n x, y = Variable(x.to(device)), Variable(y.to(device))\r\n # print(x.shape)\r\n y_pred = model(x)\r\n\r\n _, pred = torch.max(y_pred.data, 1)\r\n\r\n optimizer.zero_grad()\r\n\r\n loss = loss_f(y_pred, y)\r\n\r\n if phase == \"train\":\r\n loss.backward()\r\n optimizer.step()\r\n\r\n running_loss += loss.data.item()\r\n running_corrects += torch.sum(pred == y.data)\r\n\r\n if batch % 500 == 0 and phase == \"train\":\r\n print(\"Batch{},Train Loss:{:.4f},Train ACC:{:.4f}\".format(batch, running_loss / batch,\r\n 100 * running_corrects / (16 * batch)))\r\n\r\n epoch_loss = running_loss * 16 / len(image_datasets[phase])\r\n epoch_acc = 100 * running_corrects / len(image_datasets[phase])\r\n\r\n print(\"{} Loss:{:.4f} Acc:{:.4f}%\".format(phase, epoch_loss, epoch_acc))\r\n\r\ntime_end = time.time() - time_open\r\nprint(time_end)\r\n","repo_name":"xxiaoxu5/-","sub_path":"avit.py","file_name":"avit.py","file_ext":"py","file_size_in_byte":15290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"10747608704","text":"\"\"\"\nscript to train on CSL task.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch_geometric.datasets import GNNBenchmarkDataset\nfrom models.input_encoder import EmbeddingEncoder\nimport train_utils\nfrom interfaces.pl_model_interface import PlGNNTestonValModule\nfrom interfaces.pl_data_interface import PlPyGDataTestonValModule\nfrom lightning.pytorch import seed_everything\nfrom lightning.pytorch import Trainer\nfrom lightning.pytorch.loggers import WandbLogger\nfrom lightning.pytorch.callbacks import ModelCheckpoint, LearningRateMonitor, Timer\nfrom lightning.pytorch.callbacks.progress import TQDMProgressBar\nimport torchmetrics\nimport wandb\nfrom torch_geometric.data import Data\nimport torch_geometric.transforms as T\n\n\ndef add_node_feature(data: Data) -> Data:\n r\"\"\"Add identical initial node feature to all graphs.\n Arg:\n data (Data): PyG data.\n \"\"\"\n data.x = torch.zeros([data.num_nodes, 1]).long()\n return data\n\n\ndef main():\n parser = train_utils.args_setup()\n parser.add_argument('--dataset_name', type=str, default=\"CSL\", help='Name of dataset.')\n parser.add_argument('--folds', type=int, default=10, help='Number of fold in K-fold cross validation.')\n args = parser.parse_args()\n args = train_utils.update_args(args)\n\n path, pre_transform, follow_batch = train_utils.data_setup(args)\n\n dataset = GNNBenchmarkDataset(path,\n name=args.dataset_name,\n pre_transform=T.Compose([add_node_feature, pre_transform]),\n transform=train_utils.PostTransform(args.wo_node_feature, args.wo_edge_feature))\n args.out_channels = dataset.num_classes\n\n for fold, (train_idx, test_idx, val_idx) in enumerate(\n 
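# In the validation phase of the loop above, autograd still builds a graph; the usual\n# cheaper pattern is to wrap the forward pass in torch.no_grad() (sketch of that step).\n# Note also that torch.autograd.Variable has been a no-op since PyTorch 0.4, so plain\n# tensors suffice.\n#\n#   model.eval()\n#   with torch.no_grad():\n#       y_pred = model(x)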
zip(*train_utils.k_fold(dataset, args.folds, args.seed))):\n\n # Set random seed\n seed = train_utils.get_seed(args.seed)\n seed_everything(seed)\n\n train_dataset = dataset[train_idx]\n val_dataset = dataset[val_idx]\n test_dataset = dataset[test_idx]\n\n logger = WandbLogger(name=f'fold_{str(fold+1)}',\n project=args.exp_name,\n save_dir=args.save_dir,\n offline=args.offline)\n logger.log_hyperparams(args)\n timer = Timer(duration=dict(weeks=4))\n\n datamodule = PlPyGDataTestonValModule(train_dataset=train_dataset,\n val_dataset=val_dataset,\n test_dataset=test_dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n follow_batch=follow_batch)\n loss_cri = nn.CrossEntropyLoss()\n evaluator = torchmetrics.classification.MulticlassAccuracy(num_classes=dataset.num_classes)\n args.mode = \"max\"\n init_encoder = EmbeddingEncoder(dataset.num_features, args.hidden_channels)\n\n modelmodule = PlGNNTestonValModule(loss_criterion=loss_cri,\n evaluator=evaluator,\n args=args,\n init_encoder=init_encoder)\n trainer = Trainer(accelerator=\"auto\",\n devices=\"auto\",\n max_epochs=args.num_epochs,\n enable_checkpointing=True,\n enable_progress_bar=True,\n logger=logger,\n callbacks=[TQDMProgressBar(refresh_rate=20),\n ModelCheckpoint(monitor=\"val/metric\", mode=args.mode),\n LearningRateMonitor(logging_interval=\"epoch\"),\n timer])\n\n trainer.fit(modelmodule, datamodule=datamodule)\n val_result, test_result = trainer.test(modelmodule, datamodule=datamodule, ckpt_path=\"best\")\n results = {\"final/best_val_metric\": val_result[\"val/metric\"],\n \"final/best_test_metric\": test_result[\"test/metric\"],\n \"final/avg_train_time_epoch\": timer.time_elapsed(\"train\") / args.num_epochs,\n }\n logger.log_metrics(results)\n wandb.finish()\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JiaruiFeng/N2GNN","sub_path":"train_CSL.py","file_name":"train_CSL.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"73971573638","text":"import json\n\ndef sort_erg(item):\n\treturn item[\"zeit\"]\n\n\nergebnisse = [\n\t{\n\t\t\"id\": 1,\n\t\t\"zeit\": \"561087\"\n\t}, {\n\t\t\"id\": 2,\n\t\t\"zeit\": \"531874\"\n\t}, {\n\t\t\"id\": 3,\n\t\t\"zeit\": \"561187\"\n\t}, {\n\t\t\"id\": 4,\n\t\t\"zeit\": \"547954\"\n\t}\n]\n\nsorted_erg = sorted(ergebnisse, key=sort_erg)\n\nprint(sorted_erg)","repo_name":"Heddy147/ias","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"3744492299","text":"#!/bin/python\n\nimport logging\nimport os\nimport subprocess\nimport time\n\nlogging.basicConfig(level=logging.INFO)\n\nFNULL = open(os.devnull, \"w\")\n\n\nclass CommonUtils:\n _default_wait_time_between_calls = 10\n _max_retries_call = 5\n\n def can_retry_call(self, call_try):\n if call_try <= self._max_retries_call:\n return True\n else:\n return False\n\n def retry_call(self, cmd_call, call_try):\n logging.info(\n \"Try number for call {}: Waiting {} seconds before try call \"\n \"{} again\".format(call_try, self._default_wait_time_between_calls, cmd_call)\n )\n time.sleep(self._default_wait_time_between_calls)\n call_result = subprocess.call(cmd_call.split(\", \"), stdout=FNULL)\n call_try += 1\n return call_result, call_try\n\n @staticmethod\n def check_current_state_call(current_state, aws_resource, aws_resource_name):\n if current_state == 0:\n 
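# In the sort snippet above, the \"zeit\" values are strings, so sorted() compares them\n# lexicographically; that matches numeric order here only because all values have the\n# same number of digits. A safer key, assuming the values stay numeric:\n#\n#   sorted_erg = sorted(ergebnisse, key=lambda item: int(item[\"zeit\"]))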
logging.info(\n \"AWS resource {} with name {} was successfully removed\".format(aws_resource, aws_resource_name)\n )\n else:\n logging.info(\n \"There where problems trying to delete AWS resource {} with name {}\".format(\n aws_resource, aws_resource_name\n )\n )\n","repo_name":"Bruin-Dev/Intelygenz","sub_path":"ci-utils/delete-environments-aws-resources/common_utils.py","file_name":"common_utils.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"947449214","text":"\"\"\"\nTest fixed sampling strategy.\n\"\"\"\n\nimport random\nimport logging\nfrom poap.strategy import FixedSampleStrategy\nfrom poap.strategy import CheckWorkerStrategy\nfrom poap.controller import SimTeamController\nfrom poap.test.monitor import add_monitor\n\n\ndef objective(x):\n \"Objective function\"\n return (x-0.123)*(x-0.123)\n\n\ndef delay(record):\n return 5 + 5 * (record.params[0] > 0.25)\n\n\ndef main():\n \"Testing routine.\"\n logging.basicConfig(format=\"%(name)-18s: %(levelname)-8s %(message)s\",\n level=logging.INFO)\n\n samples = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n\n controller = SimTeamController(objective, delay, 5)\n strategy = FixedSampleStrategy(samples)\n strategy = CheckWorkerStrategy(controller, strategy)\n controller.strategy = strategy\n add_monitor(controller, 1)\n result = controller.run()\n print(\"Final: {0:.3e} @ {1}\".format(result.value, result.params))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dbindel/POAP","sub_path":"poap/test/test_simteam_controller.py","file_name":"test_simteam_controller.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"62"} +{"seq_id":"23173972245","text":"import pygame\nimport sys\nimport math\nimport Ai, board\n\nBLUE = (0,0,200)\nBLACK = (0,0,0)\nRED = (255,0,0)\nYELLOW = (255,255,0)\nWHITE =(255,255,255)\nGRAY = (120,120,120)\nROW_COUNT = 6\nCOLUMN_COUNT = 7\n\n\ngame_board= board.board()\n\nred_piece = pygame.image.load('smallred.png')\nyellow_piece = pygame.image.load('smallyellow.png')\n\n\ngame_over = False\nturn = 0\nplayer2 = Ai.AI_player(2, 1, True)\n\npygame.init()\n\nPLAYER = 0\nAI = 1\n\nSQUARESIZE = 100\n\nwidth = COLUMN_COUNT * SQUARESIZE\nheight = (ROW_COUNT+1) * SQUARESIZE\n\nsize = (width, height)\n\nRADIUS = int(SQUARESIZE/2 - 5)\n\nscreen = pygame.display.set_mode(size)\ngame_board.draw_board(screen)\npygame.display.update()\n\nmyfont = pygame.font.SysFont(\"comic sans \", 75)\n\nwhile not game_over:\n\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\n\t\tif event.type == pygame.MOUSEMOTION:\n\t\t\tpygame.draw.rect(screen, GRAY, (0,0, width, SQUARESIZE))\n\n\t\t\tposx = event.pos[0]\n\t\t\tif turn == 0:\n\n\t\t\t\tpygame.draw.circle(screen, RED, (posx, int(SQUARESIZE/2)), RADIUS)\n\t\t\t\tscreen.blit(red_piece,(int(posx)-50,0))\n\t\t\telse: \n\t\t\t\tpygame.draw.circle(screen, YELLOW, (posx, int(SQUARESIZE/2)), RADIUS)\n\t\tpygame.display.update()\n\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tpygame.draw.rect(screen, GRAY, (0,0, width, SQUARESIZE))\n\n\t\t\t# Ask for Player 1 Input\n\t\t\tif turn == 0:\n\t\t\t\t#cursor\n\t\t\t\tposx = event.pos[0]\n\t\t\t\tcol = int(math.floor(posx/SQUARESIZE))\n\n\t\t\t\tif game_board.is_valid_location(col):\n\t\t\t\t\trow = game_board.get_next_open_row( col)\n\t\t\t\t\tgame_board.drop_piece( row, col, 1)\n\n\t\t\t\t\tif game_board.winning_move( 
1):\n\t\t\t\t\t\tlabel = myfont.render(\"Player 1 wins!!\", 1, RED)\n\t\t\t\t\t\tscreen.blit(label, (40,10))\n\t\t\t\t\t\tgame_over = True\n\t\t\t\t\t\n\t\t\t\t\tturn+=1 \n\t\t\t\t\tturn = turn % 2\n\n\n\t\t\t\t\tgame_board.draw_board(screen)\n\n\t\t\t# # Ask for Player 2 Input\n\t\t\tif turn == 1:\t\t\t\n\t\t\t\tposx = event.pos[0]\n\t\t\t\tplayer2.computer_turn(game_board,game_over)\n\t\t\t\tnextlvl = not player2.level\n\t\t\t\t\n\n\t\t\t\tif game_board.winning_move( player2.AI_PIECE):\n\t\t\t\t\tlabel = myfont.render(\"Computer wins!!\", 1, YELLOW)\n\t\t\t\t\tscreen.blit(label, (40,10))\n\t\t\t\t\tgame_over = True\n\n\t\t\t\t\n\t\t\t\tgame_board.draw_board(screen)\n\n\t\t\tturn += 1\n\t\t\tturn = turn % 2\n\tif game_over:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\tpygame.time.wait(6000)\n\n\t\t\t\n\t\t\t\t\n","repo_name":"Grantjjscott/pythonprojects","sub_path":"Conn4/driverAI.py","file_name":"driverAI.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29108892345","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.shortcuts import HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm\nfrom .models import Profile\nfrom .serializers import AccountSerializer, AccountPhotoSerializer, AccountDetailsSerializer\nfrom django.contrib.auth.models import User, auth\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework.decorators import parser_classes\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.models import Token\n\nimport json\n\n#login\ndef user_login(request):\n    if request.method == \"POST\":\n        form = LoginForm(request.POST)\n        if form.is_valid():\n            cd = form.cleaned_data\n            user = authenticate(request, username=cd['username'], password=cd['password'])\n            if user is not None:\n                if user.is_active:\n                    login(request, user)\n                    return HttpResponse(\"Authenticated successfully\")\n                else:\n                    return HttpResponse(\"Disabled account\")\n            else:\n                return HttpResponse(\"Invalid login\")\n\n    else:\n        form = LoginForm()\n\n    return render(request, 'account/login.html', {'form': form})\n\n@login_required\ndef dashboard(request):\n    return render(request, 'account/dashboard.html', {'section': 'dashboard'})\n\n\n\ndef user_register(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            messages.success(request, f'Account {username} was created successfully! Please log in.')\n            return redirect('login')\n    else:\n        form = UserCreationForm()\n    return render(request, 'register.html', {'form': form})\n\n\n#register\ndef register(request):\n    if request.method == \"POST\":\n        user_form = UserRegistrationForm(request.POST)\n        if user_form.is_valid():\n            new_user = user_form.save(commit=False)\n            new_user.set_password(user_form.cleaned_data['password'])\n            new_user.save()\n            Profile.objects.create(user=new_user)\n            return render(request, 'account/register_done.html', {'new_user': new_user})\n    else:\n        user_form = UserRegistrationForm()\n    return render(request, 'account/register.html', {'user_form': user_form})\n\n#search users\ndef search_user(request):\n    if request.method == 'POST':\n        search_term = request.POST.get('search_term')\n        users = User.objects.filter(username__icontains=search_term)\n        return render(request, 'account/search_user.html', {'users': users, 'search_term': search_term})\n    else:\n        return render(request, 'account/search_user.html')\n\n\n@login_required\ndef edit(request):\n    Profile.objects.get_or_create(user=request.user)\n    if request.method == \"POST\":\n        user_form = UserEditForm(instance=request.user, data=request.POST)\n        profile_form = ProfileEditForm(instance=request.user.profile, data=request.POST, files=request.FILES)\n        if user_form.is_valid() and profile_form.is_valid():\n            user_form.save()\n            profile_form.save()\n    else:\n        user_form = UserEditForm(instance=request.user)\n        profile_form = ProfileEditForm(instance=request.user.profile)\n\n    return render(request, 'account/edit.html', {'user_form': user_form, 'profile_form': profile_form})\n\n@api_view(['PUT'])\n@authentication_classes([])\n@parser_classes([MultiPartParser])\ndef upload_profile_pic(request):\n    user = auth.get_user(request)\n    profile, created = Profile.objects.get_or_create(user=user)\n\n    picture = AccountPhotoSerializer(profile, data=request.data)\n    if picture.is_valid():\n        picture.save()\n        return Response(picture.data)\n    \n    return Response(json.dumps(request.data['photo']))\n\n@api_view(['POST'])\n@authentication_classes([])\ndef change_details(request):\n    user = auth.get_user(request)\n    userObject = User.objects.get(id=user.id)\n\n    userAuth = auth.authenticate(username=user.username, password=request.data['password'])\n    if(userAuth is not None):\n\n        data = {'username': request.data['username'], 'email': request.data['email']}\n        userSerializer = AccountDetailsSerializer(userObject, data=data)\n        \n        if(userSerializer.is_valid()):\n            userSerializer.save()\n            return Response(userSerializer.data)\n        \n    return Response({'success':False},status=status.HTTP_400_BAD_REQUEST)","repo_name":"MiB3Avenger/wave","sub_path":"django/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"14594556279","text":"from typing import Deque, List, Tuple\nfrom collections import deque\nimport math\n\nDIRECTIONS = ((0, 1), (1, 0), (0, -1), (-1, 0)) # East, South, West, North\n\n\ndef get_input(data_file: str) -> List:\n    \"\"\"Read data file and return as list.\"\"\"\n    with open(data_file) as f:\n        grid = []\n        for line in f.readlines():\n            row = [int(x) for x in line.strip()]\n            grid.append(row)\n    return grid\n\n\nclass LavaCave:\n    def __init__(self, grid: List):\n        self.grid = grid\n        self.east_edge = len(grid[0])\n        self.south_edge = len(grid)\n\n    def get_neighbours(self, location: Tuple) -> List:\n        
\"\"\"\n Return all neighbours that are to the east, south, north or west of\n the location.\n \"\"\"\n neighbours = []\n row, col = location\n for direction in DIRECTIONS:\n ns, ew = direction\n new_row = row + ns\n new_col = col + ew\n if 0 <= new_row < self.south_edge and 0 <= new_col < self.east_edge:\n neighbours.append((new_row, new_col))\n return neighbours\n\n @property\n def low_points(self) -> List:\n \"\"\"\n Return all low points. These are locations that are lower than any of\n its adjacent locations.\n \"\"\"\n low_points = []\n for row in range(self.south_edge):\n for col in range(self.east_edge):\n height = self.grid[row][col]\n neighbours = self.get_neighbours((row, col))\n heights = [self.grid[r][c] for r, c in neighbours]\n if height < min(heights):\n low_points.append((row, col))\n return low_points\n\n def size_basin(self, low_point: Tuple) -> int:\n \"\"\"\n Return size of basin. Uses breadth first search to determine the size\n of the basin.\n \"\"\"\n basin = 0\n frontier: Deque = deque()\n frontier.append(low_point)\n seen = {low_point}\n while frontier:\n location = frontier.popleft()\n row, col = location\n if self.grid[row][col] < 9:\n basin += 1\n neighbours = self.get_neighbours(location)\n for nb in neighbours:\n if nb in seen:\n continue\n r, c = nb\n if self.grid[r][c] < 9:\n frontier.append(nb)\n seen.add(nb)\n return basin\n\n def compute_p1(self) -> int:\n \"\"\"\n Return the sum of the risk levels of all low points. Risk level is\n height + 1. Answer part 1.\n \"\"\"\n all_lows = [self.grid[row][col] for row, col in self.low_points]\n return sum(all_lows) + (len(all_lows) * 1)\n\n def compute_p2(self) -> int:\n \"\"\"\n Return the product of the sizes of the largest 3 basins. Answer\n part 2.\n \"\"\"\n basins = []\n for low in self.low_points:\n basins.append(self.size_basin(low))\n largest = sorted(basins, reverse=True)[:3]\n return math.prod(largest)\n\n\nif __name__ == \"__main__\":\n e1 = get_input(\"examples/e2021_09.txt\")\n t1 = LavaCave(e1)\n assert t1.compute_p1() == 15\n assert t1.compute_p2() == 1134\n\n day9 = get_input(\"inputs/2021_09.txt\")\n d1 = LavaCave(day9)\n print(\"day 9 part 1 =\", d1.compute_p1())\n print(\"day 9 part 2 =\", d1.compute_p2())\n","repo_name":"DeCooper88/AOC2021","sub_path":"AOC2021_09.py","file_name":"AOC2021_09.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"32058606556","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n#ordinal encoding\r\nage_dict={'18-24':6,'25-34':5,'35-44':4,'45-54':3,'55-64':2,'65+':1}\r\neducation_dict={'Doct Deg':9,'Mast Deg':8,'Prof Cert':7,'Univ Deg':6,'Some Clg':5,\r\n 'LS@18Y':4,'LS@17Y':3,'LS@16Y':2,'LSB 16Y':1}\r\nethnicity_list=['Asian','Black','Mixed-Black/Asian','Mixed-White/Asian','Mixed-White/Black','White','other']\r\ngender_list=['Female','Male']\r\ncountry_list=['Australia', 'Canada', 'New Zealand','Other', 'Reb of Inreland', 'UK', 'USA']\r\n\r\ndef encdoding_data(age,education,n_score_values,e_score_values,o_score_values,a_score_values,c_score_values,impulsive_val,ss_val,country,ethinicty,gender):\r\n features = [age_dict[age],education_dict[education],n_score_values,e_score_values,\r\n o_score_values,a_score_values,c_score_values,impulsive_val,ss_val]\r\n encode_1 = pd.DataFrame(data={'ethni': ethnicity_list})\r\n encode_2 = pd.DataFrame(data={'Gender': gender_list})\r\n encode_3 = pd.DataFrame(data={'country': country_list})\r\n\r\n cou_enc = 
pd.get_dummies(encode_3['country']).iloc[country_list.index(country), :].values\r\n    eth_enc = pd.get_dummies(encode_1['ethni']).iloc[ethnicity_list.index(ethinicty), :].values\r\n    gen_enc = pd.get_dummies(encode_2['Gender']).iloc[gender_list.index(gender), :].values\r\n\r\n    a, b,c = list(cou_enc), list(eth_enc),list(gen_enc)\r\n\r\n    features.extend(a)\r\n    features.extend(b)\r\n    features.extend(c)\r\n    in_arr=np.array([features],dtype=float)\r\n\r\n    return in_arr","repo_name":"Jaiharish-passion07/Drug_consumption","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25447710982","text":"from flask_t import request, Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    user_agent = request.headers.get('User-Agent')\n    return '<p>Your browser is %s</p>' % user_agent\n\n\nif __name__ == '__main__':\n    app.run(port=8080)\n","repo_name":"tianmingbo/GOOD-GOOD-STUDY","sub_path":"python大法/flask源码学习/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"62"} +{"seq_id":"8892221724","text":"class PRNG_Mersenne:\n    def __init__(self):\n        # The MT19937 coefficients\n        self.a = 0x9908b0df\n        self.b = 0x9d2c5680\n        self.c = 0xEFC60000\n        self.f = 0x6c078965\n        self.l = 18\n        self.m = 397\n        self.n = 624\n        self.s = 7\n        self.t = 15\n        self.u = 11\n        self.w = 32\n        self.r = 31\n        self.state = [0] * 624\n        self.seed()\n        self.index = 625\n        self.lower_mask = 0x7FFFFFFF # (1 << r) - 1: the lowest r bits\n        self.upper_mask = 0x80000000 # the remaining w - r upper bits\n\n    def seed(self, a=0):\n        self.state[0] = a\n        for i in range(1, self.n):\n            temp = (\n                self.f * (self.state[i - 1] ^ (self.state[i - 1] >> (self.w - 2))) + i)\n            self.state[i] = self.int_32(temp)\n\n    def twister(self):\n        for i in range(0, self.n):\n            x = (self.state[i] & self.upper_mask) + \\\n                (self.state[(i + 1) % self.n] & self.lower_mask)\n            xA = x >> 1\n            if (x % 2) != 0:\n                xA = xA ^ self.a\n            self.state[i] = self.state[(i + self.m) % self.n] ^ xA\n        self.index = 0\n\n    def gen_random_int(self):\n        # Generates a random integer\n        if self.index >= self.n:\n            self.twister()\n\n        y = self.state[self.index]\n        y = y ^ (y >> self.u)\n        y = y ^ ((y << self.s) & self.b)\n        y = y ^ ((y << self.t) & self.c)\n        y = y ^ (y >> self.l)\n\n        self.index += 1\n\n        return self.int_32(y)\n\n    def int_32(self, number):\n        return int(number & 0xFFFFFFFF)\n\n    def random(self):\n        return self.gen_random_int() / 4294967296 # divide by 0xFFFFFFFF + 1 to map onto [0, 1)\n\n    def randrange(self, a, b):\n        # Own version of the form used in random.py\n        n = self.random()\n        return int(n / (1 / (b - a)) + a)\n\n    def randint(self, a, b):\n        return self.randrange(a, b + 1)\n\n\nwith open('C:/Users/jaspe/Downloads/mersenneTest.txt', 'r+') as f:\n    l = [int(x.strip()) for x in f]\nrng = PRNG_Mersenne()","repo_name":"Expensure/MersenneRandomizer","sub_path":"Mersenne.py","file_name":"Mersenne.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"43870388394","text":"import sys, string\nimport os\nimport socket\nimport time\nimport operator\nimport boto3\nimport json\nfrom pyspark.sql import SparkSession\nfrom datetime import datetime\nfrom operator import add\nfrom functools import partial\nfrom operator import itemgetter\n\n\nif __name__ == \"__main__\":\n\n    spark = SparkSession\\\n        .builder\\\n        .appName(\"gg\")\\\n        .getOrCreate()\n    \n    def trans_good_line(line):\n        try:\n            fields = line.split(',')\n            if len(fields)!=15: # Should be 15 fields\n                return False\n            int(fields[3]) # Block num field should be integer\n            return True\n        except:\n            return False\n    \n    def contract_good_line(line):\n        try:\n            fields = line.split(',')\n            if len(fields)!=6: # Should be 6 fields\n                return False\n            if fields[3]=='True' or fields[3]=='False': # is_erc20 column should be either True or False\n                return True\n        except:\n            return False\n\n    # shared read-only object bucket containing datasets\n    s3_data_repository_bucket = os.environ['DATA_REPOSITORY_BUCKET']\n\n    s3_endpoint_url = os.environ['S3_ENDPOINT_URL']+':'+os.environ['BUCKET_PORT']\n    s3_access_key_id = os.environ['AWS_ACCESS_KEY_ID']\n    s3_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']\n    s3_bucket = os.environ['BUCKET_NAME']\n\n    hadoopConf = spark.sparkContext._jsc.hadoopConfiguration()\n    
hadoopConf.set(\"fs.s3a.endpoint\", s3_endpoint_url)\n hadoopConf.set(\"fs.s3a.access.key\", s3_access_key_id)\n hadoopConf.set(\"fs.s3a.secret.key\", s3_secret_access_key)\n hadoopConf.set(\"fs.s3a.path.style.access\", \"true\")\n hadoopConf.set(\"fs.s3a.connection.ssl.enabled\", \"false\")\n\n trans_lines = spark.sparkContext.textFile(\"s3a://\" + s3_data_repository_bucket + \"/ECS765/ethereum-parvulus/transactions.csv\")\n con_lines = spark.sparkContext.textFile(\"s3a://\" + s3_data_repository_bucket + \"/ECS765/ethereum-parvulus/contracts.csv\")\n \n \n trans_clean_lines = trans_lines.filter(trans_good_line)\n contract_clean_lines = con_lines.filter(contract_good_line)\n \n \n t_ds= trans_clean_lines.map(lambda l: (l.split(\",\")[6],(l.split(\",\")[11],l.split(\",\")[8]))) #(to_add,(date,gas))\n con_ds = contract_clean_lines.map(lambda c: (c.split(',')[0], 1)) #(add, 1)\n \n \n \n # Gas_used for contracts vs time\n join_ds=con_ds.join(t_ds) #(add, (1, (date,gas)))\n \n gas = join_ds.map(lambda t: (time.strftime(\"%m-%Y\",time.gmtime(int(t[1][1][0]))),int(t[1][1][1]))) #(date,gas)\n gasvstime = gas.reduceByKey(add) #(date, sum_monthly_gas)\n print(\"Gas\",gas.take(10))\n print(\"GasvsTime\",gasvstime.collect())\n \n my_bucket_resource = boto3.resource('s3',\n endpoint_url='http://' + s3_endpoint_url,\n aws_access_key_id=s3_access_key_id,\n aws_secret_access_key=s3_secret_access_key)\n \n my_result_object = my_bucket_resource.Object(s3_bucket,'cwgg1/q4gasvstime.txt')\n my_result_object.put(Body=json.dumps(gasvstime.collect()))\n \n spark.stop()\n","repo_name":"riyadodthi/Etherum-Analysis","sub_path":"PART_D/Gas Guzzlers/q4gasvstime.py","file_name":"q4gasvstime.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13906750174","text":"\nteach_level_choices = (\n ('Preschool', 'Preschool'),\n ('Primary', 'Primary'),\n ('Junior High', 'Junior High'),\n ('Senior High', 'Senior High'),\n ('Tertiary', 'Tertiary'),\n ('Adult Education', 'Adult Education'),\n ('All ages', 'All ages')\n)\n\nhighest_education_choices = (\n ('highschool', 'Highschool'),\n ('undergrad', 'Undergrad'),\n ('graduate', 'Graduate'),\n ('postgrad', 'Post Graduate'),\n ('masters', 'Masters')\n)\n\nclass_type_choices = (\n ('individual', 'Individual'),\n ('group', 'Group'),\n ('online', 'Online')\n)\n\nfree_lesson_choices = (\n (45, '45 minutes'),\n (60, '1 hour'),\n (90, '1 hour 30 minutes'),\n (120, '2 hours')\n)\n\nuser_type_choices = (\n ('student', 'Student'),\n ('guardian', 'Guardian'),\n ('tutor', 'Tutor')\n )\n\nuser_profile_choices = (\n ('student', 'Student'),\n ('guardian', 'Guardian')\n )","repo_name":"stephappiah/hometutors","sub_path":"homestud/findtutors/multi_choices.py","file_name":"multi_choices.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"14949324241","text":"# 1st\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox\nimport Status as st\nimport cv2\nimport webbrowser\nwindow = tk.Tk()\nvlc = st.VlcChecker()\n\n# title\nwindow.title(\"Media PLayer Controller\")\n\n# window geometer & properties\nwindow.geometry(\"600x400+0+0\")\nwindow.resizable(False, False)\nwindow.config(bg=\"#8BD2B8\")\nwindow.tk.call('wm', 'iconphoto', window._w,\n tk.PhotoImage(file='html\\\\media.png'))\n\n# To display Instruction\ninstruct_1 = tk.Label(\n window,\n text=\"Please Click on CHECK 
Button, to check whether this device has VLC MEDIA PLAYER installed and the camera is \"\n         \"available.\",\n    font=(\"Comic Sans MS\", 15, \"bold\", \"italic\"),\n    wraplength=550,\n    justify=\"center\",\n    bg=\"#8BD2B8\"\n)\n\n# ============== On click function for button =====================================\n\n# check button\n\n\ndef url_call(url):\n    webbrowser.open_new_tab(url)\n\n\ndef check_onClick():\n    vlc_check = vlc.getAppPath(appName=\"vlc\")\n    cap = cv2.VideoCapture(0)\n    #vlc_check = False\n\n    if vlc_check != False:\n        message_vlc_T = tk.Label(\n            window,\n            text=\"VLC Media Player Is Installed. \",\n            font=(\"Comic Sans MS\", 10, \"italic\"),\n            fg=\"Green\",\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        message_vlc_T.place(x=180, y=175)\n        link = Label(\n            window,\n            text=\" \",\n            bg=\"#8BD2B8\",\n            justify=\"center\",\n        )\n        link.place(x=210, y=200)\n    else:\n        message_vlc_F = Label(\n            window,\n            text=\"VLC Media Player Is Not Installed!\",\n            font=(\"Comic Sans MS\", 10, \"italic\"),\n            fg=\"Red\", # 150877\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        message_vlc_F.place(x=180, y=175)\n        link = Label(\n            window,\n            text=\"Click Here To Download\",\n            font=(\"Comic Sans MS\", 10, \"italic\"),\n            fg=\"Blue\",\n            bg=\"#8BD2B8\",\n            justify=\"center\",\n            cursor=\"hand2\"\n        )\n        link.place(x=210, y=200)\n        link.bind(\"<Button-1>\", lambda e:\n                  url_call(\"https://www.videolan.org/\"))\n\n    if cap.isOpened():\n        message_cap_T = tk.Label(\n            window,\n            text=\"Camera is Available. \",\n            font=(\"Comic Sans MS\", 10, \"italic\"),\n            fg=\"Green\",\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        message_cap_T.place(x=210, y=230)\n    else:\n        messag_cap_F = tk.Label(\n            window,\n            text=\"Camera is Not Available\",\n            font=(\"Comic Sans MS\", 10, \"italic\"),\n            fg=\"Red\", # 150877\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        messag_cap_F.place(x=210, y=230)\n    if vlc_check != False and cap.isOpened():\n        message_all_T = tk.Label(\n            window,\n            text=\"All Set, Ready to Run \",\n            font=(\"Comic Sans MS\", 15, \"bold\", \"italic\"),\n            fg=\"Black\",\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        message_all_T.place(x=200, y=270)\n    else:\n        message_all_F = tk.Label(\n            window,\n            text=\"Please meet the Requirements\",\n            font=(\"Comic Sans MS\", 15, \"bold\", \"italic\"),\n            fg=\"Black\",\n            justify=\"center\",\n            bg=\"#8BD2B8\"\n        )\n        message_all_F.place(x=200, y=270)\n    cap.release()\n\n# run button\n\n\ndef run_onClick():\n    vlc_check = vlc.getAppPath(appName=\"vlc\")\n    cap = cv2.VideoCapture(0)\n    if vlc_check != False and cap.isOpened():\n        cap.release()\n        window.destroy()\n        vlc.start(name=\"vlc\")\n    else:\n        messagebox.showerror(\n            \"ERROR\", \"Requirements Don't Match \\n Click on CHECK button to know more\")\n\n# ==================== Creating Buttons ===============================\n\n\n# button check\ncheck = Button(window, text=\"Check\",\n               command=check_onClick, width=15)\n\n# button run\nrun = tk.Button(window, text=\"Run\", command=run_onClick,\n                width=15)\n\n# button instruction\n\ninfo = Label(\n    window,\n    text=\"How to Use? Click Here !\",\n    font=(\"Comic Sans MS\", 10, \"italic\"),\n    fg=\"Blue\",\n    bg=\"#8BD2B8\",\n    justify=\"center\",\n    cursor=\"hand2\"\n)\ninfo.bind(\"<Button-1>\", lambda e:\n          url_call(\"info.html\"))\n\n\n# packing\ninstruct_1.place(x=50, y=10)\ncheck.place(x=30, y=170)\nrun.place(x=30, y=215)\ninfo.place(x=360, y=350)\n\nwindow.mainloop()\n","repo_name":"anikesh2/Handy-media-player-controller","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"3086843789","text":"import json\nimport boto3\nimport os\nimport logging\nfrom setup_logger import SetupLogging\n\nargs =[\"source\", \"api_id\"] #optional\n\n@SetupLogging(*args)\ndef lambda_handler(event, context, log_dict=None, logger=None):\n    \n    log_dict['api_id'] =event.get('requestContext')['requestId'] if event.get('requestContext') else None\n    log_dict['source'] = context.function_name\n    logger.info(\"success\", extra=log_dict)\n    \n    try:\n        a=10\n        res =a/10\n        logger.info(\"success\", extra=log_dict)\n        return_res= {\n            'statusCode': 200,\n            'body': json.dumps('OK')\n        }\n    except Exception as e:\n        logger.exception(str(e), extra=log_dict)\n        return_res= {\n            'statusCode': 409,\n            'body': json.dumps('Exception:{}'.format(str(e)))\n        }\n    \n    return return_res\n    \n    \n","repo_name":"akhilpy/json-lambda-logger-python","sub_path":"lambda_handler.py","file_name":"lambda_handler.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22974947216","text":"import sys\n\nfrom pytube import YouTube\nfrom pprint import pprint\nfrom pytube.exceptions import MultipleObjectsReturned\nimport cv2 as cv\nimport os\nfrom scipy.misc import imresize\nimport numpy as np\nimport requests\nfrom python_speech_features import mfcc\nimport scipy.io.wavfile as wav\n\n\n'''\nGet The Actual MP4s For NBA GAMES\n'''\n\n\ndef extract_features(file, size, sample_rate):\n    cap = cv.VideoCapture(file)\n    fps = int(cap.get(cv.cv.CV_CAP_PROP_FPS))\n    length = int(cap.get(cv.cv.CV_CAP_PROP_FRAME_COUNT))\n    freq = fps // sample_rate\n\n    X = np.zeros((length // freq, size[0], size[1], 3))\n    i = 0\n    j = 0\n\n    while (cap.isOpened()):\n\n        ret, frame = cap.read()\n\n        if ret == True:\n            \n            if i % freq == 0:\n\n                X[j] = imresize(frame, size)\n\n                j += 1\n\n                if j >= X.shape[0]:\n                    break\n\n            i += 1\n\n        else:\n            break\n\n    X = X.astype(np.float32)\n\n    return (X)\n\n\nnba_replays = 'https://www.youtube.com/channel/UC0rDNVMafPWtpY63vFbxC3A/videos'\nhtml = requests.get(nba_replays)\ngames = [ x.split('\"')[0] for x in html.text.split('\"/watch?v=') ][1:]\ngames = ['https://www.youtube.com/watch?v=' + x for x in games ]\n\nsize = (50,50)\n\ncmd= 'ffmpeg -ac 1 -i {} {}'\nrm = 'rm {}'\nj= 0\n\nfor i,game in enumerate(games):\n\n    written = True\n    \n    if j > 40:\n        break\n\n    try:\n        yt = YouTube(game)\n\n        yt.set_filename( 'game_'+str(i))\n\n        video = yt.filter('mp4')[0]\n        \n        print(video)\n\n        video.download('./games')\n\n\n        fn = './games/game_'+str(i) +'.mp4'\n        new_fn = 'game_'+str(i)+'.wav'\n\n        # get .wav\n        os.system(cmd.format(fn,'./games_audio/' +new_fn))\n\n        # delete video file\n        os.system(rm.format('./games/game_'+str(i) +'.mp4'))\n\n        # read wav\n        fs,x = wav.read('./games_audio/' +new_fn)\n\n        #mfcc coefs\n        mel= mfcc(x[:,0],fs)\n\n        #save mfcc\n        np.save('./games_audio/game_'+str(i)+'.npy' , mel.astype(np.float32))\n\n        #remove .wav\n        os.system(rm.format( './games_audio/' +new_fn ) )\n\n        
j+=1\n\n except:\n pass\n","repo_name":"zachzhang/deeplearning-nba","sub_path":"scrape_game_audio.py","file_name":"scrape_game_audio.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70026876359","text":"# Problem Statement 3: Check if a matrix is a identity matrix\ndef matrix_check(matrix):\n if not isinstance(matrix, list):\n raise Exception(\"matrix is not a list\")\n else:\n for row in matrix:\n if not isinstance(row, list):\n raise Exception(\"one of the row is not a list\")\n\ndef square_check(matrix):\n matrix_check(matrix)\n for row in matrix:\n if len(row) != len(matrix):\n return False\n return len(matrix)\n\ndef identity_check(matrix):\n order = square_check(matrix)\n if order:\n for row in range(order):\n for column in range(order):\n if row == column:\n if matrix[row][column] != 1:\n return False\n else:\n if matrix[row][column] != 0:\n return False\n else:\n return False\n return True\n\n# print identity_check(\n# [[1,0,0],\n# [0,1,0],\n# [0,0,1],\n# [0,0,0]])\n# print identity_check([[1,0],0,1])","repo_name":"cranticumar/IPND","sub_path":"Stage2/Python/identity_check_matrix.py","file_name":"identity_check_matrix.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15105451841","text":"from __future__ import division, unicode_literals, print_function, absolute_import\n\nimport pytest\nimport numpy as np\nimport xarray as xr\nimport scipy.stats\n\nimport podpac\nfrom podpac.core.data.types import Array\nfrom podpac.core.algorithm.stats import Min, Max, Sum, Count, Mean, Variance, Skew, Kurtosis, StandardDeviation\nfrom podpac.core.algorithm.stats import Median, Percentile\nfrom podpac.core.algorithm.stats import GroupReduce, DayOfYear\n\ndef setup_module():\n global coords, source, data\n coords = podpac.Coordinates(\n [podpac.clinspace(0, 1, 10), podpac.clinspace(0, 1, 10), podpac.crange('2018-01-01', '2018-01-10', '1,D')],\n dims=['lat', 'lon', 'time'])\n\n a = np.random.random(coords.shape)\n a[3, 0, 0] = np.nan\n a[0, 3, 0] = np.nan\n a[0, 0, 3] = np.nan\n source = Array(source=a, native_coordinates=coords)\n data = source.eval(coords)\n\nclass TestReduce(object):\n \"\"\" Tests the Reduce class \"\"\"\n\n def setup_method(self):\n # save chunk size\n self.saved_chunk_size = podpac.settings['CHUNK_SIZE']\n podpac.settings['CHUNK_SIZE'] = None\n\n def teardown_method(self):\n podpac.settings['CHUNK_SIZE'] = self.saved_chunk_size\n\n def test_auto_chunk(self):\n podpac.settings['CHUNK_SIZE'] = 'auto'\n\n # any reduce node would do here\n node = Min(source=source)\n node.eval(coords)\n\n def test_not_implemented(self):\n from podpac.core.algorithm.stats import Reduce\n\n node = Reduce(source=source)\n with pytest.raises(NotImplementedError):\n node.eval(coords)\n\n def test_chunked_fallback(self):\n from podpac.core.algorithm.stats import Reduce\n\n class First(Reduce):\n def reduce(self, x):\n return x.isel(**{dim:0 for dim in self.dims})\n\n node = First(source=source, dims='time')\n \n # use reduce function\n podpac.settings['CHUNK_SIZE'] = None\n output = node.eval(coords)\n \n # fall back on reduce function with warning\n with pytest.warns(UserWarning):\n podpac.settings['CHUNK_SIZE'] = 100\n output_chunked = node.eval(coords)\n\n # should be the same\n xr.testing.assert_allclose(output, output_chunked)\n\nclass BaseTests(object):\n \"\"\" Common tests for Reduce 
subclasses \"\"\"\n\n def setup_method(self):\n # save chunk size\n self.saved_chunk_size = podpac.settings['CHUNK_SIZE']\n podpac.settings['CHUNK_SIZE'] = None\n\n def teardown_method(self):\n podpac.settings['CHUNK_SIZE'] = self.saved_chunk_size\n\n def test_full(self):\n node = self.NodeClass(source=source)\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_full)\n np.testing.assert_allclose(output.data, self.expected_full.data)\n\n node = self.NodeClass(source=source, dims=coords.dims)\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_full)\n np.testing.assert_allclose(output.data, self.expected_full.data)\n\n def test_full_chunked(self):\n podpac.settings['CHUNK_SIZE'] = 100\n node = self.NodeClass(source=source, dims=coords.dims)\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_full)\n np.testing.assert_allclose(output.data, self.expected_full.data)\n\n def test_lat_lon(self):\n node = self.NodeClass(source=source, dims=['lat', 'lon'])\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_latlon)\n np.testing.assert_allclose(output.data, self.expected_latlon.data)\n\n @pytest.mark.xfail(reason=\"bug, to fix\")\n def test_lat_lon_chunked(self):\n podpac.settings['CHUNK_SIZE'] = 100\n node = self.NodeClass(source=source, dims=['lat', 'lon'])\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_latlon)\n np.testing.assert_allclose(output.data, self.expected_latlon.data)\n\n def test_time(self):\n node = self.NodeClass(source=source, dims='time')\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_time)\n np.testing.assert_allclose(output.data, self.expected_time.data)\n\n def test_time_chunked(self):\n podpac.settings['CHUNK_SIZE'] = 100\n node = self.NodeClass(source=source, dims='time')\n output = node.eval(coords)\n # xr.testing.assert_allclose(output, self.expected_time)\n np.testing.assert_allclose(output.data, self.expected_time.data)\n\nclass TestMin(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Min\n cls.expected_full = data.min()\n cls.expected_latlon = data.min(dim=['lat', 'lon'])\n cls.expected_time = data.min(dim='time')\n\nclass TestMax(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Max\n cls.expected_full = data.max()\n cls.expected_latlon = data.max(dim=['lat', 'lon'])\n cls.expected_time = data.max(dim='time')\n\nclass TestSum(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Sum\n cls.expected_full = data.sum()\n cls.expected_latlon = data.sum(dim=['lat', 'lon'])\n cls.expected_time = data.sum(dim='time')\n\nclass TestCount(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Count\n cls.expected_full = np.isfinite(data).sum()\n cls.expected_latlon = np.isfinite(data).sum(dim=['lat', 'lon'])\n cls.expected_time = np.isfinite(data).sum(dim='time')\n\nclass TestMean(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Mean\n cls.expected_full = data.mean()\n cls.expected_latlon = data.mean(dim=['lat', 'lon'])\n cls.expected_time = data.mean(dim='time')\n\nclass TestVariance(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Variance\n cls.expected_full = data.var()\n cls.expected_latlon = data.var(dim=['lat', 'lon'])\n cls.expected_time = data.var(dim='time')\n\nclass TestStandardDeviation(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = StandardDeviation\n 
cls.expected_full = data.std()\n cls.expected_latlon = data.std(dim=['lat', 'lon'])\n cls.expected_time = data.std(dim='time')\n\nclass TestSkew(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Skew\n n, m, l = data.shape\n cls.expected_full = xr.DataArray(scipy.stats.skew(data.data.reshape(n*m*l), nan_policy='omit'))\n cls.expected_latlon = scipy.stats.skew(data.data.reshape((n*m, l)), axis=0, nan_policy='omit')\n cls.expected_time = scipy.stats.skew(data, axis=2, nan_policy='omit')\n\nclass TestKurtosis(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Kurtosis\n n, m, l = data.shape\n cls.expected_full = xr.DataArray(scipy.stats.kurtosis(data.data.reshape(n*m*l), nan_policy='omit'))\n cls.expected_latlon = scipy.stats.kurtosis(data.data.reshape((n*m, l)), axis=0, nan_policy='omit')\n cls.expected_time = scipy.stats.kurtosis(data, axis=2, nan_policy='omit')\n\nclass TestMedian(BaseTests):\n @classmethod\n def setup_class(cls):\n cls.NodeClass = Median\n cls.expected_full = data.median()\n cls.expected_latlon = data.median(dim=['lat', 'lon'])\n cls.expected_time = data.median(dim='time')\n\n# class TestPercentile(BaseTests):\n# @classmethod\n# def setup_class(cls):\n# cls.NodeClass = Percentile\n\nclass TestGroupReduce(object):\n pass\n\nclass TestDayOfYear(object):\n pass\n","repo_name":"ccuadrado/podpac","sub_path":"podpac/core/algorithm/test/test_stats.py","file_name":"test_stats.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"} +{"seq_id":"3495089038","text":"import os\nimport pandas as pd\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef init_browser():\n # Replace the path with your actual path to the chromedriver\n executable_path = {\"executable_path\": \"C:\\\\Users\\\\keg827\\\\ChromeDriver\\\\chromedriver.exe\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\n###### VARIABLES #######\nnews_title = []\nnews_teaser = []\nimage_feature = []\ntweets = []\nthumbs = []\ntitles = []\nnews = {}\n\n\n###### SCRAPE FUNCTION #######\n\ndef scrape():\n\n #### SCRAPE NEWS TITLE #####\n # URL of page to be scraped\n news_title_url = 'https://mars.nasa.gov/news'\n # Retrieve page with the requests module\n news_title_response = requests.get(news_title_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(news_title_response.text, 'html.parser')\n \n for div in soup.findAll('div', {'class': 'content_title'}):\n a = div.findAll('a')\n for link in a:\n href = link.get('href')\n headline_base = link.get_text()\n headline_strip_front = headline_base.lstrip()\n headline_strip_back = headline_strip_front.rstrip()\n #print(href)\n #print(headline)\n news_title.append(headline_strip_back)\n \n news[\"news_title\"] = news_title[0]\n \n ####### SCRAPE NEWS TEASER #########\n\n # URL of page to be scraped\n news_teaser_url = 'https://mars.nasa.gov/news'\n # Retrieve page with the requests module\n news_teaser_response = requests.get(news_teaser_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(news_teaser_response.text, 'html.parser')\n #print(soup.prettify())\n \n for div in soup.findAll('div', {'class': 'rollover_description_inner'}):\n \n teaser_base = div.get_text()\n teaser_front = teaser_base.lstrip()\n teaser_back = teaser_front.rstrip()\n #print(teaser)\n news_teaser.append(teaser_back)\n \n news[\"news_teaser\"] = news_teaser[0]\n 
#return(news)\n\n ######### SCRAPE MARS FEATURE IMAGE ##########\n\n browser = init_browser()\n image_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(image_url)\n \n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n browser.click_link_by_partial_text('FULL IMAGE')\n \n for image in soup.findAll('img'):\n src = image['src']\n full_link = 'https://www.jpl.nasa.gov' + src\n image_feature.append(full_link)\n #print(full_link)\n \n news[\"image\"] = image_feature[3]\n \n ######## SCRAPE WEATHER TWEET #########\n\n # URL of page to be scraped\n weather_url = 'https://twitter.com/marswxreport?lang=en'\n # Retrieve page with the requests module\n weather_response = requests.get(weather_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(weather_response.text, 'html.parser')\n #print(soup.prettify())\n \n for p in soup.findAll('p', class_=\"tweet-text\"):\n tweet = p.get_text()\n #print(tweet)\n tweets.append(tweet)\n \n news[\"tweet\"] = tweets[0]\n \n #print(news)\n #return(news)\n\n ######### SCRAPE FACTS TABLE #############\n\n # URL of page to be scraped\n facts_url = 'https://space-facts.com/mars/'\n # Retrieve page with the requests module\n facts_response = requests.get(facts_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(facts_response.text, 'html.parser')\n #print(soup.prettify())\n \n table = soup.find('table', attrs={'id':'tablepress-mars'})\n table_rows = table.find_all('tr')\n\n res = []\n for tr in table_rows:\n td = tr.find_all('td')\n row = [tr.text.strip() for tr in td if tr.text.strip()]\n if row:\n res.append(row)\n\n df = pd.DataFrame(res, columns=[\"Fact\", \"Value\"])\n #print(df)\n # for fact in df[\"Fact\"]:\n #print(fact)\n \n df[\"Fact\"] = df[\"Fact\"].str.replace(' ', '_')\n df['Fact'] = df['Fact'].str.rstrip(':')\n news[df[\"Fact\"][0]] = df[\"Value\"][0]\n news[df[\"Fact\"][1]] = df[\"Value\"][1]\n news[df[\"Fact\"][2]] = df[\"Value\"][2]\n news[df[\"Fact\"][3]] = df[\"Value\"][3]\n news[df[\"Fact\"][4]] = df[\"Value\"][4]\n news[df[\"Fact\"][5]] = df[\"Value\"][5]\n news[df[\"Fact\"][6]] = df[\"Value\"][6]\n news[df[\"Fact\"][7]] = df[\"Value\"][7]\n news[df[\"Fact\"][8]] = df[\"Value\"][8]\n \n #print(news)\n #return(new) \n \n ######### SCRAPE HEMISPHERE IMAGES #########\n # URL of page to be scraped\n hemisphere_image_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n # Retrieve page with the requests module\n hemisphere_image_response = requests.get(hemisphere_image_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(hemisphere_image_response.text, 'html.parser')\n #print(soup.prettify())\n \n for image in soup.findAll('img', class_=\"thumb\"):\n src = image['src']\n full_link = 'https://astrogeology.usgs.gov' + src\n thumbs.append(full_link)\n #print(thumbs)\n #return(thumbs)\n \n ######### SCRAPE HEMISPHERE TITLES #########\n # URL of page to be scraped\n hemisphere_title_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n # Retrieve page with the requests module\n hemisphere_title_response = requests.get(hemisphere_title_url)\n \n # Create BeautifulSoup object; parse with 'html.parser'\n soup = BeautifulSoup(hemisphere_title_response.text, 'html.parser')\n #print(soup.prettify())\n \n \n for div in soup.findAll('div', class_=\"description\"):\n image_titles = div.findAll(\"h3\")\n #print(image_titles)\n 
for image_title in image_titles:\n title = image_title.get_text()\n titles.append(title)\n \n #print(titles)\n #return(titles)\n\n\n hemisphere_images = pd.DataFrame({\n 'title': titles,\n 'image_url': thumbs\n })\n\n hemisphere_images[\"title\"] = hemisphere_images[\"title\"].str.replace(' ', '_')\n\n news[hemisphere_images[\"title\"][0]] = hemisphere_images[\"image_url\"][0]\n news[hemisphere_images[\"title\"][1]] = hemisphere_images[\"image_url\"][1]\n news[hemisphere_images[\"title\"][2]] = hemisphere_images[\"image_url\"][2]\n news[hemisphere_images[\"title\"][3]] = hemisphere_images[\"image_url\"][3]\n\n ######### RETURN NEWS DICTIONARY ###########\n return(news) \n \n\nscrape()","repo_name":"kglibrarian/datasciencebootcamp","sub_path":"Web Scraping/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73431076358","text":"\"\"\"\nUnit tests for RoBERTa utils.\n\"\"\"\n\nimport os\nimport unittest\n\nfrom texar.torch.modules.pretrained.roberta import *\nfrom texar.torch.utils.test import pretrained_test\n\n\nclass RoBERTaUtilsTest(unittest.TestCase):\n r\"\"\"Tests RoBERTa utils.\n \"\"\"\n\n @pretrained_test\n def test_load_pretrained_roberta_AND_transform_roberta_to_texar_config(\n self):\n\n pretrained_model_dir = PretrainedRoBERTaMixin.download_checkpoint(\n pretrained_model_name=\"roberta-base\")\n\n info = list(os.walk(pretrained_model_dir))\n _, _, files = info[0]\n self.assertIn('dict.txt', files)\n self.assertIn('model.pt', files)\n self.assertIn('NOTE', files)\n\n model_config = PretrainedRoBERTaMixin._transform_config(\n pretrained_model_name=\"roberta-base\",\n cache_dir=pretrained_model_dir)\n\n exp_config = {\n 'hidden_size': 768,\n 'embed': {\n 'name': 'word_embeddings',\n 'dim': 768\n },\n 'vocab_size': 50265,\n 'position_embed': {\n 'name': 'position_embeddings',\n 'dim': 768\n },\n 'position_size': 514,\n 'encoder': {\n 'name': 'encoder',\n 'embedding_dropout': 0.1,\n 'num_blocks': 12,\n 'multihead_attention': {\n 'use_bias': True,\n 'num_units': 768,\n 'num_heads': 12,\n 'output_dim': 768,\n 'dropout_rate': 0.1,\n 'name': 'self'\n },\n 'residual_dropout': 0.1,\n 'dim': 768,\n 'use_bert_config': True,\n 'poswise_feedforward': {\n 'layers': [\n {\n 'type': 'Linear',\n 'kwargs': {\n 'in_features': 768,\n 'out_features': 3072,\n 'bias': True\n }\n },\n {'type': 'BertGELU'},\n {\n 'type': 'Linear',\n 'kwargs': {\n 'in_features': 3072,\n 'out_features': 768,\n 'bias': True\n }\n }\n ]\n }\n }\n }\n\n self.assertDictEqual(model_config, exp_config)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"li3cmz/GRADE","sub_path":"texar-pytorch/texar/torch/modules/pretrained/roberta_test.py","file_name":"roberta_test.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"62"} +{"seq_id":"31648873740","text":"import numpy as np\nimport cv2 as cv\nfrom enum import Enum\nfrom time import time, sleep\n\nclass COLOR(Enum):\n BLACK = [0,0,0]\n BLUE = [255,0,0]\n GREEN = [0,255,0]\n RED = [0,0,255]\n RED4 = [191,191,255]\n YELLOW = [0,255,255]\n ORANGE = [0,127,255]\n MAGENTA = [255,0,255]\n GREY = [127,127,127]\n WHITE = [255,255,255]\n\nLocation = tuple[int, int]\n\nclass Minimap:\n\n map = None\n matrix = None\n \n\n def __init__(self, name, start=(0,0), square_size=5, radius=55, line_width=1):\n self.start = start\n self.current = start\n self.walkable 
= []\n self.blocked = []\n self.warpable = []\n self.catchable = []\n self.consumable = []\n self.path = []\n\n self.name = name\n self.square_size = square_size\n self.radius = radius\n unit = square_size+(2*line_width)\n side = (1+radius*2)\n self.map_size = (side * square_size) + (side+1) * line_width \n self.line_width = line_width\n\n def update(self, current, walkable=[], blocked=[], warpable=[], catchable=[], consumable=[], path=[]):\n self.current = current\n if walkable != None: self.walkable = walkable\n if blocked != None: self.blocked = blocked\n if warpable != None: self.warpable = warpable\n if catchable != None: self.catchable = catchable\n if consumable != None: self.consumable = consumable\n if path != None: self.path = path\n self.map = None\n\n def set_path(self, path):\n self.path = path\n self.map = None\n \n \n def set_current(self, current):\n self.current = current\n self.map = None\n\n # def __translate(self, node):\n # x, y = node[:2]\n # return (x+self.radius, self.__grid_size()-(y+self.radius))\n\n # def __add_node(self, node, v):\n # x, y = self.__translate(node)\n # self.matrix[y][x] = v\n\n # def __grid_size(self):\n # return self.radius*2+1\n\n # def render_matrix(self):\n # gs = self.__grid_size()\n # self.matrix = np.full((gs,gs), 0, int)\n\n # for w in self.walkable:\n # self.__add_node(w,1)\n\n # for b in self.blocked:\n # self.__add_node(b,2)\n\n # for h in self.highlighted:\n # self.__add_node(h,3)\n\n # return self.matrix\n\n def __calculate_points(self, position):\n lw = self.line_width\n ss = self.square_size\n offset = int(self.map_size -ss)/2\n\n px, py = position[:2]\n\n x1=int(px*(ss+lw)+offset)\n y1=abs(int(py*(ss+lw)-offset))\n x2=int(x1+ss-1)\n y2=int(y1+ss-1)\n return x1, y1, x2, y2\n\n # def __pt1_to_position(self, pt1):\n # lw = self.line_width\n # ss = self.square_size\n # offset = int(self.map_size -ss)/2\n # hss = int(ss/2)\n\n # x1, y1 = pt1[:2]\n\n # x = int(x1/(ss+lw))-offset+hss\n # y = abs(int(y1/(ss+lw))+offset)+hss\n\n # return x, y\n\n def __render_square(self, position, color):\n x1, y1, x2, y2 = self.__calculate_points(position)\n cv.rectangle(self.map, (x1, y1), (x2, y2), color.value, cv.FILLED)\n\n # def __render_dot(self, position, color):\n # x1, y1, x2, y2 = self.__calculate_points(position)\n # x, y = int((x1+x2)/2), int((y1+y2)/2)\n # self.map[y,x] = color.value\n\n def __render_dot(self, position, color):\n x1, y1, x2, y2 = self.__calculate_points(position)\n cv.rectangle(self.map, (x1+1, y1+1), (x2-1, y2-1), color.value, cv.FILLED)\n\n # def __calculate_viewport(self, current):\n # x, y = current\n # return list(set(self.walkable).intersection([(x,y+1),(x+1,y+1),(x+1,y),(x+1,y-1),(x,y-1),(x-1,y-1),(x-1,y),(x-1,y+1)]))\n\n def render(self):\n self.map = np.zeros((self.map_size, self.map_size, 3), np.uint8)\n \n for e in self.walkable:\n self.__render_square(e, COLOR.WHITE)\n\n for b in self.blocked:\n self.__render_square(b, COLOR.GREY)\n\n for w in self.warpable:\n self.__render_square(w, COLOR.BLUE)\n \n for c in self.catchable:\n self.__render_square(c, COLOR.MAGENTA)\n\n for i in self.consumable:\n self.__render_square(i, COLOR.ORANGE)\n\n # view = []\n # for v in self.__calculate_viewport(self.current):\n # view.append(v)\n # view.extend(self.__calculate_viewport(v))\n # for v in view: \n # self.__render_square(v, COLOR.RED4)\n \n \n self.__render_square(self.start, COLOR.GREEN)\n self.__render_square(self.current, COLOR.RED)\n\n for p in self.path:\n self.__render_dot(p, COLOR.RED)\n\n return self.map\n\n def 
save(self, path):\n if not type(self.map) == np.ndarray:\n self.render()\n cv.imwrite(path, self.map)\n\n def show(self, blocking=False):\n if not type(self.map) == np.ndarray:\n self.render()\n cv.imshow(self.name, self.map)\n cv.waitKey(int(not blocking))\n\n # def edit_map(self, path):\n # if path:\n # self.map = cv.imread(path)\n \n \n # while True:\n # cv.imshow(self.name, self.map)\n # key = cv.waitKey(25)\n # if key == ord('q'):\n # cv.destroyAllWindows()\n # break\n # if key == ord('s'):\n # cv.imwrite(f'images/minimap_{time()}.jpg', self.map)\n\n def load_map(self, path):\n img = cv.imread(path)\n # h, w = img.shape[:2]\n start = (0,0)\n current = (0,0)\n walkable = []\n blocked = []\n warpable = []\n catchable = []\n consumable = []\n\n r = self.radius \n # ss = self.square_size\n # width = w/ss\n # height = h/ss\n # hss = int(ss/2)\n\n # square = (self.radius*-1, self.radius)\n # pixel = (hss,hss)\n\n def sample(pos):\n atol = 0\n rtol = 10\n hss = int(self.square_size/2)\n x1, y1 = self.__calculate_points(pos)[:2]\n c = img[y1+hss,x1+hss]\n if np.allclose(c,COLOR.BLACK.value,atol,rtol):\n return\n elif np.allclose(c,COLOR.WHITE.value,atol,rtol):\n walkable.append(pos)\n # elif np.allclose(c,COLOR.GREEN.value,atol,rtol):\n # start = pos \n # walkable.append(pos)\n elif np.allclose(c,COLOR.GREY.value,atol,rtol):\n blocked.append(pos)\n elif np.allclose(c,COLOR.BLUE.value,atol,rtol):\n warpable.append(pos)\n blocked.append(pos)\n elif np.allclose(c,COLOR.MAGENTA.value,atol,rtol):\n catchable.append(pos)\n walkable.append(pos)\n elif np.allclose(c,COLOR.ORANGE.value,atol,rtol):\n consumable.append(pos)\n blocked.append(pos)\n elif np.allclose(c,COLOR.RED.value,atol,rtol):\n current = pos\n walkable.append(pos)\n\n for x in range(-r, r+1, 1):\n for y in range(r, -r-1, -1):\n sample((x,y))\n print(f'loaded current={self.current}, walkable={len(walkable)}, blocked={len(blocked)}, warpable={len(warpable)} catchable={len(catchable)} consumable={len(consumable)}')\n self.update(current,walkable,blocked,warpable,catchable,consumable)\n ","repo_name":"theorenck/pm-bot","sub_path":"minimap.py","file_name":"minimap.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"657800687","text":"\"\"\"Module for functions related to in_service commands\"\"\"\nfrom typing import List\n\nfrom riptide.config.document.app import App\nfrom riptide.config.document.command import Command\nfrom riptide.config.document.project import Project\nfrom riptide.engine.abstract import AbstractEngine\n\n\ndef convert_in_service_to_normal(app: App, command_name: str) -> Command:\n \"\"\"\n Converts the 'in_service' command identified by `command_name` in `app`\n to a regular command. 
Image, 'config_from_roles' and additional volumes are based on the\n service that the 'in_service' command was supposed to be run in.\n \"\"\"\n old_cmd = app['commands'][command_name]\n service = app['services'][old_cmd.get_service(app)]\n\n env = {}\n env.update(service['environment'] if 'environment' in service else {})\n env.update(old_cmd['environment'] if 'environment' in old_cmd else {})\n new_cmd = Command.from_dict({\n '$name': command_name,\n 'image': service['image'],\n 'command': old_cmd['command'],\n 'additional_volumes': service['additional_volumes'] if 'additional_volumes' in service else {},\n 'environment': env,\n 'config_from_roles': [old_cmd['in_service_with_role']],\n 'use_host_network': old_cmd['use_host_network'] if 'use_host_network' in old_cmd else False\n })\n new_cmd.parent_doc = app\n new_cmd.freeze()\n return new_cmd\n\n\ndef run(engine: AbstractEngine, project: Project, command_name: str, arguments: List[str]) -> int:\n \"\"\"\n Runs an in_service command.\n If the service for the command is started, command is executed in that service container.\n Otherwise a new container is started.\n\n Returns exit code of command.\n \"\"\"\n cmd = project[\"app\"][\"commands\"][command_name]\n service = cmd.get_service(project[\"app\"])\n\n if engine.service_status(project, service):\n # Container is running, run in there\n return engine.cmd_in_service(project, command_name, service, arguments)\n else:\n # Container is not running, start a new container\n old_cmd = cmd\n project[\"app\"][\"commands\"][command_name] = convert_in_service_to_normal(project[\"app\"], command_name)\n ret_code = engine.cmd(project, command_name, arguments)\n project[\"app\"][\"commands\"][command_name] = old_cmd\n return ret_code\n","repo_name":"theCapypara/riptide-lib","sub_path":"riptide/config/command/in_service.py","file_name":"in_service.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"33515725756","text":"import unittest\nfrom unittest.mock import Mock\n\nimport elasticsearch_follow\n\n\nclass TestDefaultProcessor(unittest.TestCase):\n def test_default_processor(self):\n processor = elasticsearch_follow.DefaultProcessor()\n es_follow = Mock()\n es_follow.get_new_lines.return_value = [\n {\"msg\": \"line1\", \"@timestamp\": \"2019-01-01T10:01:00\"}\n ]\n follower = elasticsearch_follow.Follower(\n es_follow, \"some_index\", 120, processor\n )\n\n generator = follower.generator()\n\n self.assertEqual(next(generator), \"2019-01-01T10:01:00 line1\")\n","repo_name":"mdreem/elasticsearch_follow","sub_path":"tests/test_default_processor.py","file_name":"test_default_processor.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"23587067568","text":"# ******************************************************************************\n# Title : Pythonで学ぶ実験計画法\n# Chapter : 3 データ解析や回帰分析の手法\n# Theme : サポートベクター回帰(線形カーネル)\n# Date : 2021/11/24\n# Page : P60 - P67\n# ******************************************************************************\n\n\n# <概要>\n# - クラス分類手法であるサポートベクターマシンを回帰分析に応用した手法\n# --- カーネルトリックにより線形モデルを非線形に拡張している\n\n\n# <目次>\n# 0 準備\n# 1 データ定義\n# 2 データ分割\n# 3 データ加工\n# 4 ハイパーパラメータのチューニング\n# 5 モデル構築\n# 6 プロット作成\n# 7 予測精度の確認\n# 8 テストデータによる検証\n\n\n# 0 準備 ----------------------------------------------------------------\n\n# ライブラリ\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport 
numpy as np\nfrom sklearn.model_selection import train_test_split, KFold, GridSearchCV\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n\n\n# データ準備\ndf = pd.read_csv('csv/resin.csv', index_col=0, header=0)\n\n\n# 1 データ定義 ------------------------------------------------------------\n\n# <ポイント>\n# - 目的変数はpropertyとして、その他のデータを説明変数とする\n\n\n# データ定義\n# --- 目的変数\n# --- 説明変数\ny = df.iloc[:, 0]\nx = df.iloc[:, 1:]\n\n\n# 2 データ分割 ----------------------------------------------------------------\n\n# パラメータ設定\n# --- テストデータのサンプル数\nnumber_of_test_samples = 5\n\n# データ分割\nif number_of_test_samples == 0:\n x_train = x.copy()\n x_test = x.copy()\n y_train = y.copy()\n y_test = y.copy()\nelse:\n x_train, x_test, y_train, y_test = train_test_split(x, y,\n test_size=number_of_test_samples,\n shuffle=True,\n random_state=99)\n\n\n# 3 データ加工 ---------------------------------------------------------------\n\n# <ポイント>\n# - ツリーモデルでは元データをそのまま用いて学習器を生成する(Zスコア変換する必要はない)\n\n\n# ゼロ・バリアンス・フィルタ\ndeleting_variables = x_train.columns[x_train.std() == 0]\nx_train = x_train.drop(deleting_variables, axis=1)\nx_test = x_test.drop(deleting_variables, axis=1)\n\n# データの標準化\nautoscaled_y_train = (y_train - y_train.mean()) / y_train.std()\nautoscaled_x_train = (x_train - x_train.mean()) / x_train.std()\n\n\n# 4 ハイパーパラメータのチューニング ----------------------------------------------\n\n# パラメータ設定\n# --- クロスバリデーションのFold数\n# --- 線形SVR のCの候補\n# --- 線形SVRのεの候補\nfold_number = 10\nlinear_svr_cs = 2 ** np.arange(-10, 5, dtype=float)\nlinear_svr_epsilons = 2 ** np.arange(-10, 0, dtype=float)\n\n# クロスバリデーションの分割の設定\ncross_validation = KFold(n_splits=fold_number, random_state=9, shuffle=True)\n\n# グリッドサーチの設定\ngs_cv = GridSearchCV(SVR(kernel='linear'),\n {'C': linear_svr_cs, 'epsilon': linear_svr_epsilons},\n cv=cross_validation)\n\n# グリッドサーチ + クロスバリデーション実施\ngs_cv.fit(autoscaled_x_train, autoscaled_y_train)\n\n# 最適パラメータ\n# --- C\n# --- ε\noptimal_linear_svr_c = gs_cv.best_params_['C']\noptimal_linear_svr_epsilon = gs_cv.best_params_['epsilon']\n\n\n# 5 モデル構築 -----------------------------------------------------------------------\n\n# インスタンス生成\nmodel = SVR(kernel='linear', C=optimal_linear_svr_c, epsilon=optimal_linear_svr_epsilon)\n\n# モデル構築\nmodel.fit(X=autoscaled_x_train, y=autoscaled_y_train)\n\n# 確認\nvars(model)\n\n# トレーニングデータを用いた予測\nautoscaled_estimated_y_train = model.predict(autoscaled_x_train)\n\n# スケールをもとに戻す\nestimated_y_train = autoscaled_estimated_y_train * y_train.std() + y_train.mean()\nestimated_y_train = pd.DataFrame(estimated_y_train, index=x_train.index, columns=['estimated_y'])\n\n\n# 6 プロット作成 ----------------------------------------------------------------------\n\n# パラメータ設定\nplt.rcParams['font.size'] = 12\n\n# プロット定義\n# --- 散布図(実測値 vs 推定値)\nplt.scatter(y_train, estimated_y_train.iloc[:, 0], c='blue')\n\n# プロット範囲の取得\ny_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max())\ny_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min())\n\n# プロット設定\n# --- 取得した最小値-5%から最大値+5%まで、対角線を作成\n# --- 図の形を正方形にする\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlabel('actual y')\nplt.ylabel('estimated y')\nplt.gca().set_aspect('equal', adjustable='box')\n\n# プロット表示\nplt.show()\n\n\n# 7 予測精度の確認 
----------------------------------------------------------\n\n# メトリック出力\n# --- R2は1、MSEとMAEは0\nr2_score(y_true=y_train, y_pred=estimated_y_train)\nmean_squared_error(y_true=y_train, y_pred=estimated_y_train)\nmean_absolute_error(y_true=y_train, y_pred=estimated_y_train)\n\n\n# 8 テストデータによる検証 --------------------------------------------------\n\n# データ標準化\n# --- 訓練データに対して基準化する点に注意\nautoscaled_x_test = (x_test - x_train.mean()) / x_train.std()\n\n# 予測値の出力\nautoscaled_estimated_y_test = model.predict(autoscaled_x_test)\n\n# スケールを元に戻す\nestimated_y_test = autoscaled_estimated_y_test * y_train.std() + y_train.mean()\n\n# データフレーム格納\nestimated_y_test = pd.DataFrame(estimated_y_test, index=x_test.index, columns=['estimated_y'])\n\n# メトリック出力\nr2_score(y_true=y_test, y_pred=estimated_y_test)\nmean_squared_error(y_true=y_test, y_pred=estimated_y_test, squared=False)\nmean_absolute_error(y_true=y_test, y_pred=estimated_y_test)\n\n# プロット作成\n# --- テストデータなので5サンプルしかない\nplt.rcParams['font.size'] = 12\nplt.scatter(y_test, estimated_y_test.iloc[:, 0], c='blue')\ny_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max())\ny_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min())\nplt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')\nplt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\nplt.xlabel('actual y')\nplt.ylabel('estimated y')\nplt.gca().set_aspect('equal', adjustable='box')\nplt.show()\n","repo_name":"delta0726/py-machine_learning","sub_path":"book/py_doe/03-10_線形カーネル.py","file_name":"03-10_線形カーネル.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18111351992","text":"import json\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom boto3.session import Session\nimport datetime\nimport uuid\nimport datetime\nfrom datetime import timedelta\nfrom dateutil.tz import *\n\n\ndef lambda_handler(event, context):\n id_name = \"taskArn\"\n\n new_record = {}\n # For debugging so you can see raw event format.\n print('Here is the event:')\n print(json.dumps(event))\n \n session = boto3.session.Session()\n region = session.region_name\n\n db = boto3.resource('dynamodb')\n table = db.Table('initDB')\n initialised = table.scan(\n FilterExpression=Attr('initialized').eq(True) \n )['Items']\n\n if not initialised:\n init_db(region=region)\n\n\n if event[\"source\"] != \"aws.ecs\" and event[\"detail-type\"] != \"ECS Task State Change\":\n raise ValueError(\"Function only supports input from events with a source type of: aws.ecs and of type - ECS Task State Change -\")\n\n if event[\"detail\"][\"lastStatus\"] == event[\"detail\"][\"desiredStatus\"]:\n event_id = event[\"detail\"][\"taskArn\"]\n\n s = Session()\n cur_region = s.region_name\n dynamodb = boto3.resource(\"dynamodb\", region_name=cur_region)\n table = dynamodb.Table(\"ECSTaskStatus\")\n saved_event = table.get_item( Key = { id_name : event_id } )\n \n # Look first to see if you have received this taskArn before.\n # If not,\n # - you are getting a new task that has just started, or the Lambda solution was deployed\n # after the task started and it is being stopped now.\n # - store its details in DDB\n # If yes,\n # - that just means that you are receiving a task change - mostly a stop event.\n # - store the stop time in the task item in DDB\n if 
\"Item\" in saved_event:\n if event[\"detail\"][\"lastStatus\"] == \"STOPPED\":\n #table.update_item( Key= { id_name : event_id },\n # AttributeUpdates= {\n # 'stoppedAt': {'S': event[\"detail\"][\"stoppedAt\"]},\n # },\n #)\n table.update_item( Key= { id_name : event_id },\n UpdateExpression=\"set stoppedAt = :d, runTime=:t\",\n ExpressionAttributeValues={\n ':d': str(event[\"detail\"][\"stoppedAt\"]),\n ':t': getRunTime(event[\"detail\"][\"startedAt\"], event[\"detail\"][\"stoppedAt\"])\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(\"Saving updated event - ID \" + event_id)\n else:\n # This could be if the task has just started, or\n # The Lambda is deployed after the task has started running.\n # In this case, the task event will only be raised when it is stopped.\n new_record[\"launchType\"] = event[\"detail\"][\"launchType\"]\n new_record[\"region\"] = event[\"region\"]\n new_record[\"clusterArn\"] = event[\"detail\"][\"clusterArn\"]\n new_record[\"cpu\"] = event[\"detail\"][\"cpu\"]\n new_record[\"memory\"] = event[\"detail\"][\"memory\"]\n if new_record[\"launchType\"] == 'FARGATE':\n new_record[\"containerInstanceArn\"] = 'INSTANCE_ID_UNKNOWN'\n (new_record['instanceType'], new_record['osType'], new_record['instanceId']) = ('INSTANCE_TYPE_UNKNOWN', 'linux', 'INSTANCE_ID_UNKNOWN')\n else:\n new_record[\"containerInstanceArn\"] = event[\"detail\"][\"containerInstanceArn\"]\n (new_record['instanceType'], new_record['osType'], new_record['instanceId']) = getInstanceType(event['region'], event['detail']['clusterArn'], event['detail']['containerInstanceArn'], event['detail']['launchType'])\n\n if ':' in event[\"detail\"][\"group\"]:\n new_record[\"group\"], new_record[\"groupName\"] = event[\"detail\"][\"group\"].split(':')\n else:\n new_record[\"group\"], new_record[\"groupName\"] = 'taskgroup', event[\"detail\"][\"group\"]\n\n # Not provided in FARGATE - new_record[\"pullStartedAt\"] = event[\"detail\"][\"pullStartedAt\"]\n new_record[\"startedAt\"] = event[\"detail\"][\"startedAt\"]\n new_record[\"taskArn\"] = event_id\n new_record['stoppedAt'] = 'STILL-RUNNING'\n new_record['runTime'] = 0\n\n if event[\"detail\"][\"lastStatus\"] == \"STOPPED\":\n new_record['stoppedAt'] = event[\"detail\"][\"stoppedAt\"]\n new_record['runTime'] = getRunTime(event[\"detail\"][\"startedAt\"], event[\"detail\"][\"stoppedAt\"])\n \n table.put_item( Item=new_record )\n print(\"Saving new event - ID \" + event_id)\n \ndef getInstanceType(region, cluster, instance, launchType):\n instanceType = 'INSTANCE_TYPE_UNKNOWN'\n osType = 'linux'\n instanceId = 'INSTANCE_ID_UNKNOWN'\n \n # Shouldnt care about isntanceType if this is a FARGATE task\n if launchType == 'FARGATE':\n return (instanceType, osType, instanceId)\n \n ecs = boto3.client(\"ecs\")\n try:\n result = ecs.describe_container_instances(cluster=cluster, containerInstances=[instance])\n if result and 'containerInstances' in result:\n attr_dict = result['containerInstances'][0]['attributes']\n \n instanceId = result['containerInstances'][0][\"ec2InstanceId\"]\n \n instance_type = [d['value'] for d in attr_dict if d['name'] == 'ecs.instance-type']\n if len(instance_type):\n # Return the instanceType. In addition, store this value in a DynamoDB table.\n instanceType = instance_type[0]\n \n os_type = [d['value'] for d in attr_dict if d['name'] == 'ecs.os-type']\n if len(os_type):\n # Return the osType. 
 instanceId = result['containerInstances'][0][\"ec2InstanceId\"]\n \n instance_type = [d['value'] for d in attr_dict if d['name'] == 'ecs.instance-type']\n if len(instance_type):\n # Return the instanceType. In addition, store this value in a DynamoDB table.\n instanceType = instance_type[0]\n \n os_type = [d['value'] for d in attr_dict if d['name'] == 'ecs.os-type']\n if len(os_type):\n # Return the osType. In addition, store this value in a DynamoDB table.\n osType = os_type[0]\n \n # Else - if describe_instances doesn't return a result, make a last attempt check in the DynamoDB table\n # that keeps a mapping of containerInstanceARN to instanceType\n return (instanceType, osType, instanceId)\n except Exception:\n # Try finding the instanceType in the DynamoDB table\n return (instanceType, osType, instanceId)\n \ndef getRunTime(startTime, stopTime):\n start = datetime.datetime.strptime(startTime, '%Y-%m-%dT%H:%M:%S.%fZ')\n stop = datetime.datetime.strptime(stopTime, '%Y-%m-%dT%H:%M:%S.%fZ')\n runTime = (stop-start).total_seconds()\n return int(round((runTime)))\n\ndef putTasks(region, cluster, task):\n id_name = 'taskArn'\n task_id = task[\"taskArn\"]\n new_record = {}\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=region)\n table = dynamodb.Table(\"ECSTaskStatus\")\n saved_task = table.get_item(Key={id_name: task_id})\n\n # Look first to see if you have received this taskArn before.\n # If not,\n # - you are getting a new task - i.e. the script is being run for the first time.\n # - store its details in DDB\n # If yes,\n # - the script is being run after the solution has been deployed.\n # - don't do anything; quit.\n if \"Item\" in saved_task:\n print(\"Task: %s already in the DynamoDB table.\" % (task_id))\n return 1\n else:\n new_record[\"launchType\"] = task[\"launchType\"]\n new_record[\"region\"] = region\n new_record[\"clusterArn\"] = task[\"clusterArn\"]\n new_record[\"cpu\"] = task[\"cpu\"]\n new_record[\"memory\"] = task[\"memory\"]\n if new_record[\"launchType\"] == 'FARGATE':\n new_record[\"containerInstanceArn\"] = 'INSTANCE_ID_UNKNOWN'\n (new_record['instanceType'], new_record['osType'], new_record['instanceId']) = (\n 'INSTANCE_TYPE_UNKNOWN', 'linux', 'INSTANCE_ID_UNKNOWN')\n else:\n new_record[\"containerInstanceArn\"] = task[\"containerInstanceArn\"]\n (new_record['instanceType'], new_record['osType'], new_record['instanceId']) = getInstanceType(\n region, task['clusterArn'], task['containerInstanceArn'], task['launchType'])\n\n if ':' in task[\"group\"]:\n new_record[\"group\"], new_record[\"groupName\"] = task[\"group\"].split(\n ':')\n else:\n new_record[\"group\"], new_record[\"groupName\"] = 'taskgroup', task[\"group\"]\n\n # Convert startedAt time to UTC from local timezone. 
The time returned from ecs_describe_tasks() will be in local TZ.\n startedAt = task[\"startedAt\"].astimezone(tzutc())\n new_record[\"startedAt\"] = datetime.datetime.strftime(\n startedAt, '%Y-%m-%dT%H:%M:%S.%fZ')\n new_record[\"taskArn\"] = task_id\n new_record['stoppedAt'] = 'STILL-RUNNING'\n new_record['runTime'] = 0\n\n table.put_item(Item=new_record)\n return 0\n\ndef init_db(region: str):\n\n \n ecs = boto3.client(\"ecs\", region_name=region)\n response = ecs.list_clusters()\n\n clusters = []\n if 'clusterArns' in response and response['clusterArns']:\n clusters = response['clusterArns']\n\n tasks = []\n for cluster in clusters:\n nextToken = ''\n while True:\n response = ecs.list_tasks(\n cluster=cluster, maxResults=100, nextToken=nextToken)\n tasks = tasks + [(cluster, taskArn)\n for taskArn in response['taskArns']]\n if 'nextToken' in response and response['nextToken']:\n nextToken = response['nextToken']\n else:\n break\n\n for (cluster, task) in tasks:\n # Use the range function to get maybe 10 tasks at a time.\n # taskDetails = ecs.describe_tasks(cluster=cluster, tasks=[task])\n\n taskDetails = ecs.describe_tasks(cluster=cluster, tasks=[task])\n\n # Get the task's details and make an entry in DDB.\n putTasks(region, cluster, taskDetails['tasks'][0])\n\n db = boto3.resource('dynamodb')\n table = db.Table('initDB')\n\n table.put_item(\n Item={\n 'id':uuid.uuid4().hex,\n 'initialized':True,\n 'date':datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n }\n )","repo_name":"vncgrvs/ecs-metering","sub_path":"terraform/modules/lambda/metering_lambda/ecsTaskStatus.py","file_name":"ecsTaskStatus.py","file_ext":"py","file_size_in_byte":10218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34467404630","text":"import copy\nfrom collections import defaultdict\ndef pick(d, i, cards):\n res = 0\n d2 = d\n while True:\n if d2[i] == -1: break\n temp = i\n i = d2[i]\n d2[temp] = -1\n res+=1\n return res\ndef solution(cards):\n answer = 0\n d = defaultdict(int)\n for i in range(len(cards)):\n d[i+1] = cards[i]\n d2 = copy.deepcopy(d)\n for i in range(1, len(cards)+1):\n d = copy.deepcopy(d2)\n cnt1= pick(d, i, cards)\n cnt2= 0\n if cnt1 < len(cards):\n index = -1\n for j in d.keys():\n if d[j] != -1:\n index = j\n cnt2 = pick(d, index, cards)\n answer = max(cnt1 * cnt2, answer)\n return answer\n","repo_name":"soodal5629/codingTestProblemSolve","sub_path":"혼자 놀기의 달인.py","file_name":"혼자 놀기의 달인.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29762866406","text":"from bs4 import BeautifulSoup\n# from selenium import webdriver\n# from selenium.webdriver.common.by import By\nimport time\nimport requests\nurl = 'http://5850web.moneydj.com/z/zg/zgb/zgb0.djhtm?a=9200&b=9268&c=E&e=2022-6-10&f=2022-6-10'\nheaders = {'user-agent': 'Mozilla/5.0'}\nre = requests.get(url, headers=headers)\ntemp = BeautifulSoup(re.text, 'html.parser')\n# for i in range(1,10):\nindex = temp.find_all('td', class_='t4t1', limit=10)\ntempp = temp.find_all('td', class_='id')\nprint(temp)\nprint('====================================================')\nfor a in temp.find_all('a', href=True):\n print(a.text)\n\n","repo_name":"CowBae7777/twstock","sub_path":"bot/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"33237590592","text":"#!/usr/bin/env python\nimport json\nimport math\nimport rospy\nimport rospkg\nimport rosparam\nimport copy\n\nimport nexon_msgs.msg\n\nfrom nexon.robot import Robot\nfrom nexon.io import parse_file\nfrom nexon.interface import Commands, Sections\n\nfrom benchmark_runner.planner_interface import PlannerInterface\nfrom benchmark_runner.task_solver import create_pose_msg\nfrom benchmark_runner.exceptions import PlanningFailedError\n\n\nfrom nexon_msgs.msg import PoseConstraint\n\n\ndef create_constraint_message(con):\n pc = PoseConstraint()\n pc.relative = True\n if con[\"type\"] != \"rpy\":\n raise NotImplementedError(\"Only rpy constraints implemented for now.\")\n pc.rpy_min = con[\"min\"]\n pc.rpy_max = con[\"max\"]\n return pc\n\n\ndef execute_plans(robot, plans):\n \"\"\"\n Execute a list of plans, this list is returned when solving a task.\n \"\"\"\n # make sure the robot is actually in the home position\n # before executing a plan\n robot.mg.set_joint_value_target(\n plans[0].joint_trajectory.points[0].positions)\n robot.mg.go(wait=True)\n print(\"Moved to home, start executing task.\")\n\n # TODO quick fix, add first point to lin path\n plans[1].joint_trajectory.points.insert(\n 0, plans[0].joint_trajectory.points[-1])\n\n for plan in plans:\n print(\"========================================\")\n print(\"executing plan of lenght\")\n print(len(plan.joint_trajectory.points))\n print(plan.joint_trajectory.points[0])\n print(plan.joint_trajectory.points[1])\n print(\"\\n...\\n\")\n print(plan.joint_trajectory.points[-1])\n print(\"========================================\")\n # print(plan)\n robot.mg.execute(plan, wait=True)\n rospy.sleep(1.0)\n\n\ndef movep_sampling(pi, start_config, goal, con):\n # print(start_config)\n # print(goal)\n # print(con)\n samples = pi.sample(goal, con)\n configs = [list(q.positions) for q in samples.joint_poses]\n for q in configs:\n try:\n plan = pi.movep(start_config, goal)\n except PlanningFailedError as e:\n print(e)\n continue\n return plan\n\n raise PlanningFailedError(\"Sampling movep failed.\")\n\n\ndef plan_task(psi, task):\n # fixed assumption, the robot starts from home\n initial_config = task[Sections.VARS][\"home\"]\n\n plans = []\n var = task[Sections.VARS]\n con = task[\"constraints\"]\n\n for command in task[Sections.COMMANDS]:\n # what is the inital configuration for the current planning command?\n if len(plans) == 0:\n start_config = initial_config\n else:\n start_config = plans[-1].joint_trajectory.points[-1].positions\n\n ctype = command[\"type\"]\n print(command)\n if ctype == Commands.MOVEJ:\n plan = psi.movej(start_config, var[command[\"goal\"]])\n plans.append(plan)\n\n elif ctype == Commands.MOVEP:\n plan = movep_sampling(\n psi,\n start_config,\n create_pose_msg(var[command[\"goal\"]]),\n create_constraint_message(con[command[\"constraints\"][0]])\n )\n # plan = psi.movep(\n # start_config, create_pose_msg(var[command[\"goal\"]]))\n plans.append(plan)\n\n elif ctype == Commands.MOVELIN:\n plan = psi.movel(\n start_config, create_pose_msg(var[command[\"goal\"]]))\n plans.append(plan)\n\n else:\n raise Exception(\n \"Unkown command type: {}\".format(ctype))\n\n return plans\n\n\ndef print_task(task):\n for key in task:\n for v in task[key]:\n try:\n print(v, task[key][v])\n except TypeError:\n print(v)\n\n\nif __name__ == \"__main__\":\n planning_group_name = \"group_1\"\n\n rospy.init_node(\"execute_simple_task\")\n rospack = rospkg.RosPack()\n\n # read and get all config parameter stuff\n filepath 
= rosparam.get_param(\"/planning_task_path\")\n config_file = \"planning_groups.json\"\n config_file_path = rospack.get_path(\"benchmark_runner\") + \"/config/\"\n with open(config_file_path + config_file) as file:\n config = json.load(file)\n group_config = config[\"groups\"][planning_group_name]\n\n # hardcoded constraint\n # Free rotation around z-axis\n constraint = nexon_msgs.msg.PoseConstraint()\n constraint.relative = True\n constraint.rpy_min = [0, 0, -math.pi]\n constraint.rpy_max = [0, 0, math.pi]\n\n pi = PlannerInterface(group_config)\n task = parse_file(filepath)\n\n plans = plan_task(pi, task)\n robot = Robot()\n execute_plans(robot, plans)\n\n # home_config = task[\"variables\"][\"home\"]\n\n # sample valid joint configs for P1\n # samples = pi.sample(create_pose_msg(task[\"variables\"][\"P1\"]), constraint)\n # configs = [list(q.positions) for q in samples.joint_poses]\n\n # WARNING ugly code ahead\n # found = False\n # for config in configs:\n # try:\n # plan1 = pi.movej(home_config, config)\n # except PlanningFailedError as e:\n # print(e)\n # print(\"#### PTP 1: Continue with for loop\")\n # continue\n\n # start_config = copy.deepcopy(\n # plan1.joint_trajectory.points[-1].positions)\n\n # try:\n # plan2 = pi.movel(start_config, create_pose_msg(\n # task[\"variables\"][\"P2\"]))\n # except PlanningFailedError as e:\n # print(e)\n # print(\"#### LIN: Continue with for loop\")\n # continue\n\n # start_config_2 = copy.deepcopy(\n # plan2.joint_trajectory.points[-1].positions)\n\n # try:\n # plan3 = pi.movej(start_config_2, home_config)\n # except PlanningFailedError as e:\n # print(e)\n # print(\"#### PTP 2: Continue with for loop\")\n # continue\n\n # found = True\n # print(\"### Plan found\")\n # break\n\n # if found:\n # print(\"############ found plans, executing the plans\")\n # robot = Robot()\n # execute_plans(robot, [plan1, plan2, plan3])\n # else:\n # print(\"Failed to find plan.\")\n","repo_name":"JeroenDM/benchmark_runner","sub_path":"benchmark_runner/scripts/setup_3_hardcoded.py","file_name":"setup_3_hardcoded.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41930363174","text":"from google.api_core.exceptions import Aborted\nfrom google.api_core.exceptions import AlreadyExists\nfrom google.api_core.exceptions import FailedPrecondition\nfrom google.api_core.exceptions import InternalServerError\nfrom google.api_core.exceptions import InvalidArgument\n\nfrom collections import namedtuple\n\nfrom google.cloud import spanner_v1 as spanner\nfrom google.cloud.spanner_dbapi.checksum import ResultsChecksum\nfrom google.cloud.spanner_dbapi.exceptions import IntegrityError\nfrom google.cloud.spanner_dbapi.exceptions import InterfaceError\nfrom google.cloud.spanner_dbapi.exceptions import OperationalError\nfrom google.cloud.spanner_dbapi.exceptions import ProgrammingError\n\nfrom google.cloud.spanner_dbapi import _helpers\nfrom google.cloud.spanner_dbapi._helpers import ColumnInfo\nfrom google.cloud.spanner_dbapi._helpers import code_to_display_size\n\nfrom google.cloud.spanner_dbapi import parse_utils\nfrom google.cloud.spanner_dbapi.parse_utils import get_param_types\nfrom google.cloud.spanner_dbapi.parse_utils import sql_pyformat_args_to_spanner\nfrom google.cloud.spanner_dbapi.utils import PeekIterator\nfrom google.cloud.spanner_dbapi.utils import StreamedManyResultSets\n\n_UNSET_COUNT = -1\n\nColumnDetails = namedtuple(\"column_details\", [\"null_ok\", 
\"spanner_type\"])\nStatement = namedtuple(\"Statement\", \"sql, params, param_types, checksum, is_insert\")\n\n\nclass Cursor(object):\n \"\"\"Database cursor to manage the context of a fetch operation.\n\n :type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`\n :param connection: A DB-API connection to Google Cloud Spanner.\n \"\"\"\n\n def __init__(self, connection):\n self._itr = None\n self._result_set = None\n self._row_count = _UNSET_COUNT\n self.lastrowid = None\n self.connection = connection\n self._is_closed = False\n # the currently running SQL statement results checksum\n self._checksum = None\n\n # the number of rows to fetch at a time with fetchmany()\n self.arraysize = 1\n\n @property\n def is_closed(self):\n \"\"\"The cursor close indicator.\n\n :rtype: bool\n :returns: True if the cursor or the parent connection is closed,\n otherwise False.\n \"\"\"\n return self._is_closed or self.connection.is_closed\n\n @property\n def description(self):\n \"\"\"Read-only attribute containing a sequence of the following items:\n\n - ``name``\n - ``type_code``\n - ``display_size``\n - ``internal_size``\n - ``precision``\n - ``scale``\n - ``null_ok``\n\n :rtype: tuple\n :returns: A tuple of columns' information.\n \"\"\"\n if not self._result_set:\n return None\n\n if not getattr(self._result_set, \"metadata\", None):\n return None\n\n row_type = self._result_set.metadata.row_type\n columns = []\n\n for field in row_type.fields:\n column_info = ColumnInfo(\n name=field.name,\n type_code=field.type_.code,\n # Size of the SQL type of the column.\n display_size=code_to_display_size.get(field.type_.code),\n # Client perceived size of the column.\n internal_size=field._pb.ByteSize(),\n )\n columns.append(column_info)\n\n return tuple(columns)\n\n @property\n def rowcount(self):\n \"\"\"The number of rows produced by the last `.execute()`.\n\n :rtype: int\n :returns: The number of rows produced by the last .execute*().\n \"\"\"\n return self._row_count\n\n def _raise_if_closed(self):\n \"\"\"Raise an exception if this cursor is closed.\n\n Helper to check this cursor's state before running a\n SQL/DDL/DML query. 
If the parent connection is\n already closed it also raises an error.\n\n :raises: :class:`InterfaceError` if this cursor is closed.\n \"\"\"\n if self.is_closed:\n raise InterfaceError(\"Cursor and/or connection is already closed.\")\n\n def callproc(self, procname, args=None):\n \"\"\"A no-op, raising an error if the cursor or connection is closed.\"\"\"\n self._raise_if_closed()\n\n def close(self):\n \"\"\"Closes this cursor.\"\"\"\n self._is_closed = True\n\n def _do_execute_update(self, transaction, sql, params):\n sql = parse_utils.ensure_where_clause(sql)\n sql, params = parse_utils.sql_pyformat_args_to_spanner(sql, params)\n\n result = transaction.execute_update(\n sql, params=params, param_types=get_param_types(params)\n )\n self._itr = None\n if type(result) == int:\n self._row_count = result\n\n return result\n\n def execute(self, sql, args=None):\n \"\"\"Prepares and executes a Spanner database operation.\n\n :type sql: str\n :param sql: A SQL query statement.\n\n :type args: list\n :param args: Additional parameters to supplement the SQL query.\n \"\"\"\n if not self.connection:\n raise ProgrammingError(\"Cursor is not connected to the database\")\n\n self._raise_if_closed()\n\n self._result_set = None\n\n # Classify whether this is a read-only SQL statement.\n try:\n classification = parse_utils.classify_stmt(sql)\n if classification == parse_utils.STMT_DDL:\n for ddl in sql.split(\";\"):\n ddl = ddl.strip()\n if ddl:\n self.connection._ddl_statements.append(ddl)\n if self.connection.autocommit:\n self.connection.run_prior_DDL_statements()\n return\n\n # For every other operation, we've got to ensure that\n # any prior DDL statements were run.\n # self._run_prior_DDL_statements()\n self.connection.run_prior_DDL_statements()\n\n if not self.connection.autocommit:\n if classification == parse_utils.STMT_UPDATING:\n sql = parse_utils.ensure_where_clause(sql)\n\n if classification != parse_utils.STMT_INSERT:\n sql, args = sql_pyformat_args_to_spanner(sql, args or None)\n\n statement = Statement(\n sql,\n args,\n get_param_types(args or None)\n if classification != parse_utils.STMT_INSERT\n else {},\n ResultsChecksum(),\n classification == parse_utils.STMT_INSERT,\n )\n (self._result_set, self._checksum,) = self.connection.run_statement(\n statement\n )\n while True:\n try:\n self._itr = PeekIterator(self._result_set)\n break\n except Aborted:\n self.connection.retry_transaction()\n return\n\n if classification == parse_utils.STMT_NON_UPDATING:\n self._handle_DQL(sql, args or None)\n elif classification == parse_utils.STMT_INSERT:\n _helpers.handle_insert(self.connection, sql, args or None)\n else:\n self.connection.database.run_in_transaction(\n self._do_execute_update, sql, args or None\n )\n except (AlreadyExists, FailedPrecondition) as e:\n raise IntegrityError(e.details if hasattr(e, \"details\") else e)\n except InvalidArgument as e:\n raise ProgrammingError(e.details if hasattr(e, \"details\") else e)\n except InternalServerError as e:\n raise OperationalError(e.details if hasattr(e, \"details\") else e)\n\n def executemany(self, operation, seq_of_params):\n \"\"\"Execute the given SQL with every parameters set\n from the given sequence of parameters.\n\n :type operation: str\n :param operation: SQL code to execute.\n\n :type seq_of_params: list\n :param seq_of_params: Sequence of additional parameters to run\n the query with.\n \"\"\"\n self._raise_if_closed()\n\n classification = parse_utils.classify_stmt(operation)\n if classification == parse_utils.STMT_DDL:\n raise 
ProgrammingError(\n \"Executing DDL statements with executemany() method is not allowed.\"\n )\n\n many_result_set = StreamedManyResultSets()\n\n for params in seq_of_params:\n self.execute(operation, params)\n many_result_set.add_iter(self._itr)\n\n self._result_set = many_result_set\n self._itr = many_result_set\n\n def fetchone(self):\n \"\"\"Fetch the next row of a query result set, returning a single\n sequence, or None when no more data is available.\"\"\"\n self._raise_if_closed()\n\n try:\n res = next(self)\n if not self.connection.autocommit:\n self._checksum.consume_result(res)\n return res\n except StopIteration:\n return\n except Aborted:\n self.connection.retry_transaction()\n return self.fetchone()\n\n def fetchall(self):\n \"\"\"Fetch all (remaining) rows of a query result, returning them as\n a sequence of sequences.\n \"\"\"\n self._raise_if_closed()\n\n res = []\n try:\n for row in self:\n if not self.connection.autocommit:\n self._checksum.consume_result(row)\n res.append(row)\n except Aborted:\n self.connection.retry_transaction()\n return self.fetchall()\n\n return res\n\n def fetchmany(self, size=None):\n \"\"\"Fetch the next set of rows of a query result, returning a sequence\n of sequences. An empty sequence is returned when no more rows are available.\n\n :type size: int\n :param size: (Optional) The maximum number of results to fetch.\n\n :raises InterfaceError:\n if the previous call to .execute*() did not produce any result set\n or if no call was issued yet.\n \"\"\"\n self._raise_if_closed()\n\n if size is None:\n size = self.arraysize\n\n items = []\n for i in range(size):\n try:\n res = next(self)\n if not self.connection.autocommit:\n self._checksum.consume_result(res)\n items.append(res)\n except StopIteration:\n break\n except Aborted:\n self.connection.retry_transaction()\n return self.fetchmany(size)\n\n return items\n\n def nextset(self):\n \"\"\"A no-op, raising an error if the cursor or connection is closed.\"\"\"\n self._raise_if_closed()\n\n def setinputsizes(self, sizes):\n \"\"\"A no-op, raising an error if the cursor or connection is closed.\"\"\"\n self._raise_if_closed()\n\n def setoutputsize(self, size, column=None):\n \"\"\"A no-op, raising an error if the cursor or connection is closed.\"\"\"\n self._raise_if_closed()\n\n def _handle_DQL(self, sql, params):\n with self.connection.database.snapshot() as snapshot:\n # Reference\n # https://googleapis.dev/python/spanner/latest/session-api.html#google.cloud.spanner_v1.session.Session.execute_sql\n sql, params = parse_utils.sql_pyformat_args_to_spanner(sql, params)\n res = snapshot.execute_sql(\n sql, params=params, param_types=get_param_types(params)\n )\n if type(res) == int:\n self._row_count = res\n self._itr = None\n else:\n # Immediately using:\n # iter(response)\n # here, because this Spanner API doesn't provide\n # easy mechanisms to detect when only a single item\n # is returned or many, yet mixing results that\n # are for .fetchone() with those that would result in\n # many items returns a RuntimeError if .fetchone() is\n # invoked and vice versa.\n self._result_set = res\n # Read the first element so that the StreamedResultSet can\n # return the metadata after a DQL statement. 
See issue #155.\n while True:\n try:\n self._itr = PeekIterator(self._result_set)\n break\n except Aborted:\n self.connection.retry_transaction()\n # Unfortunately, Spanner doesn't seem to send back\n # information about the number of rows available.\n self._row_count = _UNSET_COUNT\n\n def __enter__(self):\n return self\n\n def __exit__(self, etype, value, traceback):\n self.close()\n\n def __next__(self):\n if self._itr is None:\n raise ProgrammingError(\"no results to return\")\n return next(self._itr)\n\n def __iter__(self):\n if self._itr is None:\n raise ProgrammingError(\"no results to return\")\n return self._itr\n\n def list_tables(self):\n \"\"\"List the tables of the linked Database.\n\n :rtype: list\n :returns: The list of tables within the Database.\n \"\"\"\n return self.run_sql_in_snapshot(_helpers.SQL_LIST_TABLES)\n\n def run_sql_in_snapshot(self, sql, params=None, param_types=None):\n # Some SQL e.g. for INFORMATION_SCHEMA cannot be run in read-write transactions\n # hence this method exists to circumvent that limit.\n self.connection.run_prior_DDL_statements()\n\n with self.connection.database.snapshot() as snapshot:\n res = snapshot.execute_sql(sql, params=params, param_types=param_types)\n return list(res)\n\n def get_table_column_schema(self, table_name):\n rows = self.run_sql_in_snapshot(\n sql=_helpers.SQL_GET_TABLE_COLUMN_SCHEMA,\n params={\"table_name\": table_name},\n param_types={\"table_name\": spanner.param_types.STRING},\n )\n\n column_details = {}\n for column_name, is_nullable, spanner_type in rows:\n column_details[column_name] = ColumnDetails(\n null_ok=is_nullable == \"YES\", spanner_type=spanner_type\n )\n return column_details\n","repo_name":"muralikrishna-gt/capacity-api","sub_path":"google/cloud/spanner_dbapi/cursor.py","file_name":"cursor.py","file_ext":"py","file_size_in_byte":14437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6439643643","text":"import numpy as np\n\n# Shift an image by a given amount - subpixel shifts are permitted\nfrom scipy.ndimage.interpolation import shift\n\n#\nfrom coalign_mapcube import default_data_manipulation_function, clip_edges, calculate_shift\n\n\n#\n# Coalign a datacube. Can be useful to have this functionality when you just\n# want to deal with datacubes and not the more complex maps.\n#\n# Shift a datacube according to calculated co-registration displacements\n#\ndef coalign_datacube(datacube, layer_index=0, template_index=None,\n clip=False, func=default_data_manipulation_function):\n \"\"\"\n Co-align the layers in a datacube by finding where a template best matches\n each layer in the datacube.\n\n Input\n -----\n datacube : a numpy array of shape (ny, nx, nt), where nt is the number of\n layers in the datacube.\n\n layer_index : the layer in the datacube from which the template will be\n extracted.\n\n template_index : an array-like set of co-ordinates of the bottom left hand\n corner and the top right corner of the template. If set\n to None, then the default template is used. The\n template_index is defined as [ [y1, x1], [y2, x2] ].\n\n clip : clip off x, y edges in the datacube that are potentially affected\n by edge effects.\n\n func: a function which is applied to the data values before the\n coalignment method is applied. This can be useful in coalignment,\n because it is sometimes better to co-align on a function of the data\n rather than the data itself. The calculated shifts are applied to\n the original data. 
Useful functions to consider are the log of the\n image data, or 1 / data. The function is of the form func = F(data). \n The default function ensures that the data are floats.\n\n Output\n ------\n datacube : the input datacube, each layer having been co-registered against\n the template.\n\n y_displacement : a one dimensional array of length nt with the pixel\n y-displacements relative to the position of the template at the\n value layer_index. Note that y_displacement[layer_index]\n is zero by definition.\n\n x_displacement : a one dimensional array of length nt with the pixel\n x-displacements relative to the position of the template at the\n value layer_index. Note that x_displacement[layer_index]\n is zero by definition.\n \"\"\"\n # Size of the data\n ny = datacube.shape[0]\n nx = datacube.shape[1]\n nt = datacube.shape[2]\n\n # Storage for the shifted data and the pixel shifts\n xshift_keep = np.zeros((nt))\n yshift_keep = np.zeros((nt))\n\n # Calculate the template\n if template_index is None:\n # default template: the central half of the template layer\n # (floor division keeps the slice indices integers in Python 3)\n template = datacube[ny // 4: 3 * ny // 4,\n nx // 4: 3 * nx // 4,\n layer_index]\n else:\n template = datacube[template_index[0][0]:template_index[1][0],\n template_index[0][1]:template_index[1][1],\n layer_index]\n\n # Apply the data manipulation function\n template = func(template)\n\n for i in range(0, nt):\n # Get the next 2-d data array\n this_layer = func(datacube[:, :, i])\n\n # Calculate the y and x shifts in pixels\n yshift, xshift = calculate_shift(this_layer, template)\n\n # Keep shifts in pixels\n yshift_keep[i] = yshift\n xshift_keep[i] = xshift\n\n # Calculate shifts relative to the template layer\n yshift_keep = yshift_keep - yshift_keep[layer_index]\n xshift_keep = xshift_keep - xshift_keep[layer_index]\n\n # Shift the data\n shifted_datacube = shift_datacube_layers(datacube, -yshift_keep, -xshift_keep)\n\n if clip:\n return clip_edges(shifted_datacube, yshift_keep, xshift_keep), yshift_keep, xshift_keep\n else:\n return shifted_datacube, yshift_keep, xshift_keep\n\n
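# --- Added usage sketch (hypothetical call, not part of the original module) ---\n# aligned, dy, dx = coalign_datacube(cube, layer_index=0, clip=True)\n# 'dy'/'dx' are per-layer pixel displacements relative to the template layer,\n# and clip=True trims the x/y edges that the shifts may have contaminated.\n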
\n#\n# Shift a datacube. Useful for coaligning images and performing solar\n# derotation.\n#\ndef shift_datacube_layers(datacube, yshift, xshift):\n ny = datacube.shape[0]\n nx = datacube.shape[1]\n nt = datacube.shape[2]\n shifted_datacube = np.zeros((ny, nx, nt))\n for i in range(0, nt):\n shifted_datacube[:, :, i] = shift(datacube[:, :, i], [yshift[i], xshift[i]])\n\n return shifted_datacube\n","repo_name":"wafels/rednoise","sub_path":"py/tools/coalign_datacube.py","file_name":"coalign_datacube.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6147133214","text":"def count_substring(s, sb):\r\n results = 0\r\n sub_len = len(sb)\r\n for i in range(len(s)):\r\n if s[i:i + sub_len] == sb:\r\n results += 1\r\n return results\r\n\r\n\r\nif __name__ == '__main__':\r\n string = input().strip()\r\n sub_string = input().strip()\r\n\r\n count = count_substring(string, sub_string)\r\n print(count)\r\n","repo_name":"AlMamun-CSE/Python-Problem-Solving","sub_path":"FindAString.py","file_name":"FindAString.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"9393339128","text":"from flask import Flask, send_from_directory, request\nfrom flask_cors import CORS\nimport uuid\nfrom pymongo import MongoClient\n#myclient = MongoClient(\"mongodb://localhost:27017/\")\nmyclient = MongoClient(host='test_mongodb', port=27017, username='admin', password='password')\ndb = myclient[\"ewaste_db\"]\nsession_ids = dict()\nfrom user import user_api, account_api\nfrom device import device_api\n#from payment import payment_api\nfrom auth import auth_api\nfrom vendors import vendors_api\nfrom datalinks import datalinks_api\nfrom transaction import transaction_api\nfrom dbscript import rebuilddb, buildvendordatasource\n\napp = Flask(__name__)\napp.register_blueprint(user_api, url_prefix='/user')\napp.register_blueprint(account_api, url_prefix='/account')\napp.register_blueprint(device_api, url_prefix='/device')\napp.register_blueprint(transaction_api, url_prefix='/transaction')\napp.register_blueprint(auth_api, url_prefix='/auth')\napp.register_blueprint(vendors_api, url_prefix='/vendor')\napp.register_blueprint(datalinks_api, url_prefix='/datalinks')\n\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\n\n@app.route(\"/\", defaults={'path':''})\ndef serve(path):\n return send_from_directory(app.static_folder,'index.html')\n\n\n@app.route('/apigettest')\ndef apigettest():\n print(request.json)\n if('session-id' in request.cookies and request.cookies.get('session-id') in session_ids):\n print(request.cookies.get('session-id'))\n return {\"response\":\"success\"}\n else:\n generated_session_id = str(uuid.uuid4())\n response = app.make_response({\"response\":\"success\", \"message\":\"cookie generated\"})\n response.set_cookie('session-id', generated_session_id)\n ##session_ids[generated_session_id] = str(uuid.uuid4())\n ##print(session_ids)\n return response\n\n\n@app.route('/apiposttest', methods=['POST'])\ndef apiposttest():\n print(request.json)\n return {\"results\": request.json}\n\nbuildvendordatasource()\nrebuilddb()\n","repo_name":"Larasify/eWaste","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4271732390","text":"import datetime\n\nfrom earthkit.data 
import from_source\n\n\ndef test_url_pattern_source_1():\n from_source(\n \"url-pattern\",\n \"https://get.ecmwf.int/repository/test-data/earthkit-data/examples/test.{format}\",\n {\"format\": [\"nc\", \"grib\"]},\n )\n # source.to_xarray()\n\n\ndef test_url_pattern_int():\n fs = from_source(\n \"url-pattern\",\n \"https://get.ecmwf.int/repository/test-data/earthkit-data/examples/test{id}.grib\",\n {\"id\": [4, 6]},\n )\n\n assert len(fs) == 10\n\n\ndef test_url_pattern_date():\n fs = from_source(\n \"url-pattern\",\n \"https://get.ecmwf.int/repository/test-data/earthkit-data/test-data/\"\n \"test_{my_date:date(%Y-%m-%d)}_{name}.grib\",\n {\"my_date\": datetime.datetime(2020, 5, 13), \"name\": [\"t2\", \"msl\"]},\n )\n\n assert len(fs) == 2\n\n\nif __name__ == \"__main__\":\n from earthkit.data.testing import main\n\n main(__file__)\n","repo_name":"ecmwf/earthkit-data","sub_path":"tests/sources/test_url_pattern.py","file_name":"test_url_pattern.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"62"} +{"seq_id":"71324421637","text":"# unpack the message receive from the server\nimport struct\n\n\ndef getTable(getMsg):\n head, kind, size, table, ownNick, ownCoin, mateNick, mateCoin, end = struct.unpack('!BBBH10sI10sIB', getMsg)\n ownNick = ownNick.decode('utf-8')\n mateNick = mateNick.decode('utf-8')\n getMsg = {'head': head, 'kind': kind, 'size': size, 'table': table, 'ownNick': ownNick,\n 'ownCoin': ownCoin, 'mateNick': mateNick, 'mateCoin': mateCoin, 'end': end}\n print('getTable<<<<<<<<<<<> 1\n\n getMsg = {'head': head, 'kind': kind, 'size': size, 'ID': ID, 'egg': egg, 'end': end}\n print('getEgg<<<<<<<<<<< 100:\r\n nakupyNad100 += 1\r\nprint(\"Celkem jste utratili {} Kč.\".format(celkem))\r\nprint(\"Počet nákupů přesahujících 100 Kč: {}\".format(nakupyNad100))\r\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n","repo_name":"pavelcerny68/pythonick","sub_path":"d02_scitaniuctenek.py","file_name":"d02_scitaniuctenek.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"cs","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15303360982","text":"#!/usr/bin/env python\n\nimport sys\nimport natsort\nimport pyfastx\nimport argparse\nfrom typing import Dict\nimport multiprocessing as mp\nfrom collections import defaultdict\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n required=True,\n help=\"FASTA file to read\"\n )\n parser.add_argument(\n \"--target\",\n type=str,\n required=True,\n help=\"target chromosomes separated by new line\"\n )\n parser.add_argument(\n \"-t\",\n \"--threads\",\n type=int,\n required=True,\n help=\"number of threads\"\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n required=True,\n help=\"file to return trinucleotide sequence context counts\"\n )\n args = args[1:]\n return parser.parse_args(args)\n\n\ntri_lst = [\n \"ACA\",\n \"ACC\",\n \"ACG\",\n \"ACT\",\n \"ATA\",\n \"ATC\",\n \"ATG\",\n \"ATT\",\n \"CCA\",\n \"CCC\",\n \"CCG\",\n \"CCT\",\n \"CTA\",\n \"CTC\",\n \"CTG\",\n \"CTT\",\n \"GCA\",\n \"GCC\",\n \"GCG\",\n \"GCT\",\n \"GTA\",\n \"GTC\",\n \"GTG\",\n \"GTT\",\n \"TCA\",\n \"TCC\",\n \"TCG\",\n \"TCT\",\n \"TTA\",\n \"TTC\",\n \"TTG\",\n \"TTT\",\n]\npurine = set([\"A\", \"G\"])\npurine2pyrimidine = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": 
\"G\", \"N\": \"N\"}\n\n\ndef get_chrom_tricount(\n chrom: str,\n seq: str,\n chrom2tri2count: Dict[str, Dict[str, int]],\n) -> Dict[str, Dict[str, int]]:\n\n tri2count = defaultdict(lambda: 0)\n for i in range(len(seq)-2):\n base = seq[i]\n if base == \"N\":\n continue\n tri = seq[i:i+3]\n if tri[1] in purine:\n tri_pyr = \"\".join(\n [purine2pyrimidine.get(base, \"N\") for base in tri[::-1]]\n )\n tri2count[tri_pyr] += 1\n else:\n tri2count[tri] += 1\n chrom2tri2count[chrom] = dict(tri2count)\n\n\ndef ref2tri(seqfile, tgtfile, threads, outfile):\n\n p = mp.Pool(threads)\n manager = mp.Manager()\n manager = mp.Manager()\n refseq = pyfastx.Fasta(seqfile)\n chrom2tri2count = manager.dict()\n chrom_lst = natsort.natsorted([line.strip() for line in open(tgtfile)])\n get_chrom_tricount_arg_lst = [\n (\n chrom, \n str(refseq[chrom]), \n chrom2tri2count\n )\n for chrom in chrom_lst\n ]\n p.starmap(get_chrom_tricount, get_chrom_tricount_arg_lst)\n p.close()\n p.join()\n \n tri2count = defaultdict(lambda: 0)\n for chrom in chrom_lst:\n for tri in tri_lst:\n tri2count[tri] += chrom2tri2count[chrom][tri] \n trisum = sum(tri2count.values())\n\n o = open(outfile, \"w\")\n for tri in tri_lst:\n tricount = tri2count[tri]\n o.write(\"{}\\t{}\\t{:.2f}\\n\".format(tri, tricount, (tricount/float(trisum))*100))\n o.close()\n\n \ndef main():\n options = parse_args(sys.argv)\n ref2tri(options.input, options.target, options.threads, options.output) \n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sjin09/himut","sub_path":"scripts/ref2tri.py","file_name":"ref2tri.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29869200056","text":"import socket\r\nimport sys\r\nimport traceback\r\nfrom threading import Thread\r\n\r\ndef main():\r\n start_server()\r\n \r\ndef start_server():\r\n while True:\r\n try:\r\n host = str(input(\"Please type in the IP: \"))\r\n port = int(input(\"Please type in the port number: \"))\r\n break\r\n except:\r\n print(\"Error in the values entered. Please retry.\")\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n print(\"Socket created\")\r\n try:\r\n sock.bind((host, port))\r\n except:\r\n print(\"Bind failed. 
Error : \" + str(sys.exc_info()))\r\n sys.exit()\r\n sock.listen(6) # queue up to 6 requests\r\n print(\"Socket now listening\")\r\n # infinite loop- do not reset for every requests\r\n while True:\r\n connection, address = sock.accept()\r\n ip, port = str(address[0]), str(address[1])\r\n print(\"Connection from client \" + ip + \":\" + port)\r\n try:\r\n Thread(target=clientThread, args=(connection, ip, port)).start()\r\n except:\r\n print(\"Thread did not start.\")\r\n #traceback.print_exc()\r\n sock.close()\r\n \r\ndef clientThread(connection, ip, port, max_buffer_size = 1024):\r\n is_active = True\r\n while is_active:\r\n client_input = connection.recv(max_buffer_size).decode(\"utf8\")\r\n #print(client_input)\r\n clientid,msg=client_input.split(\":\")\r\n if \"EXIT\" in msg.upper():\r\n connection.send(\"ok\".encode(\"utf8\"))\r\n print(\"Client {} is requesting to quit\".format(clientid))\r\n print(\"Connection \" + ip + \":\" + port + \" closed\")\r\n else:\r\n print(\"Client {} sent data: {}\".format(clientid,msg))\r\n connection.send(msg.encode(\"utf8\"))\r\n connection.close()\r\n is_active = False\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"miguelob/Distributed_Systems","sub_path":"Threads Comms/Threads 1.2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"12308995640","text":"\"\"\"\n ANALIZA DANYCH POD KĄTEM WYZNACZENIA WYSTARCZALNEJ DŁUGOŚCI\n DLA BADANIA DŹWIĘKÓW\n\"\"\"\n\nimport config\nfrom pymodules.audiohelpers import *\nfrom pymodules.utilities import *\nimport multiprocessing\nimport os\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nimport librosa\nimport sounddevice as sd\nimport time\nfrom tqdm import tqdm\n\n\n\n# Przykłądowy wykres RMS:\ncalcuateRealLength('src/Kick/Kick 0000.flac', plot=True)\nplt.savefig('plots/RMS example.png')\n \n\n\n\ntresholds = [0.25, 0.1, 0.05, 0.01]\nfiles = librosa.util.find_files('src')\n \nprint('CALCULATING TIMES...')\noutput = []\nfor i, file in enumerate(files):\n print(f'{i+1}/{len(files)}')\n output.append(calcuateRealLength(file, tresholds))\noutput = np.vstack(output)\n\n# Wykres czasów:\nwith open(\"out_main/RMS tresholds.txt\", \"w\") as text_file:\n print(f'Output from: script_0_rms_measurements.py\\n\\n', file=text_file)\n \n plt.figure(figsize=(7, 3))\n percentage = 0.95\n plt.hlines(percentage, 0, len(files)+1, color='black', alpha=0.3, label=f'{percentage*100}%')\n \n \n for i in range(len(tresholds)):\n treshold = tresholds[i]\n times = np.sort(output[:,i])\n print('TRESHOLD: ', treshold, file=text_file)\n print('Min: ', np.round(np.min(times), 3), ', Max: ', np.round(np.max(times), 3), ', Avg: ', np.round(np.mean(times), 3), file=text_file)\n times = np.insert(times, 0, 0)\n count = np.linspace(0, 1, len(times))\n plt.step(times, count, where='post', color=getTabColor(i+1))\n plt.vlines(times[int(len(times)*percentage)], 0, 1, color=getTabColor(i+1),\n alpha=0.8, linestyle='--', label=f'Próg: {int(treshold*100)}% z RMS max')\n \n plt.xlim(0, 3)\n plt.ylabel('Procent dźwięków krótszych')\n plt.xlabel('Czas [s]')\n plt.legend()\n plt.tight_layout()\n plt.savefig('plots/RMS tresholds.png')","repo_name":"aronmandrella/classification-of-percussive-sounds-using-convolution-neural-networks","sub_path":"Python 
Scripts/script_0_rms_measurements.py","file_name":"script_0_rms_measurements.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30018920079","text":"def localortho(signal,noise,rect,niter=50,eps=0.0,verb=1):\n\t#LOCALORTHO: Noise attenuation using local signal-and-noise\n\t#orthogonalization and output the local orthogonalization weight (LOW)\n\t#\n\t#IN signal: initial signal\n\t# noise: initial noise\n\t# rect: 3-D vector denoting smooth radius\n\t# niter: number of CG iterations\n\t# eps: regularization parameter, default 0.0\n\t# verb: verbosity flag (default: 1)\n\t#\n\t#OUT signal2: orthogonalized signal\n\t# noise2: orthogonalized noise\n\t# low: local orthogonalization weight\n\t#\n\t#Copyright (C) 2016 Yangkang Chen\n\t#Ported to Python in 2022 by Yangkang Chen \n\t#\n\t#This program is free software: you can redistribute it and/or modify\n\t#it under the terms of the GNU General Public License as published\n\t#by the Free Software Foundation, either version 3 of the License, or\n\t#any later version.\n\t#\n\t#This program is distributed in the hope that it will be useful,\n\t#but WITHOUT ANY WARRANTY; without even the implied warranty of\n\t#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\t#GNU General Public License for more details: http://www.gnu.org/licenses/\n\t#\n\t#Reference: 1. Random noise attenuation using local signal-and-noise orthogonalization\n\t# Chen and Fomel, 2015, Geophysics\n\t# 2. Ground-Roll Noise Attenuation Using a Simple and Effective Approach Based on \n\t# Local Band-Limited Orthogonalization, Chen et al., 2015, IEEE Geoscience and Remote Sensing Letters\n\t# 3. Iterative deblending with multiple constraints based on shaping regularization,\n\t# Chen, 2015, IEEE Geoscience and Remote Sensing Letters\n\t# 4. Orthogonalized morphological reconstruction for weak signal detection in micro-seismic monitoring:\n\t# Methodology, Huang et al., 2018, GJI\n\t# 5. Surface-related multiple leakage extraction using local primary-and-multiple \n\t# orthogonalization, Zhang et al., 2020, Geophysics\n\t# 6. Non-stationary local signal-and-noise orthogonalization, Chen et al.,\n\t# 2020, Geophysics\n\t# 7. 
Local primary-and-multiple orthogonalization for leaked internal multiple crosstalk estimation and attenuation on full-wavefield migrated images\n\t# Zhang, et al., 2020, Geophysics\n\t#\n\t# DEMO\n\t# demos/test_pyortho_localortho2d.py\n\t# demos/test_pyortho_localortho3d.py\n\n\timport numpy as np\n\tfrom .divne import divne\n\t\n\tif signal.ndim==2:\t#for 2D problems\n\t\tsignal=np.expand_dims(signal, axis=2)\n\tif noise.ndim==2:\t#for 2D problems\n\t\tnoise=np.expand_dims(noise, axis=2)\n\t[n1,n2,n3]=signal.shape\n\t\n\tnd=n1*n2*n3;\n\tndat=[n1,n2,n3];\n\t\n\teps_dv=eps;\n\teps_cg=0.1; \n\ttol_cg=0.000001;\n\tratio = divne(noise, signal, niter, rect, ndat, eps_dv, eps_cg, tol_cg,verb);\n\t\n\tsignal2=signal+ratio*signal;\n\tnoise2=noise-ratio*signal;\n\tlow=ratio;\n\n\treturn signal2,noise2,low\n\t\n\t","repo_name":"chenyk1990/pyortho","sub_path":"pyortho/localortho.py","file_name":"localortho.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5897205707","text":"from urllib.parse import quote_plus\n\ndef prepare_tquery(field_query):\n \"\"\"\n Transform the query for each field so that it is compatible with the\n search pattern used in a Trustpilot URL.\n\n If the city clause is included, then the country one is not, as\n the city field value would lose relevance in the query and a\n lot of businesses from other cities would be extracted.\n \"\"\"\n\n if 'city' in field_query:\n query = quote_plus(field_query['city'])\n if 'name' in field_query:\n query += quote_plus(f\" {field_query['name']}\")\n if 'general' in field_query:\n query = quote_plus(field_query['general'])\n \n return query\n\nSEARCH_FIELDS = ['city', 'country', 'name']\nSEP = ','\nEQ = ':'\n\ndef parse_query(query):\n\n if not query:\n raise Exception(\"A query cannot be empty.\")\n\n field_value = {}\n \n if EQ in query:\n\n search_clauses = query.split(SEP)\n\n for clause in search_clauses:\n\n field, value = clause.split(EQ)\n field, value = field.strip(), value.strip()\n\n if field not in SEARCH_FIELDS:\n raise Exception(f\"Searched for {field}. 
The only fields that can be searched for are {', '.join(SEARCH_FIELDS)}\")\n if field in field_value:\n raise Exception(\"A field cannot be searched twice in the same query.\")\n\n field_value[field] = value\n\n if 'city' in field_value:\n if 'country' not in field_value:\n raise Exception(\"\"\"If the search is restricted to city,\n then the country of the city must be included as well.\"\"\")\n else:\n field_value['general'] = query\n\n return field_value\n \n \n","repo_name":"phoenixsite/fakepilot","sub_path":"src/fakepilot/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13069875584","text":"import numpy as np\n\nclass KPM(object):\n \"\"\"description of class\"\"\"\n\n# First build the prefix table,\n# then shift it one position to the right,\n# then run the matching pass against the text\ndef prefix_table(pattern, prefix, n):\n prefix[0] = 0\n length = 0 \n i = 1\n while i < n:\n if pattern[i] == pattern[length]:\n length = length + 1\n prefix[i] = length\n i = i + 1\n else:\n if length > 0:\n length = prefix[length - 1]\n else:\n prefix[i] = length\n i = i + 1\n\ndef move_prefix_table(prefix, n):\n # shift every entry one position to the right (down to index 1)\n for i in range(len(prefix) - 1, 0, -1): \n prefix[i] = prefix[i - 1]\n prefix[0] = -1\n\ndef kmp_search(pattern, text):\n n = len(pattern)\n prefix = n * [0]\n prefix_table(pattern, prefix, n)\n move_prefix_table(prefix, n) \n m = len(text)\n i = 0\n j = 0\n while(i < m):\n if j == n - 1 and text[i] == pattern[j]:\n print(\"Found at\", i - j)\n j = prefix[j]\n if text[i] == pattern[j]:\n i = i + 1\n j = j + 1\n else:\n j = prefix[j]\n if j == -1:\n i = i + 1\n j = j + 1 \n\n
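# Worked example (comment added): for pattern 'ABABCABAA' the prefix table is\n# [0, 0, 1, 2, 0, 1, 2, 3, 1], which becomes [-1, 0, 0, 1, 2, 0, 1, 2, 3] after\n# move_prefix_table; the call below should therefore print: Found at 2\n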
pattern = 'ABABCABAA'\ntext = 'ABABABCABAABABABAB'\nkmp_search(pattern, text)\n\n\n","repo_name":"SeeSeeSeeYou/DynamicProgramDemo","sub_path":"DynamicProgramming/KMP.py","file_name":"KMP.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41795344562","text":"import numpy as np\n\nclass NeuronLayer():\n def __init__(self, number_of_neurons, number_of_inputs_per_neuron):\n self.synaptic_weights = np.random.random((number_of_inputs_per_neuron, number_of_neurons))\n\n\nclass NeuralNetwork():\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n\n # The Sigmoid function, which describes an S shaped curve.\n # We pass the weighted sum of the inputs through this function to\n # normalise them between 0 and 1.\n def __sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n # The derivative of the Sigmoid function.\n # This is the gradient of the Sigmoid curve.\n # It indicates how confident we are about the existing weight.\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n # We train the neural network through a process of trial and error.\n # Adjusting the synaptic weights each time.\n def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):\n print(training_set_inputs)\n print(training_set_outputs)\n for iteration in range(number_of_training_iterations):\n # Pass the training set through our neural network\n output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)\n\n # Calculate the error for layer 2 (The difference between the desired output\n # and the predicted output).\n layer2_error = training_set_outputs - output_from_layer_2\n layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)\n\n # Calculate the error for layer 1 (By looking at the weights in layer 1,\n # we can determine by how much layer 1 contributed to the error in layer 2).\n layer1_error = layer2_delta.dot(self.layer2.synaptic_weights.T)\n layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)\n\n # Calculate how much to adjust the weights by\n layer1_adjustment = training_set_inputs.T.dot(layer1_delta)\n layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)\n\n # Adjust the weights.\n self.layer1.synaptic_weights += layer1_adjustment\n self.layer2.synaptic_weights += layer2_adjustment\n\n # The neural network thinks.\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(np.dot(inputs, self.layer1.synaptic_weights))\n output_from_layer2 = self.__sigmoid(np.dot(output_from_layer1, self.layer2.synaptic_weights))\n return output_from_layer1, output_from_layer2\n\n # The neural network prints its weights\n def print_weights(self):\n print(\" Layer 1 (hidden layer) weights: \")\n print(self.layer1.synaptic_weights)\n print(\" Layer 2 (output layer) weights:\")\n print(self.layer2.synaptic_weights)\n\nif __name__ == \"__main__\":\n\n with open(\"input.txt\", \"r\") as input_file:\n input_text = input_file.read().splitlines()\n inputs = []\n for line in input_text:\n for number in line.split(\" \"):\n inputs.append(number)\n nr_inputs = int(inputs[0])\n input_size = int(inputs[1])\n output_size = int(inputs[2])\n hidden_layer_size = int(inputs[3])\n index = 4\n inputs_list = []\n outputs_list = []\n for i in range(nr_inputs):\n input = []\n output = []\n for j in range(input_size):\n input.append(int(inputs[index]))\n index += 1\n inputs_list.append(input)\n for j in range(output_size):\n output.append(int(inputs[index]))\n index += 1\n outputs_list.append(output)\n\n
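 # (Comment added) The block above assumes input.txt starts with four integers -\n # nr_inputs, input_size, output_size, hidden_layer_size - followed by\n # nr_inputs samples, each given as input_size inputs and output_size outputs.\n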
 #Seed the random number generator\n np.random.seed(1)\n\n # Create layer 1 (variable number of neurons, each with 7 inputs)\n layer1 = NeuronLayer(hidden_layer_size, 7)\n\n # Create layer 2 (10 neurons, each with hidden_layer_size inputs)\n layer2 = NeuronLayer(10, hidden_layer_size)\n\n # Combine the layers to create a neural network\n neural_network = NeuralNetwork(layer1, layer2)\n\n print(\"Stage 1) Random starting synaptic weights: \")\n neural_network.print_weights()\n\n # The training set, read from input.txt above: nr_inputs examples,\n # each consisting of 7 input values and 10 output values.\n training_set_inputs = np.array(inputs_list)\n training_set_outputs = np.array(outputs_list)\n\n # Train the neural network using the training set.\n # Do it 60,000 times and make small adjustments each time.\n neural_network.train(training_set_inputs, training_set_outputs, 60000)\n\n print(\"Stage 2) New synaptic weights after training: \")\n neural_network.print_weights()\n\n # Test the neural network with a new situation.\n print(\"Stage 3) Considering a new situation [0, 0, 1, 0, 1, 1, 1] -> ?: \")\n hidden_state, output = neural_network.think(np.array([0, 0, 1, 0, 1, 1, 1]))\n for i in output:\n print(\"%.10f\" % i)","repo_name":"adriangotca98/Faculty","sub_path":"Bachelor/3rd year/Artificial Intelligence/tema5.py","file_name":"tema5.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72643958666","text":"from __future__ import print_function\n#\n# Test the SVE registers are visible and changeable via gdbstub\n#\n# This is launched via tests/guest-debug/run-test.py\n#\n\nimport gdb\nimport sys\n\nMAGIC = 0xDEADBEEF\n\nfailcount = 0\n\ndef report(cond, msg):\n \"Report success/fail of test\"\n if cond:\n print (\"PASS: %s\" % (msg))\n else:\n print (\"FAIL: %s\" % (msg))\n global failcount\n failcount += 1\n\ndef run_test():\n \"Run through the tests one by one\"\n\n gdb.execute(\"info registers\")\n report(True, \"info registers\")\n\n gdb.execute(\"info registers vector\")\n report(True, \"info registers vector\")\n\n # Now all the zregs\n frame = gdb.selected_frame()\n for i in range(0, 32):\n rname = \"z%d\" % (i)\n zreg = frame.read_register(rname)\n report(True, \"Reading %s\" % rname)\n for j in range(0, 4):\n cmd = \"set $%s.q.u[%d] = 0x%x\" % (rname, j, MAGIC)\n gdb.execute(cmd)\n report(True, \"%s\" % cmd)\n for j in range(0, 4):\n reg = \"$%s.q.u[%d]\" % (rname, j)\n v = gdb.parse_and_eval(reg)\n report(str(v.type) == \"uint128_t\", \"size of %s\" % (reg))\n for j in range(0, 8):\n cmd = \"set $%s.d.u[%d] = 0x%x\" % (rname, j, MAGIC)\n gdb.execute(cmd)\n report(True, \"%s\" % cmd)\n for j in range(0, 8):\n reg = \"$%s.d.u[%d]\" % (rname, j)\n v = gdb.parse_and_eval(reg)\n report(str(v.type) == \"uint64_t\", \"size of %s\" % (reg))\n report(int(v) == MAGIC, \"%s is 0x%x\" % (reg, MAGIC))\n\n#\n# This runs as the script it sourced (via -x, via run-test.py)\n#\ntry:\n inferior = gdb.selected_inferior()\n arch = inferior.architecture()\n report(arch.name() == \"aarch64\", \"connected to aarch64\")\nexcept (gdb.error, AttributeError):\n print(\"SKIPPING (not connected)\", file=sys.stderr)\n exit(0)\n\ntry:\n # Run the actual tests\n run_test()\nexcept:\n print (\"GDB Exception: %s\" % (sys.exc_info()[0]))\n failcount += 1\n\nprint(\"All tests complete: %d failures\" % failcount)\n\nexit(failcount)\n","repo_name":"qemu/qemu","sub_path":"tests/tcg/aarch64/gdbstub/test-sve.py","file_name":"test-sve.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":8597,"dataset":"github-code","pt":"81"} +{"seq_id":"39420787494","text":"# https://leetcode.com/problems/find-the-duplicate-number/\r\nfrom typing import List\r\nfrom tester import Tester\r\n\r\n\r\nclass Solution:\r\n def findDuplicate(self, nums: List[int]) -> int:\r\n slow = fast = nums[0]\r\n while True:\r\n fast = nums[fast]\r\n fast = nums[fast]\r\n slow = nums[slow]\r\n if fast == slow:\r\n slow = 
nums[0]\r\n break\r\n while fast != slow:\r\n fast = nums[fast]\r\n slow = nums[slow]\r\n return fast\r\n\r\nt = Tester(Solution())\r\n\r\nt.test(2, [1, 3, 4, 2, 2])\r\nt.test(3, [3, 1, 3, 4, 2])\r\nt.test(1, [1, 1])\r\nt.test(1, [1, 2, 1])\r\nt.test(3, [1, 2, 3, 3])\r\nt.test(2, [2, 2, 2, 2, 2])\r\nt.test(2, [2, 2, 1, 2, 3])\r\n\r\nt.report()\r\n","repo_name":"thinhntr/cp","sub_path":"leetcode/Find the Duplicate Number.py","file_name":"Find the Duplicate Number.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9521033998","text":"import sys\nimport pandas as pd\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\n\n\nif __name__ == \"__main__\":\n # fetch data\n x_train = pd.read_csv(\"AMF_train_X.csv\")\n if len(sys.argv) == 2:\n x_train = x_train.head(n=int(sys.argv[1]))\n\n y_train = pd.read_csv(\"AMF_train_Y.csv\")\n x_test = pd.read_csv(\"AMF_test_X.csv\")\n\n type_d = {}\n for index, row in y_train.iterrows():\n type_d[row['Trader']] = row['type']\n\n x_train2 = x_train.dropna()\n y_train2 = np.array([type_d[x] for x in x_train2['Trader']])\n x_train2 = x_train2.drop(columns=['Index', 'Share', 'Day', 'Trader'])\n\n print(\"X:\", len(x_train2))\n print('Training started')\n\n svclassifier = SVC(kernel='linear')\n svclassifier.fit(x_train2, y_train2)\n print('End of training')\n\n x_test2 = x_test.dropna()\n x_test2 = x_test2.drop(columns=['Index', 'Share', 'Day', 'Trader'])\n y_pred = svclassifier.predict(x_test2)\n\n x_test2 = x_test.dropna()\n names = x_test2['Trader'].unique()\n convention = {'HFT': 0, 'NON HFT': 1, 'MIX': 2}\n result = {e: [0, 0, 0] for e in names}\n i = 0\n for index, row in x_test2.iterrows():\n result[row['Trader']][convention[y_pred[i]]] += 1\n i += 1\n\n data = []\n for trader in names:\n n = sum(result[trader])\n if result[trader][0] > 0.85*n:\n data.append([trader, 'HFT'])\n elif result[trader][2] > 0.5*n:\n data.append([trader, 'MIX'])\n else:\n data.append([trader, 'NON HFT'])\n\n export_df = pd.DataFrame(data, columns=['Trader', 'type'])\n export_df.to_csv(r'result.csv', index=False)\n","repo_name":"Straccia11/studies","sub_path":"MLII/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1655829077","text":"import os\n\n\"\"\"\nCOMMAND NAME : mkdir\nDESCRIPTION : To create a directory.\nPARAMETERS : Directory name\n\"\"\"\n\ndef mkdir(dirs):\n out = []\n for arg in dirs:\n os.makedirs(arg)\n out.append(arg + ' created')\n return '\\n'.join(out)\n","repo_name":"Nagesh-a5/Shell-assignment","sub_path":"cmd_pkg/mkdir.py","file_name":"mkdir.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41371435442","text":"import requests\nimport json\nimport os\nimport random\n\n\nurls = []\n\n\ndef site_url(site):\n links = {}\n\n with open('nomes_cursos.txt', 'r+', encoding='latin1') as s:\n lista = s.readlines()\n for i, nome in enumerate(lista):\n lista[i] = lista[i].rstrip('\\n')\n links[lista[i]] = urls[i]\n return links[str(site)]\n\n\nclass TelegramBot:\n def __init__(self):\n self.site_nome = ' '\n self.url = ' '\n self.contador = 0\n self.certo = False\n 
self.sites = self.ler_sites()\n self.email = ''\n self.senha = ''\n iTOKEN = ''\n self.iURL = f'https://api.telegram.org/bot{iTOKEN}/'\n\n def Iniciar(self):\n iUPDATE_ID = None\n try:\n while True:\n iATUALIZACAO = self.ler_novas_mensagens(iUPDATE_ID)\n IDADOS = iATUALIZACAO[\"result\"]\n if IDADOS:\n for dado in IDADOS:\n iUPDATE_ID = dado['update_id']\n mensagem = str(dado['message']['text'])\n chat_id = dado[\"message\"][\"from\"][\"id\"]\n primeira_mensagem = int(\n dado[\"message\"][\"message_id\"]) == 1\n resposta = self.gerar_respostas(\n mensagem, primeira_mensagem)\n print('usuário: ' + str(mensagem))\n self.responder(resposta, chat_id)\n if self.certo:\n self.contador += 1\n print('contador: ' + str(self.contador))\n except requests.exceptions.ConnectionError:\n return 'Foram excedidas as tentativas de conexão com o servidor.'\n\n def ler_novas_mensagens(self, iUPDATE_ID):\n iLINK_REQ = f'{self.iURL}getUpdates?timeout=100'\n if iUPDATE_ID:\n iLINK_REQ = f'{iLINK_REQ}&offset={iUPDATE_ID + 1}'\n iRESULT = requests.get(iLINK_REQ)\n return json.loads(iRESULT.content)\n\n def gerar_respostas(self, mensagem, primeira_mensagem):\n if primeira_mensagem == True:\n return f'Bem-vindo ao PegasusCloud! Digite o site de sua preferência.'\n\n if mensagem == '/start':\n self.contador = 0\n self.certo = False\n return 'Digite o site de sua preferência.'\n\n if mensagem.lower() in ('olá', 'ola', 'oi'):\n return random.choice(['Olá!', 'Oi!', 'Fala aê!'])\n\n if mensagem.lower() in ('bom dia', 'boa tarde', 'boa noite'):\n if mensagem.lower() == 'bom dia':\n return 'Bom dia, humano!'\n else:\n return 'Boa' + mensagem.lower()[3:] + ', humano!'\n\n if self.contador == 0:\n if mensagem.lower() in self.sites:\n self.certo = True\n self.url = site_url(mensagem.lower())\n self.site_nome = mensagem.lower()\n return f'Ótimo! O site {mensagem.title()} está cadastrado!{os.linesep}Agora, por favor, informe seu email.'\n else:\n with open('sites_errados.txt', 'a') as novo_site:\n novo_site.write(mensagem)\n novo_site.write('\\n')\n self.certo = False\n return f'Ah que pena! Não há nenhum site chamado {mensagem.title()} cadastrado.{os.linesep}Incluiremos este site em nossos bancos de dados para que, mais tarde, analisemos.'\n\n elif self.contador == 1:\n if mensagem.lower().strip() == self.email:\n self.certo = True\n return f'Email confere!{os.linesep}Agora, nos informe sua senha.'\n else:\n self.certo = False\n return 'Hum, eu não conheço esse email!'\n\n elif self.contador == 2:\n if mensagem.strip() == self.senha:\n self.certo = True\n return f'Maravilha!! Seu login foi efetuado com sucesso! Agora você terá acesso ao curso!{os.linesep}Este curso está disponível no Google drive do @eusiim. Clique neste link para acessa-lo: {self.url}'\n else:\n self.certo = False\n return 'Opa! 
Esta senha está errada, humano!'\n\n else:\n self.certo = False\n return f'Nunca nem vi!{os.linesep}Digite /start para reiniciar o processo.'\n\n def responder(self, resposta, chat_id):\n iLINK_REQ = f'{self.iURL}sendMessage?chat_id={chat_id}&text={resposta}'\n requests.get(iLINK_REQ)\n print(\"respondi: \" + str(resposta))\n\n def ler_sites(self):\n with open('nomes_cursos.txt', 'r+', encoding='latin1') as arq:\n sites = arq.readlines()\n for i in range(len(sites)):\n sites[i] = sites[i].rstrip('\\n').lower()\n return sites\n\n\nbot = TelegramBot()\n\n\nif __name__ == '__main__':\n bot.Iniciar()\n","repo_name":"FranciscoAlveJr/Bot_Telegram","sub_path":"Telegram_bot.py","file_name":"Telegram_bot.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25249135814","text":"from superdesk.io.feeding_services import RSSFeedingService\nfrom superdesk.io.registry import register_feeding_service, register_feeding_service_parser\n\n\nclass RSSBelgaFeedingService(RSSFeedingService):\n NAME = 'rss-belga'\n label = 'RSS BELGA'\n\n def _create_item(self, data, field_aliases=None, source='source'):\n item = super()._create_item(data, field_aliases, source)\n\n # get Belga anp atom data\n provider_id = data.get('anp_provider', None)\n if provider_id == 'ANP':\n item['provider_id'] = provider_id\n item['char_count'] = data.get('anp_charcount')\n item['location'] = {\n 'city': data.get('anp_city'),\n 'country': data.get('anp_country')\n }\n item['codes'] = data.get('anp_codes')\n item['copyright'] = data.get('anp_copyright')\n item['financial'] = data.get('anp_financial')\n item['keywords'] = [data.get('anp_keywords')]\n item['language'] = data.get('anp_lang')\n item['priority'] = data.get('anp_priority')\n item['updated_date'] = data.get('anp_updated')\n item['version'] = data.get('anp_version')\n item['word_count'] = data.get('anp_wordcount')\n author_name = data.get('author')\n if author_name:\n author = {\n 'uri': None,\n 'parent': None,\n 'name': author_name,\n 'role': None,\n 'jobtitle': None,\n }\n item['authors'] = [author]\n return item\n\n\nregister_feeding_service(RSSBelgaFeedingService)\nregister_feeding_service_parser(RSSBelgaFeedingService.NAME, None)\n","repo_name":"superdesk/superdesk-belga","sub_path":"server/belga/io/feeding_services/rss_belga.py","file_name":"rss_belga.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"13065805614","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nEmail value object module\n\n\"\"\"\n\n__author__ = 'Samir Adrik'\n__email__ = 'samir.adrik@gmail.com'\n\nimport re\n\nfrom source.util import InvalidEmailError, Assertor, Tracking\n\nfrom .value import Value\n\n\nclass Email(Value):\n \"\"\"\n Email value object implementation\n\n \"\"\"\n\n @Tracking\n def validate_email(self, email: str):\n \"\"\"\n Method for validating an email according to a regex\n\n Parameters\n ----------\n email : str\n string to be validated\n\n \"\"\"\n valid_email = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\"\n r\"\\.[a-zA-Z0-9-.]+$)\").search(email)\n if not valid_email:\n raise InvalidEmailError(\"'{}' is an invalid email\".format(email))\n\n def __init__(self, email: str):\n \"\"\"\n Constructor / Instantiate the class\n\n Parameters\n ----------\n email : str\n email\n\n \"\"\"\n super().__init__()\n try:\n Assertor.assert_data_types([email], [str])\n self.validate_email(email)\n 
self._email = email\n except Exception as email_error:\n raise email_error\n\n @property\n def email(self):\n \"\"\"\n email getter\n\n Returns\n -------\n out : str\n active email\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, new_email):\n \"\"\"\n email setter\n\n Parameters\n ----------\n new_email : str\n new email to be set\n\n \"\"\"\n Assertor.assert_data_types([new_email], [str])\n self.validate_email(new_email)\n self._email = new_email\n\n def format_email(self):\n \"\"\"\n method that returns formatted email, i.e. in lower case\n\n Returns\n -------\n out : str\n formatted email\n\n \"\"\"\n email = self.email\n formatted = email.lower()\n return formatted\n","repo_name":"seemir/stressa","sub_path":"source/domain/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27779457246","text":"import json\nfrom time import sleep\nimport requests\nfrom datetime import datetime, timedelta\nimport threading\nimport queue\nimport os.path\n\n# use pandas to turn the JSON into a dataframe and then make it into a csv\n\n# {\"searchText\":\"\",\"fq\":[\"patentIssueDate:[2019-01-01T00:00:00Z TO 2019-01-01T23:59:59Z]\"],\"fl\":\"*\",\"mm\":\"100%\",\"df\":\"patentTitle\",\"qf\":\"patentIssueDate appLocationYear appEarlyPubNumber applId appLocation appType appStatus_txt appConfrNumber appCustNumber appGrpArtNumber appCls appSubCls appEntityStatus_txt patentNumber patentTitle primaryInventor firstNamedApplicant wipoEarlyPubNumber pctAppType firstInventorFile appClsSubCls rankAndInventorsList\",\"facet\":\"false\",\"sort\":\"applId asc\",\"start\":\"0\"}\n\ndef queryformatter(startDate, endDate):\n issueDate = \"{}Z TO {}Z\".format(startDate.isoformat(), endDate.isoformat())\n variables = \"appLocationYear appEarlyPubNumber applId appLocation appType appStatus_txt appConfrNumber appCustNumber appGrpArtNumber appCls appSubCls appEntityStatus_txt patentNumber patentTitle primaryInventor firstNamedApplicant firstNamedApplicantNameList wipoEarlyPubNumber pctAppType firstInventorFile appClsSubCls rankAndInventorsList\"\n query = {\"searchText\":\"*:*\",\n\"fq\":[\"patentIssueDate:[{}]\".format(issueDate)],\n\"fl\":\"*\",\n\"mm\":\"100%\",\n\"df\":\"patentTitle\",\n\"qf\":\"{}\".format(variables),\n\"sort\":\"applId asc\",\n\"start\":\"0\"}\n\n # query = json.dumps({\"searchText\":\"firstNamedApplicant:(Google)\",\"fq\":[\"appFilingDate:[2013-01-01T00:00:00Z TO 2013-12-31T23:59:59Z]\",\"appStatus:\\\"Patented Case\\\"\"],\"fl\":\"*\",\"mm\":\"100%\",\"df\":\"patentTitle\",\"qf\":\"appEarlyPubNumber applId appLocation appType appStatus_txt appConfrNumber appCustNumber appGrpArtNumber appCls appSubCls appEntityStatus_txt patentNumber patentTitle primaryInventor firstNamedApplicant appExamName appExamPrefrdName appAttrDockNumber appPCTNumber appIntlPubNumber wipoEarlyPubNumber pctAppType firstInventorFile appClsSubCls rankAndInventorsList\",\"facet\":\"false\",\"sort\":\"applId asc\",\"start\":\"0\"})\n return query\n \ndef download_file(url, index):\n print(\"Downloading {}...\".format(index))\n local_filename = \"downloadedData/dataresultsFile_{}.zip\".format(index)\n # NOTE the stream=True parameter below\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192): \n # If you have chunk encoded response uncomment if\n # and set chunk_size parameter to None.\n #if 
chunk: \n f.write(chunk)\n if os.path.getsize(local_filename) > 80000000:\n os.remove(local_filename)\n return index\n \n return -1\n\ndef getQueryResults(queryId, downloadIndex):\n \n ## CHECK STATUS UNTIL CAN DOWNLOAD\n jobStatus = \"initiated\"\n count = 0\n while (jobStatus != \"COMPLETED\"):\n url = 'https://ped.uspto.gov/api/queries/{}'.format(queryId)\n queryStatus = requests.get(url, headers={'accept' : 'application/json'})\n\n\n status = queryStatus.status_code\n if status != 200:\n exit(\"non-200 status code.\")\n\n queryStatusDict = queryStatus.json()\n jobStatus = queryStatusDict[\"jobStatus\"]\n print(\"Download {} : {} {}\".format(downloadIndex, queryStatus.status_code, jobStatus))\n print(jobStatus)\n if jobStatus != \"COMPLETED\":\n count = count + 1\n if (count == 20):\n return downloadIndex\n sleep(30)\n\n ## DOWNLOAD THE FILE\n\n url = \"https://ped.uspto.gov/api/queries/{}/download?format=JSON\".format(queryId)\n returnV = download_file(url, downloadIndex)\n sleep(1.6)\n if (returnV == -1):\n print(\"SUCCESS on {}\".format(downloadIndex))\n return -1\n if (returnV != -1):\n print(\"FAIL on : {}\".format(returnV))\n\ndef downloadWorker(q, i):\n # result = getQueryResults(i[0], i[1])\n\n q.put(i[1])\n\n\ndef failureFixing(failNum):\n # for each week split it into each day or something and if the day fails then we dont get that day or somethig/ or maybe go by hour?\n if len(failNum) == 0:\n return failNum\n newFails = []\n print(\"Download failure on : {}\".format(failNum))\n print(\"Retrying...\")\n for f in range(0, len(failNum)):\n queries = []\n for jid in range(0, 4):\n startDate = datetime.fromisoformat(\"2019-01-01T00:00:00\")\n endDate = datetime.fromisoformat(\"2019-01-01T00:00:00\")\n failChange = timedelta(days=7*failNum[f])\n startDate = startDate + failChange\n endDate = endDate + failChange\n\n startHourChange = timedelta(hours=42*jid)\n endHourChange = timedelta(hours=42*(jid+1))\n startDate += startHourChange\n endDate += endHourChange\n print(\"start : {} End : {} \".format(startDate.isoformat(), endDate.isoformat()))\n\n finalDate = datetime.fromisoformat(\"2020-01-01T00:00:00\")\n postJSON = queryformatter(startDate, endDate)\n downloadIndex = 0\n url = 'https://ped.uspto.gov/api/queries'\n postJSON = queryformatter(startDate, endDate)\n url = 'https://ped.uspto.gov/api/queries'\n queryOne = requests.post(url, json=postJSON, headers={'accept' : 'application/json'})\n\n #print(postJSON)\n print(\"query request {} : {}\".format(failNum[f], queryOne.status_code))\n #print(queryOne.content)\n\n if queryOne.status_code != 200 :\n exit(\"Non-200 OK response.\")\n\n\n initialDict = queryOne.json()\n\n firstQueryID = initialDict['queryId']\n\n print(firstQueryID)\n\n jobStatus = \"initiated\"\n\n ## CHECK IF THE JOB HAS BEEN CREATED\n\n while (jobStatus != \"CREATED\"):\n url = 'https://ped.uspto.gov/api/queries/{}'.format(firstQueryID)\n queryStatus = requests.get(url, headers={'accept' : 'application/json'})\n\n print(\"query creation check {} : {}\".format(failNum[f], queryOne.status_code))\n\n status = queryStatus.status_code\n if status != 200:\n exit(\"non-200 status code.\")\n\n queryStatusDict = queryStatus.json()\n jobStatus = queryStatusDict[\"jobStatus\"]\n print(jobStatus)\n if jobStatus != \"CREATED\":\n sleep(30)\n\n\n ## THIS IS WHERE WE WILL GET 416 RESPONSE IF IT IS TOO LONG\n\n url = \"https://ped.uspto.gov/api/queries/{}/package?format=JSON\".format(firstQueryID)\n downloadPut = requests.put(url, headers={\"accept\":\"application/json\"})\n\n 
status = downloadPut.status_code\n print(downloadPut.content)\n print(status)\n\n if status == 200:\n queries.append((firstQueryID, \"{}_{}\".format(failNum[f], jid)))\n else:\n exit(\"200 status code\")\n\n print(\"Download executing...\")\n for i in queries:\n download_thread = threading.Thread(target=getQueryResults, args=(i[0], i[1]))\n download_thread.start()\n \n return -1\n\n\n \nstartDate = datetime.fromisoformat(\"2019-01-01T00:00:00\")\nendDate = datetime.fromisoformat(\"2019-01-08T00:00:00\")\n\nfinalDate = datetime.fromisoformat(\"2020-01-01T00:00:00\")\npostJSON = queryformatter(startDate, endDate)\ndownloadIndex = 0\nqueries = []\nurl = 'https://ped.uspto.gov/api/queries'\nmyobj = {'somekey': 'somevalue'}\ncomplete = 0\n\nwhile complete != 1:\n\n ## START OF COMMUNICATING WITH SERVER\n postJSON = queryformatter(startDate, endDate)\n url = 'https://ped.uspto.gov/api/queries'\n queryOne = requests.post(url, json=postJSON, headers={'accept' : 'application/json'})\n\n #print(postJSON)\n print(\"query request {} : {}\".format(downloadIndex, queryOne.status_code))\n #print(queryOne.content)\n\n if queryOne.status_code != 200 :\n exit(\"Non-200 OK response.\")\n\n\n initialDict = queryOne.json()\n\n firstQueryID = initialDict['queryId']\n\n print(firstQueryID)\n\n jobStatus = \"initiated\"\n\n ## CHECK IF THE JOB HAS BEEN CREATED\n\n while (jobStatus != \"CREATED\"):\n url = 'https://ped.uspto.gov/api/queries/{}'.format(firstQueryID)\n queryStatus = requests.get(url, headers={'accept' : 'application/json'})\n\n print(\"query creation check {} : {}\".format(downloadIndex, queryOne.status_code))\n\n status = queryStatus.status_code\n if status != 200:\n exit(\"non-200 status code.\")\n\n queryStatusDict = queryStatus.json()\n jobStatus = queryStatusDict[\"jobStatus\"]\n print(jobStatus)\n if jobStatus != \"CREATED\":\n sleep(30)\n\n\n ## THIS IS WHERE WE WILL GET 416 RESPONSE IF IT IS TOO LONG\n\n url = \"https://ped.uspto.gov/api/queries/{}/package?format=JSON\".format(firstQueryID)\n downloadPut = requests.put(url, headers={\"accept\":\"application/json\"})\n\n status = downloadPut.status_code\n print(downloadPut.content)\n print(status)\n\n if status == 200:\n queries.append((firstQueryID, downloadIndex))\n downloadIndex = downloadIndex + 1\n if endDate.isoformat() == \"2020-01-01T00:00:00\":\n complete = 1\n continue\n\n timeChange = timedelta(days=7)\n startDate = datetime.fromisoformat(endDate.isoformat())\n\n endDate = endDate + timeChange\n if (endDate > finalDate) :\n endDate = datetime.fromisoformat(finalDate.isoformat())\n elif status == 416:\n timeChange = timedelta(days=1)\n endDate = endDate - timedelta\n continue\n else:\n exit(\"non 200 or 416 status code\")\n\nprint(\"Download executing...\")\nqueueThing = queue.Queue()\nfails = []\nthreads = []\nfor i in queries:\n download_thread = threading.Thread(target=downloadWorker, args=(queueThing, i))\n download_thread.start()\n threads.append(download_thread)\nfor thr in threads:\n thr.join()\nfor i in queries:\n result = queueThing.get()\n if result != None and result > -1:\n fails.append(result)\n \n\nfailureFixing(fails)\nprint(fails)\nprint(\"finished\")\n# for i in queries:\n# download_thread = threading.Thread(target=getQueryResults, args=(i[0], i[1]))\n# download_thread.start()\n\n\n\n\n\n\n# status == 400\n# while status != 302:\n# url = \"https://ped.uspto.gov/api/queries/{}/download?format=JSON\".format(firstQueryID)\n# download = requests.get(url, headers={'accept' : 'application/json'})\n# status = 
download.status_code\n# print(download.content)\n# print(status)\n\n# if (status != 200 and status != 302):\n# exit(\"non-200 and non-302 response code.\")\n\n#while statusDict[]\n\n\n\n# x = requests.post(url, json = myobj)\n\n\n\n\n","repo_name":"cheestov/patent-scraper","sub_path":"patentBundling/patentBundler.py","file_name":"patentBundler.py","file_ext":"py","file_size_in_byte":10744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12745684775","text":"import yaml\n\nmy_data = {\n 'device_name': 'rtr1',\n 'ip_addr': '1.1.1.1'\n}\n\nsome_list = list (range(10))\nmy_data['some_list'] = some_list\nmy_data['null_value'] = None\nmy_data['a_bool'] = False\n\nfilename = \"outfile.yml\"\nwith open(filename, \"wt\") as f:\n yaml.dump(my_data, f, default_flow_style=True) #default flow style can be either true or false","repo_name":"derahul9/Python","sub_path":"Data_Structures/wr_yaml.py","file_name":"wr_yaml.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42781611629","text":"# -*- coding: utf-8 -*-\nimport sys\n\ndef failfast(response):\n try:\n print(\"Error code: \" + str(response.status_code) + \" - \" + response.json()[\"error\"])\n print(response.json()[\"message\"])\n except:\n print(\"Unexpected error while failing fast because of invalid request. \" + str(sys.exc_info()[0]))\n sys.exit(-1)\n\n","repo_name":"Martstol/twitch_cli","sub_path":"src/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71671384265","text":"def find_exit(offsets):\n i = 0\n steps = 0\n\n while 0 <= i < len(offsets):\n offset = offsets[i]\n\n if offset >= 3:\n offsets[i] = offset - 1\n else:\n offsets[i] = offset + 1\n\n steps += 1\n i += offset\n\n return steps\n\n\ndef main():\n with open(\"input\") as file:\n offsets = [int(line.rstrip(\"\\n\")) for line in file]\n print(find_exit(offsets))\n\n\nmain()\n","repo_name":"LucasAndersson/adventofcode","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71763412426","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef get_times(log_file: str):\n times = []\n\n with open(log_file, 'r') as file:\n data_str_list = file.read().strip().split('\\n')\n\n for data_str in data_str_list:\n if not data_str[0].isnumeric():\n break\n times.append(float(data_str[:data_str.find(' ')]))\n\n return times\n\n\ndef get_time_diff(times: list):\n diff = []\n prev = times[0]\n for time in times[1:]:\n diff.append(time - prev)\n prev = time\n\n return diff\n\n\ndef draw_time_diff_graph(log_file):\n time_diff = get_time_diff(get_times(log_file))\n plt.ylabel('Time difference (sec)')\n plt.xlabel('Number of data')\n plt.plot(time_diff)\n plt.show()\n\n\nif __name__ == \"__main__\":\n time_diff = get_time_diff(get_times(\"../raw/log.txt\"))\n print(\"Average:\", np.mean(time_diff))\n print(\"STD:\", np.std(time_diff))\n\n plt.ylabel('Time difference (sec)')\n plt.xlabel('Number of data')\n plt.plot(time_diff)\n 
plt.show()\n","repo_name":"hoon0422/UMass_Clue","sub_path":"web_crawler/log_analysis.py","file_name":"log_analysis.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33657502674","text":"import librosa\nimport numpy as np\nimport scipy.signal as signal\n\n\ndef compute_mfcc_features(y, sr):\n mfcc_feat = librosa.feature.mfcc(y, sr, n_mfcc=12, n_mels=12, hop_length=int(sr / 100), n_fft=int(sr / 40)).T\n s, phase = librosa.magphase(librosa.stft(y, hop_length=int(sr / 100)))\n rms = librosa.feature.rms(S=s).T\n return np.hstack([mfcc_feat, rms])\n\n\ndef compute_delta_features(mfcc_feat):\n return np.vstack([librosa.feature.delta(mfcc_feat.T), librosa.feature.delta(mfcc_feat.T, order=2)]).T\n\n\ndef collapse_to_start_and_end_frame(instance_list):\n return (instance_list[0], instance_list[-1])\n\n\ndef frame_span_to_time_span(frame_span):\n return (frame_span[0] / 100., frame_span[1] / 100.)\n\n\ndef format_features(mfcc_feat, delta_feat, index, window_size=37):\n return np.append(mfcc_feat[index - window_size:index + window_size],\n delta_feat[index - window_size:index + window_size])\n\n\ndef lowpass(sig, filter_order=2, cutoff=0.01):\n b, a = signal.butter(filter_order, cutoff, output='ba')\n return signal.filtfilt(b, a, sig)\n\n\ndef get_laughter_instances(probs, threshold=0.5, min_length=0.2):\n instances = []\n current_list = []\n for i in range(len(probs)):\n if np.min(probs[i:i + 1]) > threshold:\n current_list.append(i)\n else:\n if len(current_list) > 0:\n instances.append(current_list)\n current_list = []\n instances = [frame_span_to_time_span(collapse_to_start_and_end_frame(i)) for i in instances if len(i) > min_length]\n return instances\n\n\ndef get_feature_list(y, sr, window_size=37):\n mfcc_feat = compute_mfcc_features(y, sr)\n delta_feat = compute_delta_features(mfcc_feat)\n zero_pad_mfcc = np.zeros((window_size, mfcc_feat.shape[1]))\n zero_pad_delta = np.zeros((window_size, delta_feat.shape[1]))\n padded_mfcc_feat = np.vstack([zero_pad_mfcc, mfcc_feat, zero_pad_mfcc])\n padded_delta_feat = np.vstack([zero_pad_delta, delta_feat, zero_pad_delta])\n feature_list = []\n for i in range(window_size, len(mfcc_feat) + window_size):\n feature_list.append(format_features(padded_mfcc_feat, padded_delta_feat, i, window_size))\n feature_list = np.array(feature_list)\n return feature_list\n\n\ndef segment_laughs(input_path, model, threshold=0.5, min_length=0.2):\n y, sr = librosa.load(input_path, sr=8000)\n\n feature_list = get_feature_list(y, sr)\n\n probs = model.predict(feature_list)\n probs = probs.reshape((len(probs),))\n filtered = lowpass(probs)\n instances = get_laughter_instances(filtered, threshold=threshold, min_length=min_length)\n\n if len(instances) > 0 and instances[0][1] - instances[0][0] >= min_length:\n return ([{'start': i[0], 'end': i[1]} for i in instances])\n\n else:\n return \"No laugh\"\n","repo_name":"SsAaFfIiKk/Sound_analyze","sub_path":"laugh_segmenter.py","file_name":"laugh_segmenter.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21029765731","text":"# Python>3.0\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt \nimport matplotlib.patches as pat \nimport matplotlib.colors as cl \nimport math \n\n#colors_list = list(cl._colors_full_map.values())\ncolors_list = cl.cnames \nfig = plt.figure(figsize=(4,3),dpi=300)\nax = 
fig.add_subplot(111)\n\nratio = 1.0/3.0\ncnt = math.ceil(math.sqrt(len(colors_list)))\nx_cnt = cnt * ratio\ny_cnt = cnt / ratio\nx = 0\ny = 0\nw = 1 / x_cnt\nh = 1 / y_cnt\n\nfor c in colors_list:\n pos = (x / x_cnt, y / y_cnt)\n ax.add_patch(pat.Rectangle(pos,w,h,color=c))\n ax.annotate(c,xy=pos)\n if y >= y_cnt -1:\n x += 1\n y = 0\n else:\n y += 1\nplt.show()","repo_name":"iphylq/myspalette","sub_path":"palettes.py","file_name":"palettes.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39719312934","text":"\"\"\"\nGiven an integer A. Find and Return first positive A integers in ascending order containing only digits 1, 2 and 3.\nNOTE: All the A integers will fit in 32 bit integer.\n\"\"\"\n\nclass Queue:\n def __init__(self, size):\n self.size = size\n self.queue = [None]*self.size\n self.r , self.f = -1, -1\n\n def enqueue(self, val):\n if (self.f == self.r != -1) or (self.f == -1 and self.r == self.size-1):\n #print(\"Full\")\n return None\n else:\n self.r = (self.r + 1)%self.size\n self.queue[self.r] = val\n \n def dequeue(self):\n if self.r == self.f == -1:\n #print(\"Empty\")\n return None\n else:\n self.f = (self.f + 1)%self.size\n temp = self.queue[self.f]\n if self.f == self.r:\n self.r, self.f = -1, -1\n return temp\n def front(self):\n if self.r == self.f == -1:\n #print(\"Empty\")\n return None\n else:\n return self.queue[(self.f+1)%self.size]\n\nclass Solution:\n def solve(self, A):\n queue = Queue(2*A)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n\n count = 0\n ans = []\n while True:\n temp = queue.dequeue()\n count+=1\n ans.append(temp)\n if count == A:\n return ans\n \n queue.enqueue(temp*10 + 1)\n queue.enqueue(temp*10 + 2)\n queue.enqueue(temp*10 + 3)\n\ntest = Solution()\nprint(test.solve(7))\n","repo_name":"anurag5398/DSA-Problems","sub_path":"Queue/NIntegers123.py","file_name":"NIntegers123.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42131494941","text":"class UnionFind:\n\n def __init__(self, n):\n self.parent = {}\n self.rank = {}\n for i in range(1, n + 1):\n # at first every node will be a parent of itself\n self.parent[i] = i\n # at first every node will have a rank of zero\n self.rank[i] = 0\n\n # the union find data structure has a find and union method; the first helps find the ultimate parent of a node as well as apply path compression to improve the time complexity of future find operations. 
The union method unites the two nodes by first finding their ultimate parents\n\n def find(self, node):\n # walk up until the node is its own parent (the root), compressing the path along the way\n while node != self.parent[node]:\n self.parent[node] = self.parent[self.parent[node]]\n node = self.parent[node]\n return node\n\n def union(self, n1, n2):\n # we first find the ultimate parent of the nodes\n p1, p2 = self.find(n1), self.find(n2)\n # if two nodes have the same parent then there is no need to unite them\n if p1 == p2:\n return False\n # now we need to determine how we attach these two nodes based on their rank\n if self.rank[p1] > self.rank[p2]:\n self.parent[p2] = p1\n elif self.rank[p1] < self.rank[p2]:\n self.parent[p1] = p2\n else:\n self.parent[p1] = p2\n self.rank[p2] += 1\n return True\n","repo_name":"mdiallo98/python-dataStructures-Algos","sub_path":"DataStructure/unionFind.py","file_name":"unionFind.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"32926048807","text":"def pareto(*pairs):\n res = []\n\n for i in pairs:\n for j in pairs:\n if i[0] <= j[0] and i[1] <= j[1] and (i[0] < j[0] or i[1] < j[1]):\n break\n else:\n res.append(i)\n\n return tuple(res)\n\nprint(pareto(*eval(input())))","repo_name":"satoad/pythonprac","sub_path":"20221004/2/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41570100011","text":"from datetime import datetime, timedelta\n\nheadless_setting = True\nbase_url_settings = \"https://www.booking.com/\"\n\n\ndef format_date(date_string, days=0):\n date_obj = datetime.strptime(date_string, '%d.%m.%Y')\n end_date = date_obj + timedelta(days=days)\n clicks = (date_obj.year - datetime.now().year) * 12 + (date_obj.month - datetime.now().month)\n start_date_str = date_obj.strftime('%Y-%m-%d')\n if datetime.now().day >= date_obj.day:\n clicks -= 1\n if days != 0:\n clicks += days // 30\n end_date_str = end_date.strftime('%Y-%m-%d')\n return clicks, start_date_str, end_date_str\n else:\n return clicks, start_date_str\n\n","repo_name":"elizabethmalikova/booking_web_tests","sub_path":"settings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23310162208","text":"from nvitop import ResourceMetricCollector, Device, collect_in_background\nimport sys\nimport os\nimport time\nimport logging\nimport json\nimport threading\n\n# Configure the logger to write to a file\n\ndef print_log(filename, collector_metric):\n with open(filename, \"a\") as file:\n json.dump(collector_metric, file)\n file.write(\"\\n\")\n\nif len(sys.argv) != 5:\n print(\"Usage: python3 collect.py filename tagname interval duration\")\n exit(1)\n\nfilename = sys.argv[1]\ntagname = sys.argv[2]\ninterval = float(sys.argv[3])\nduration = float(sys.argv[4])\n\nstart_time = time.time()\n\ndef run():\n collector = ResourceMetricCollector(devices=Device.cuda.all())\n with collector(tag=tagname):\n if time.time() - start_time < duration:\n threading.Timer(interval, run).start() # schedule the function to run\n collector_metric = collector.collect()\n print_log(filename, collector_metric)\n else:\n 
return\n\nrun()\n","repo_name":"Chivier/EIDF_util_scripts","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4848124704","text":"import models.GenericSwarmController as GenericSwarmController\nimport numpy as np\nimport math\nimport random\nfrom sim_tools.sim import SimParams\nfrom shapely import geometry\n\n#need to review paper, this is not working as expected, might need to add walls\nclass Boids(GenericSwarmController.GenericSwarmController): \n def __init__(self,align_gain,cohesion_gain,separation_gain,inertia,params=SimParams()):\n self.alignment_gain = align_gain\n self.cohesion_gain = cohesion_gain\n self.separation_gain = separation_gain\n self.inertia = inertia\n self.params = params\n \n def vel(self,agentPositions,agentVels,pos,v):\n if(len(agentPositions) == 0):\n return v*self.inertia\n v_gain = np.zeros(2)\n \n centroidPos = np.zeros(2)\n for position in agentPositions:\n centroidPos += position\n centroidPos /= len(agentPositions)\n\n v_gain += self.cohesion_gain*(centroidPos-pos)\n\n #pretty sure I need some kind of \n centroidVel = np.zeros(2)\n for vel in agentVels:\n centroidVel += vel\n centroidVel /= len(agentVels)\n\n v_gain += self.alignment_gain*centroidVel\n\n #steer-to-avoid\n origin = geometry.Point(pos[0],pos[1])\n\n # need to pass params for neighborhood stuff\n orientation = (v/np.linalg.norm(v))*self.params.neighbor_radius\n toward = geometry.Point(pos[0]+orientation[0],pos[1]+orientation[1]) \n\n velLine = geometry.LineString([origin,toward])\n\n separation = np.zeros(2)\n # this can be considered a property of the environment?, how is agent's size defined\n collision_distance = self.params.neighbor_radius/4\n\n closest = np.zeros(2)\n closestDist = np.inf\n\n for position in agentPositions:\n other = geometry.Point(position[0],position[1])\n collision = other.buffer(collision_distance)\n if velLine.intersects(collision):\n dist = np.linalg.norm(position-pos)\n if(dist < closestDist):\n closestDist = dist\n closest = position\n \n #assign separation\n if closestDist != np.inf:\n mag = 1/(closestDist**2)\n #figure out side\n diffPos = closest-pos\n #project onto v, grab remaining component as orthogonal direction\n v_hat = v/np.linalg.norm(v)\n d_v = np.dot(v_hat,diffPos)*v_hat\n remaining = diffPos - d_v\n if np.linalg.norm(remaining) > 0:\n remaining = remaining/np.linalg.norm(remaining)\n separation = remaining*mag\n\n v_gain += self.separation_gain*separation\n\n v_out = v*self.inertia + v_gain\n return v_out","repo_name":"wvu-robotics/REU_MatlabSim","sub_path":"matlab/REU_2022/Topic_1_ Imitating_Swarms/SwarmSimClassSeparationPy/models/SteerControl.py","file_name":"SteerControl.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31249043623","text":"'''\n Making asynchronous HTTP requests to write non-blocking code.\n\n Asynchronous routines are able to pause while waiting on their ultimate result to let other routines run in the meantime.\n So, asynchronous code, through the above mechanism, facilitates concurrent execution. 
It doesn't \"block\" other code from running, so we call it \"non-blocking\" code.\n\n HTTP requests are a classic example of something that is well-suited to asynchronicity because they involve waiting for a response from a server, during\n which time it would be convenient and efficient to have other code running.\n\n Here, we will run both asynchronous HTTP requests using aiohttp and synchronous HTTP requests using the requests library and compare.\n'''\n\n# pip install aiohttp==3.7.4.post0\n# pip install requests==2.25.1\n\n\n# We will use the GET request to get the data from Pokemon API\n\n# import asyncio\n# from time import time\n# import aiohttp\n\n# async def main():\n \n# async with aiohttp.ClientSession() as session:\n# pokemon_url = 'https://pokeapi.co/api/v2/pokemon/151'\n \n# async with session.get(pokemon_url) as resp:\n# pokemon = await resp.json()\n# print(pokemon['name'])\n\n# asyncio.run(main())\n\n'''\nIn this code we are creating a coroutine called \"main\", which we run with the asyncio event loop. (You can think of an event loop as something like a \nwhile True loop that monitors coroutines, taking feedback on what's idle, and looking around for things that can be executed in the meantime.)\n\nHere we are opening an aiohttp client session, a single object that can be used for quite a number of individual requests and by default can make connections\nwith up to 100 different servers at a time. With this session, we are making a request to the Pokemon API and then awaiting a response.\n\nThe async keyword basically tells the Python interpreter that the coroutine we are defining should be run asynchronously with an event loop.\nThe await keyword passes control back to the event loop, suspending the execution of the surrounding coroutine and letting the event loop run other things \nuntil the result that is being awaited is returned.\n'''\n\n'''\nMaking a large number of requests\n'''\n\n# import aiohttp\n# import asyncio\n# import time\n\n# start_time = time.time()\n\n# async def main():\n\n# async with aiohttp.ClientSession() as session:\n\n# # all 150 of the original Pokemon\n# for number in range(1, 151):\n# pokemon_url = f'https://pokeapi.co/api/v2/pokemon/{number}'\n# async with session.get(pokemon_url) as resp:\n# pokemon = await resp.json()\n# print(pokemon['name'])\n\n# asyncio.run(main())\n# print(\"---- %s seconds ---\" % (time.time() - start_time))\n\n\n'''\n It takes 28.33 seconds\n Let's compare with the synchronous execution of the HTTP requests using the requests library.\n\n'''\n\n# import requests\n# import time\n\n# start_time = time.time()\n\n# for number in range(1, 151):\n# url = f'https://pokeapi.co/api/v2/pokemon/{number}'\n# resp = requests.get(url)\n# pokemon = resp.json()\n# print(pokemon['name'])\n\n# print(\"---- %s seconds---\" % (time.time() - start_time))\n\n'''\nFor each consecutive request, we have to wait for the previous step to finish before even beginning the process. \nIt takes much longer because this code is waiting for 150 requests to finish sequentially.\nIt takes : 78.28 secs\n'''\n\n'''\nUtilizing asyncio for improved performance\n\nIn the original example, we are using \"await\" after each individual HTTP request, which isn't quite ideal.\nIt is still faster than the requests example because we are running everything in coroutines.\n\nInstead we can run all these requests \"concurrently\" as asyncio tasks and then check the results at the end, using \nasyncio.ensure_future and asyncio.gather\n\nThis is how it will work. 
If the code that actually makes the request is broken out into its own coroutine function, we can create a list of tasks,\nconsisting of futures for each request. We can then unpack this list to a gather call, which runs them all together.\nWhen we \"await\" this call to asyncio.gather, we will get back an iterable for all of the futures that were passed in, maintaining their order in the list.\nThis way we are only awaiting one time.\n'''\n\nimport aiohttp\nimport asyncio\nimport time\n\nstart_time = time.time()\n\nasync def get_pokemon(session, url):\n async with session.get(url) as resp:\n pokemon = await resp.json()\n return pokemon['name']\n\nasync def main():\n\n async with aiohttp.ClientSession() as session:\n\n tasks = []\n for number in range(1, 151):\n url = f'https://pokeapi.co/api/v2/pokemon/{number}'\n tasks.append(asyncio.ensure_future(get_pokemon(session, url)))\n\n original_pokemon = await asyncio.gather(*tasks)\n for pokemon in original_pokemon:\n print(pokemon)\n\n\nasyncio.run(main())\nprint(\"--- %s seconds --- \" % (time.time() - start_time))\n\n'''\n This takes 1.17 seconds. This example is completely non-blocking, so the total time to run all 150 requests is going to be roughly equal to the amount of \n time that the longest request took to run.\n'''\n\n","repo_name":"SuvroBaner/software_engineering","sub_path":"Python/async_http_requests.py","file_name":"async_http_requests.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41812054753","text":"def binary_search(keys, query):\r\n # write your code here\r\n low=0\r\n high=len(keys)-1\r\n while low<=high:\r\n mid=(low+high)//2\r\n if query==keys[mid]:\r\n return mid\r\n else:\r\n if query bool:\n raise NotImplementedError\n\n\n@dataclass\nclass MinimumConstraint(Constraint):\n minimum: int\n\n def validate(self, data: Any) -> bool:\n return data >= self.minimum\n\n\n@dataclass\nclass MaximumConstraint(Constraint):\n maximum: int\n\n def validate(self, data: Any) -> bool:\n return data <= self.maximum\n\n\n@dataclass\nclass ExclusiveMinimumConstraint(Constraint):\n exc_min: int\n\n def validate(self, data: Any) -> bool:\n return data > self.exc_min\n\n\n@dataclass\nclass ExclusiveMaximumConstraint(Constraint):\n exc_max: int\n\n def validate(self, data: Any) -> bool:\n return data < self.exc_max\n\n\n@dataclass\nclass MultipleOfConstraint(Constraint):\n mult_of: int\n\n def validate(self, data: Any) -> bool:\n return not (data % self.mult_of)\n\n\n@dataclass\nclass MinLengthConstraint(Constraint):\n min_len: int\n\n def validate(self, data: Any) -> bool:\n return len(data) >= self.min_len\n\n\n@dataclass\nclass MaxLengthConstraint(Constraint):\n max_len: int\n\n def validate(self, data: Any) -> bool:\n return len(data) <= self.max_len\n\n\n@dataclass\nclass PatternConstraint(Constraint):\n pattern: Pattern\n\n def validate(self, data: Any) -> bool:\n return self.pattern.match(data) is not None\n\n\n@dataclass\nclass MinItemsConstraint(Constraint):\n min_items: int\n\n def validate(self, data: Any) -> bool:\n return len(data) >= self.min_items\n\n\n@dataclass\nclass MaxItemsConstraint(Constraint):\n max_items: int\n\n def validate(self, data: Any) -> bool:\n return len(data) <= self.max_items\n\n\ndef to_hashable(data: Any) -> Any:\n if isinstance(data, list):\n return tuple(map(to_hashable, data))\n elif isinstance(data, dict):\n sorted_keys = sorted(data)\n return tuple(sorted_keys + [to_hashable(data[k]) for k in 
sorted_keys])\n else:\n return data\n\n\n@dataclass\nclass UniqueItemsConstraint(Constraint):\n unique: bool\n\n def __post_init__(self):\n assert self.unique\n\n def validate(self, data: Any) -> bool:\n return len(set(map(to_hashable, data))) == len(data)\n\n\n@dataclass\nclass MinPropertiesConstraint(Constraint):\n min_properties: int\n\n def validate(self, data: Any) -> bool:\n return len(data) >= self.min_properties\n\n\n@dataclass\nclass MaxPropertiesConstraint(Constraint):\n max_properties: int\n\n def validate(self, data: Any) -> bool:\n return len(data) <= self.max_properties\n\n\ndef format_error(err: Union[str, Callable[[Any], str]], data: Any) -> str:\n return err if isinstance(err, str) else err(data)\n\n\nErrorDict = Dict[ErrorKey, ValidationError]\n\n\ndef validate_constraints(\n data: Any, constraints: Tuple[Constraint, ...], children_errors: Optional[ErrorDict]\n) -> Any:\n for i in range(len(constraints)):\n constraint: Constraint = constraints[i]\n if not constraint.validate(data):\n errors: List[str] = [format_error(constraint.error, data)]\n for j in range(i + 1, len(constraints)):\n constraint = constraints[j]\n if not constraint.validate(data):\n errors.append(format_error(constraint.error, data))\n raise ValidationError(errors, children_errors or {})\n if children_errors:\n raise ValidationError([], children_errors)\n return data\n\n\ndef set_child_error(\n errors: Optional[ErrorDict], key: ErrorKey, error: ValidationError\n) -> ErrorDict:\n if errors is None:\n return {key: error}\n else:\n errors[key] = error\n return errors\n\n\nclass DeserializationMethod:\n def deserialize(self, data: Any) -> Any:\n raise NotImplementedError\n\n\n@dataclass\nclass RecMethod(DeserializationMethod):\n lazy: Lazy[DeserializationMethod]\n method: Optional[DeserializationMethod] = field(init=False)\n\n def __post_init__(self):\n self.method = None\n\n def deserialize(self, data: Any) -> Any:\n if self.method is None:\n self.method = self.lazy()\n return self.method.deserialize(data)\n\n\n@dataclass\nclass ValidatorMethod(DeserializationMethod):\n method: DeserializationMethod\n validators: Sequence[Validator]\n aliaser: Aliaser\n\n def deserialize(self, data: Any) -> Any:\n return validate(\n self.method.deserialize(data), self.validators, aliaser=self.aliaser\n )\n\n\n@dataclass\nclass CoercerMethod(DeserializationMethod):\n coercer: Coercer\n cls: type\n method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n return self.method.deserialize(self.coercer(self.cls, data))\n\n\n@dataclass\nclass TypeCheckMethod(DeserializationMethod):\n expected: AnyType # `type` would require exact match (i.e. 
no EnumMeta)\n fallback: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if isinstance(data, self.expected):\n return data\n return self.fallback.deserialize(data)\n\n\n@dataclass\nclass AnyMethod(DeserializationMethod):\n constraints: Dict[type, Tuple[Constraint, ...]]\n\n def deserialize(self, data: Any) -> Any:\n if type(data) in self.constraints:\n validate_constraints(data, self.constraints[type(data)], None)\n return data\n\n\n@dataclass\nclass ListCheckOnlyMethod(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n value_method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, list):\n raise bad_type(data, list)\n elt_errors: Optional[ErrorDict] = None\n for i, elt in enumerate(data):\n try:\n self.value_method.deserialize(elt)\n except ValidationError as err:\n elt_errors = set_child_error(elt_errors, i, err)\n validate_constraints(data, self.constraints, elt_errors)\n return data\n\n\n@dataclass\nclass ListMethod(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n value_method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, list):\n raise bad_type(data, list)\n elt_errors: Optional[ErrorDict] = None\n values: list = [None] * len(data)\n for i, elt in enumerate(data):\n try:\n values[i] = self.value_method.deserialize(elt)\n except ValidationError as err:\n elt_errors = set_child_error(elt_errors, i, err)\n validate_constraints(data, self.constraints, elt_errors)\n return values\n\n\n@dataclass\nclass SetMethod(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n value_method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, list):\n raise bad_type(data, list)\n elt_errors: ErrorDict = {}\n values: set = set()\n for i, elt in enumerate(data):\n try:\n values.add(self.value_method.deserialize(elt))\n except ValidationError as err:\n elt_errors = set_child_error(elt_errors, i, err)\n validate_constraints(data, self.constraints, elt_errors)\n return values\n\n\n@dataclass\nclass FrozenSetMethod(DeserializationMethod):\n method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n return frozenset(self.method.deserialize(data))\n\n\n@dataclass\nclass VariadicTupleMethod(DeserializationMethod):\n method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n return tuple(self.method.deserialize(data))\n\n\n@dataclass\nclass LiteralMethod(DeserializationMethod):\n value_map: dict\n error: Union[str, Callable[[Any], str]]\n coercer: Optional[Coercer]\n types: Tuple[type, ...]\n\n def deserialize(self, data: Any) -> Any:\n try:\n return self.value_map[data]\n except KeyError:\n if self.coercer is not None:\n for cls in self.types:\n try:\n return self.value_map[self.coercer(cls, data)]\n except KeyError:\n pass\n raise ValidationError(format_error(self.error, data))\n except TypeError:\n raise bad_type(data, *self.types)\n\n\n@dataclass\nclass MappingCheckOnly(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n key_method: DeserializationMethod\n value_method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, dict):\n raise bad_type(data, dict)\n item_errors: Optional[ErrorDict] = None\n for key, value in data.items():\n try:\n self.key_method.deserialize(key)\n self.value_method.deserialize(value)\n except ValidationError as err:\n item_errors = set_child_error(item_errors, key, err)\n validate_constraints(data, 
self.constraints, item_errors)\n return data\n\n\n@dataclass\nclass MappingMethod(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n key_method: DeserializationMethod\n value_method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, dict):\n raise bad_type(data, dict)\n item_errors: Optional[ErrorDict] = None\n items: dict = {}\n for key, value in data.items():\n try:\n items[self.key_method.deserialize(key)] = self.value_method.deserialize(\n value\n )\n except ValidationError as err:\n item_errors = set_child_error(item_errors, key, err)\n validate_constraints(data, self.constraints, item_errors)\n return items\n\n\n@dataclass\nclass Field:\n name: str\n alias: str\n method: DeserializationMethod\n required: bool\n required_by: Optional[AbstractSet[str]]\n fall_back_on_default: bool\n\n\n@dataclass\nclass FlattenedField:\n name: str\n aliases: Tuple[str, ...]\n method: DeserializationMethod\n fall_back_on_default: bool\n\n\n@dataclass\nclass PatternField:\n name: str\n pattern: Pattern\n method: DeserializationMethod\n fall_back_on_default: bool\n\n\n@dataclass\nclass AdditionalField:\n name: str\n method: DeserializationMethod\n fall_back_on_default: bool\n\n\n@dataclass\nclass Constructor:\n cls: Any # cython doesn't handle type subclasses properly\n\n def construct(self, fields: Dict[str, Any]) -> Any:\n raise NotImplementedError\n\n\nclass NoConstructor(Constructor):\n def construct(self, fields: Dict[str, Any]) -> Any:\n return fields\n\n\ndef PyObject_Call(obj, args, kwargs):\n return obj(*args, **kwargs)\n\n\nclass RawConstructor(Constructor):\n def construct(self, fields: Dict[str, Any]) -> Any:\n return PyObject_Call(self.cls, (), fields)\n\n\nclass RawConstructorCopy(Constructor):\n def construct(self, fields: Dict[str, Any]) -> Any:\n return self.cls(**fields)\n\n\n@dataclass\nclass DefaultField:\n name: str\n default_value: Any # https://github.com/cython/cython/issues/4383\n\n\n@dataclass\nclass FactoryField:\n name: str\n factory: Callable\n\n\n@dataclass\nclass FieldsConstructor(Constructor):\n nb_fields: int\n default_fields: Tuple[DefaultField, ...]\n factory_fields: Tuple[FactoryField, ...]\n\n def construct(self, fields: Any) -> Any: # fields can be a dict subclass\n obj = object.__new__(self.cls)\n obj_dict: dict = obj.__dict__\n obj_dict.update(fields)\n if len(fields) != self.nb_fields:\n for default_field in self.default_fields:\n if default_field.name not in obj_dict:\n obj_dict[default_field.name] = default_field.default_value\n for factory_field in self.factory_fields:\n if factory_field.name not in obj_dict:\n obj_dict[factory_field.name] = factory_field.factory()\n return obj\n\n\n@dataclass\nclass SimpleObjectMethod(DeserializationMethod):\n constructor: Constructor\n fields: Tuple[Field, ...]\n all_aliases: AbstractSet[str]\n typed_dict: bool\n missing: str\n unexpected: str\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, dict):\n raise bad_type(data, dict)\n fields_count: int = 0\n field_errors: Optional[dict] = None\n for field in self.fields:\n if field.alias in data:\n fields_count += 1\n try:\n field.method.deserialize(data[field.alias])\n except ValidationError as err:\n if field.required or not field.fall_back_on_default:\n field_errors = set_child_error(field_errors, field.alias, err)\n elif field.required:\n field_errors = set_child_error(\n field_errors, field.alias, ValidationError(self.missing)\n )\n if len(data) != fields_count and not self.typed_dict:\n for key in 
data.keys() - self.all_aliases:\n field_errors = set_child_error(\n field_errors, key, ValidationError(self.unexpected)\n )\n if field_errors:\n raise ValidationError([], field_errors)\n return self.constructor.construct(data)\n\n\ndef extend_errors(\n errors: Optional[List[ErrorMsg]], messages: Sequence[ErrorMsg]\n) -> List[ErrorMsg]:\n if errors is None:\n return list(messages)\n else:\n errors.extend(messages)\n return errors\n\n\ndef update_children_errors(\n errors: Optional[Dict[ErrorKey, ValidationError]],\n children: Mapping[ErrorKey, ValidationError],\n) -> Dict[ErrorKey, ValidationError]:\n if errors is None:\n return dict(children)\n else:\n errors.update(children)\n return errors\n\n\n@dataclass\nclass ObjectMethod(DeserializationMethod):\n constructor: Constructor\n constraints: Tuple[Constraint, ...]\n fields: Tuple[Field, ...]\n flattened_fields: Tuple[FlattenedField, ...]\n pattern_fields: Tuple[PatternField, ...]\n additional_field: Optional[AdditionalField]\n all_aliases: AbstractSet[str]\n additional_properties: bool\n typed_dict: bool\n validators: Tuple[Validator, ...]\n init_defaults: Tuple[Tuple[str, Optional[Callable[[], Any]]], ...]\n post_init_modified: AbstractSet[str]\n aliaser: Aliaser\n missing: str\n unexpected: str\n discriminator: Optional[str]\n aggregate_fields: bool = field(init=False)\n\n def __post_init__(self):\n self.aggregate_fields = bool(\n self.flattened_fields\n or self.pattern_fields\n or self.additional_field is not None\n )\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, dict):\n raise bad_type(data, dict)\n values: dict = {}\n fields_count: int = 0\n errors: Optional[list] = None\n try:\n validate_constraints(data, self.constraints, None)\n except ValidationError as err:\n errors = list(err.messages)\n field_errors: Optional[dict] = None\n for field in self.fields:\n if field.alias in data:\n fields_count += 1\n try:\n values[field.name] = field.method.deserialize(data[field.alias])\n except ValidationError as err:\n if field.required or not field.fall_back_on_default:\n field_errors = set_child_error(field_errors, field.alias, err)\n elif field.required:\n field_errors = set_child_error(\n field_errors, field.alias, ValidationError(self.missing)\n )\n elif field.required_by is not None and not field.required_by.isdisjoint(\n data\n ):\n requiring = sorted(field.required_by & data.keys())\n error = ValidationError([self.missing + f\" (required by {requiring})\"])\n field_errors = set_child_error(field_errors, field.alias, error)\n if self.aggregate_fields:\n remain = data.keys() - self.all_aliases\n for flattened_field in self.flattened_fields:\n flattened: dict = {\n alias: data[alias]\n for alias in flattened_field.aliases\n if alias in data\n }\n remain.difference_update(flattened)\n try:\n values[flattened_field.name] = flattened_field.method.deserialize(\n flattened\n )\n except ValidationError as err:\n if not flattened_field.fall_back_on_default:\n errors = extend_errors(errors, err.messages)\n field_errors = update_children_errors(\n field_errors, err.children\n )\n for pattern_field in self.pattern_fields:\n matched: dict = {\n key: data[key] for key in remain if pattern_field.pattern.match(key)\n }\n remain.difference_update(matched)\n try:\n values[pattern_field.name] = pattern_field.method.deserialize(\n matched\n )\n except ValidationError as err:\n if not pattern_field.fall_back_on_default:\n errors = extend_errors(errors, err.messages)\n field_errors = update_children_errors(\n field_errors, 
err.children\n )\n if self.additional_field is not None:\n additional: dict = {key: data[key] for key in remain}\n try:\n values[\n self.additional_field.name\n ] = self.additional_field.method.deserialize(additional)\n except ValidationError as err:\n if not self.additional_field.fall_back_on_default:\n errors = extend_errors(errors, err.messages)\n field_errors = update_children_errors(\n field_errors, err.children\n )\n elif remain:\n if not self.additional_properties:\n for key in remain:\n if key != self.discriminator:\n field_errors = set_child_error(\n field_errors, key, ValidationError(self.unexpected)\n )\n elif self.typed_dict:\n for key in remain:\n values[key] = data[key]\n elif len(data) != fields_count:\n if not self.additional_properties:\n for key in data.keys() - self.all_aliases:\n if key != self.discriminator:\n field_errors = set_child_error(\n field_errors, key, ValidationError(self.unexpected)\n )\n elif self.typed_dict:\n for key in data.keys() - self.all_aliases:\n values[key] = data[key]\n if self.validators:\n init = None\n if self.init_defaults:\n init = {}\n for name, default_factory in self.init_defaults:\n if name in values:\n init[name] = values[name]\n elif not field_errors or name not in field_errors:\n assert default_factory is not None\n init[name] = default_factory()\n aliases = values.keys()\n # Don't keep validators when all dependencies are default\n validators = [\n v for v in self.validators if not v.dependencies.isdisjoint(aliases)\n ]\n if field_errors or errors:\n error = ValidationError(errors or [], field_errors or {})\n invalid_fields = self.post_init_modified\n if field_errors:\n invalid_fields = invalid_fields | field_errors.keys()\n try:\n validate(\n ValidatorMock(self.constructor.cls, values),\n [\n v\n for v in validators\n if v.dependencies.isdisjoint(invalid_fields)\n ],\n init,\n aliaser=self.aliaser,\n )\n except ValidationError as err:\n error = merge_errors(error, err)\n raise error\n obj = self.constructor.construct(values)\n return validate(obj, validators, init, aliaser=self.aliaser)\n elif field_errors or errors:\n raise ValidationError(errors or [], field_errors or {})\n return self.constructor.construct(values)\n\n\nclass NoneMethod(DeserializationMethod):\n def deserialize(self, data: Any) -> Any:\n if data is not None:\n raise bad_type(data, NoneType)\n return data\n\n\nclass IntMethod(DeserializationMethod):\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, int) or isinstance(data, bool):\n raise bad_type(data, int)\n return data\n\n\nclass FloatMethod(DeserializationMethod):\n def deserialize(self, data: Any) -> Any:\n if isinstance(data, float):\n return data\n elif isinstance(data, int):\n return float(data)\n else:\n raise bad_type(data, float)\n\n\nclass StrMethod(DeserializationMethod):\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, str):\n raise bad_type(data, str)\n return data\n\n\nclass BoolMethod(DeserializationMethod):\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, bool):\n raise bad_type(data, bool)\n return data\n\n\n@dataclass\nclass ConstrainedIntMethod(IntMethod):\n constraints: Tuple[Constraint, ...]\n\n def deserialize(self, data: Any) -> Any:\n return validate_constraints(super().deserialize(data), self.constraints, None)\n\n\n@dataclass\nclass ConstrainedFloatMethod(FloatMethod):\n constraints: Tuple[Constraint, ...]\n\n def deserialize(self, data: Any) -> Any:\n return validate_constraints(super().deserialize(data), self.constraints, 
None)\n\n\n@dataclass\nclass ConstrainedStrMethod(StrMethod):\n constraints: Tuple[Constraint, ...]\n\n def deserialize(self, data: Any) -> Any:\n return validate_constraints(super().deserialize(data), self.constraints, None)\n\n\n@dataclass\nclass SubprimitiveMethod(DeserializationMethod):\n cls: type\n method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n return self.cls(self.method.deserialize(data))\n\n\n@dataclass\nclass TupleMethod(DeserializationMethod):\n constraints: Tuple[Constraint, ...]\n min_len_error: Union[str, Callable[[Any], str]]\n max_len_error: Union[str, Callable[[Any], str]]\n elt_methods: Tuple[DeserializationMethod, ...]\n\n def deserialize(self, data: Any) -> Any:\n if not isinstance(data, list):\n raise bad_type(data, list)\n data_len = len(data)\n if data_len != len(self.elt_methods):\n if data_len < len(self.elt_methods):\n raise ValidationError(format_error(self.min_len_error, data))\n elif data_len > len(self.elt_methods):\n raise ValidationError(format_error(self.max_len_error, data))\n else:\n raise NotImplementedError\n elt_errors: Optional[ErrorDict] = None\n elts: list = [None] * len(self.elt_methods)\n for i, elt_method in enumerate(self.elt_methods):\n try:\n elts[i] = elt_method.deserialize(data[i])\n except ValidationError as err:\n elt_errors = set_child_error(elt_errors, i, err) # keep the returned dict, matching the other methods; otherwise element errors are silently dropped\n validate_constraints(data, self.constraints, elt_errors)\n return tuple(elts)\n\n\n@dataclass\nclass OptionalMethod(DeserializationMethod):\n value_method: DeserializationMethod\n coercer: Optional[Coercer]\n\n def deserialize(self, data: Any) -> Any:\n if data is None:\n return None\n try:\n return self.value_method.deserialize(data)\n except ValidationError as err:\n if self.coercer is not None and self.coercer(NoneType, data) is None:\n return None\n else:\n raise merge_errors(err, bad_type(data, NoneType))\n\n\n@dataclass\nclass UnionByTypeMethod(DeserializationMethod):\n method_by_cls: Dict[type, DeserializationMethod]\n\n def deserialize(self, data: Any) -> Any:\n try:\n method: DeserializationMethod = self.method_by_cls[type(data)]\n return method.deserialize(data)\n except KeyError:\n raise bad_type(data, *self.method_by_cls) from None\n except ValidationError as err:\n other_classes = (cls for cls in self.method_by_cls if cls is not type(data))\n raise merge_errors(err, bad_type(data, *other_classes))\n\n\n@dataclass\nclass UnionMethod(DeserializationMethod):\n alt_methods: Tuple[DeserializationMethod, ...]\n\n def deserialize(self, data: Any) -> Any:\n error = None\n for i, alt_method in enumerate(self.alt_methods):\n try:\n return alt_method.deserialize(data)\n except ValidationError as err:\n error = merge_errors(error, err)\n assert error is not None\n raise error\n\n\n@dataclass\nclass ConversionMethod(DeserializationMethod):\n converter: Converter\n method: DeserializationMethod\n\n def deserialize(self, data: Any) -> Any:\n return self.converter(self.method.deserialize(data))\n\n\n@dataclass\nclass ConversionWithValueErrorMethod(ConversionMethod):\n def deserialize(self, data: Any) -> Any:\n value = self.method.deserialize(data)\n try:\n return self.converter(value)\n except ValueError as err:\n raise ValidationError(str(err))\n\n\n@dataclass\nclass ConversionAlternative:\n converter: Converter\n method: DeserializationMethod\n value_error: bool\n\n\n@dataclass\nclass ConversionUnionMethod(DeserializationMethod):\n alternatives: Tuple[ConversionAlternative, ...]\n\n def deserialize(self, data: Any) -> Any:\n error = None\n 
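# Try each alternative in order: deserialize first, then convert; the first\n # alternative that succeeds end-to-end wins, otherwise the merged errors are raised.\n for alternative in 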
self.alternatives:\n try:\n value = alternative.method.deserialize(data)\n except ValidationError as err:\n error = merge_errors(error, err)\n continue\n try:\n return alternative.converter(value)\n except ValidationError as err:\n error = merge_errors(error, err)\n except ValueError as err:\n if not alternative.value_error:\n raise\n error = merge_errors(error, ValidationError(str(err)))\n assert error is not None\n raise error\n\n\n@dataclass\nclass DiscriminatorMethod(DeserializationMethod):\n alias: str\n mapping: Dict[str, DeserializationMethod]\n missing: str\n error: Union[str, Callable[[Any], str]]\n\n def deserialize(self, data: Any):\n if not isinstance(data, dict):\n raise bad_type(data, dict)\n if self.alias not in data:\n raise ValidationError([], {self.alias: ValidationError(self.missing)})\n try:\n method: DeserializationMethod = self.mapping[data[self.alias]]\n except (TypeError, KeyError):\n raise ValidationError(\n [],\n {\n self.alias: ValidationError(\n format_error(self.error, data[self.alias])\n )\n },\n )\n else:\n return method.deserialize(data)\n","repo_name":"wyfo/apischema","sub_path":"apischema/deserialization/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":28723,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"81"} +{"seq_id":"36143662624","text":"n=int(input(\"Enter the number of terms =\"))\r\na=0\r\nb=1\r\nif n==1:\r\n print(\"0\")\r\nelif n<0:\r\n print(\"please give positive numbers!!!\")\r\nelse:\r\n while n>0:\r\n print(a)\r\n c=a+b\r\n a=b\r\n b=c\r\n n=n-1\r\n \r\n \r\n","repo_name":"Savisrisundar/BASICS-OF-PYTHON","sub_path":"BASICS/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35201820918","text":"#PROBLEM LINK : https://www.hackerrank.com/challenges/chocolate-feast/problem\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef chocolateFeast(n, c, m):\n # Write your code here\n chocapiece = n//c\n wrapper = chocapiece\n while wrapper>=m:\n chocapiece+= wrapper//m\n wrapper = wrapper//m + wrapper%m\n \n return chocapiece\n \n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input().strip())\n\n for t_itr in range(t):\n first_multiple_input = input().rstrip().split()\n\n n = int(first_multiple_input[0])\n\n c = int(first_multiple_input[1])\n\n m = int(first_multiple_input[2])\n\n result = chocolateFeast(n, c, m)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"RajB07/DS-Algo-Problem-Solving","sub_path":"choclate feast.py","file_name":"choclate feast.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"38759590700","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.core.multiarray import ndarray\nfrom numpy import sign\nfrom typing import Optional, Union, Tuple\n\n\"\"\"\n1\tTopic Introduction\n--------------------------\nDetermine the roots of f(x) = -12 - 21x - 18x^2 - 2.75x^3 through various root-finding techniques.\n\n2\tTopic Theory/Approach\n--------------------------\nUse an incremental search first to bracket the roots and get a sense of where they lie.\nThen use the bisection and false position functions to refine the root estimates.\n\n3\tProblems\n--------------------------\n (a)\tProblem Statement\n \nDetermine the roots of f(x) = -12 - 21x - 18x^2 - 2.75x^3 between xl = -10 and xu = 10, and a\nstopping criterion of 0.1%. You should use your incremental function first to find all roots, and use\nthose ranges to get better results using the bisection and false position functions.\n\n (b)\tProblem Approach\nUtilize the bisection and false position techniques with a lower x of -10 and an upper x of 10, with a stopping\ncriterion of 0.001.\n\n (c)\tProblem Results\n\nIncremental:\t -5.25000000000019, -5.24900000000019\nBisection Root Estimate:\t -5.24901, -0.0\nFalse Position Root Estimate:\t-5.25927, 0.61139\n \n (d)\tProblem Discussion of Results\nThe incremental function returns the range in which the roots reside.\n\nThe bisection result is the estimated root of the function\nwithin the range of -10 to 10, using a stopping criterion of 100 iterations.\n\nThe false position result comes from a trial-and-error technique that determines the root\nof the function by repeatedly adjusting the bracket until the criterion is met or the root is found.\n\"\"\"\n\n\n
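# Editor's note (hedged, derived only from the results quoted above): the\n# incremental scan with dx = 0.001 brackets a root in (-5.250, -5.249); bisection\n# then repeatedly halves [-10, 10] (99 iterations here) to reach the printed\n# -5.24901 estimate.\n\n\n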
def incremental(f, xa, xb, dx):\n x1 = xa\n f1 = f(xa)\n\n x2 = xa + dx\n f2 = f(x2)\n\n while sign(f1) == sign(f2):\n if x1 >= xb:\n return None, None\n x1 = x2\n f1 = f2\n x2 = x1 + dx\n f2 = f(x2)\n else:\n print('Incremental:\\t' + str(x1) + ', ' + str(x2))\n return x1, x2\n\n\ndef false_position(f, xa, xb):\n x_lp = xa\n x_up = xb\n\n for i in range(1, 21, 1):\n x_rp = x_up - ((f(x_up) * (x_lp - x_up)) / (f(x_lp) - f(x_up)))\n if f(x_lp) * f(x_rp) < 0:\n x_up = x_rp\n else:\n x_lp = x_rp\n print('False Position Root Estimate:\\t' + str(round(x_rp, 5)) + ', ' + str(round(f(x_rp), 5)))\n plt.plot(x_rp, f(x_rp), 'b*')\n\n\ndef bisection(f, xa, xb):\n x_lower = xa\n x_upper = xb\n x_r = x_lower\n for i in range(1, 100, 1):\n x_r = ((x_upper + x_lower) / 2)\n if (f(x_lower) * f(x_r) < 0):\n x_upper = x_r\n else:\n x_lower = x_r\n print('Bisection Root Estimate:\\t' + str(round(x_r, 5)) + ', ' + str(round(f(x_r), 5)))\n plt.plot(x_r, f(x_r), 'g*')\n\n\nif __name__ == \"__main__\":\n x: Union[ndarray, Tuple[ndarray, Optional[float]]] = np.linspace(-10, 10)\n y = lambda x: -12 - 21 * x - 18 * (x ** 2) - 2.75 * (x ** 3)\n\n\n line, = plt.plot(x, y(x), 'r')\n line.set_antialiased(False) # turn off antialiasing\n\n plt.setp(line, 'linewidth', 1.0)\n\n roots1, roots2 = incremental(y, -10, 10, .001)\n bisection(y, -10, 10)\n false_position(y, -10, 10)\n\n plt.legend((line, ), ('f(x) = -12 - 21x - 18x^2 - 2.75x^3',))\n plt.grid(True)\n plt.show()\n","repo_name":"atv32/Scientific-Computations","sub_path":"Examples/polynomial_roots.py","file_name":"polynomial_roots.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27585107437","text":"import requests\n\ndef CheckStb(auth, library):\n response = requests.get(\n \"https://raw.githubusercontent.com/nothings/stb/master/README.md\"\n )\n readme = response.content.decode(\"utf-8\")\n\n lines = readme.splitlines()\n for line in lines:\n if line.startswith(\"**[\" + library + \".h](\" + library + \".h)**\"):\n return line.split(\"|\")[1].split(\" \")[1]\n","repo_name":"edomin/vgazer","sub_path":"vgazer/version/stb.py","file_name":"stb.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73962878345","text":"import time\nimport numpy as np\nfrom PIL import Image\n\n# https://github.com/albumentations-team/albumentations#comments\nimport cv2\n
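# NOTE (editor's gloss): the two cv2 settings below disable OpenCV's internal\n# threading and OpenCL, which the albumentations FAQ linked above recommends to\n# avoid deadlocks with fork-based multiprocessing data loaders.\n# from 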
imaginaire.utils.distributed import master_only_print as print\nimport albumentations as alb # noqa nopep8\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n\nIMG_EXTENSIONS = ('jpg', 'jpeg', 'png', 'ppm', 'bmp',\n 'pgm', 'tif', 'tiff', 'webp',\n 'JPG', 'JPEG', 'PNG', 'PPM', 'BMP',\n 'PGM', 'TIF', 'TIFF', 'WEBP')\nHDR_IMG_EXTENSIONS = ('hdr',)\nVIDEO_EXTENSIONS = 'mp4'\n\n\nclass Augmentor(object):\n r\"\"\"Handles data augmentation using albumentations library.\"\"\"\n\n def __init__(self, aug_list, individual_video_frame_aug_list, image_data_types, is_mask,\n keypoint_data_types, interpolator):\n r\"\"\"Initializes augmentation pipeline.\n\n Args:\n aug_list (list): List of augmentation operations in sequence.\n individual_video_frame_aug_list (list): List of augmentation operations in sequence that will be applied\n to individual frames of videos independently.\n image_data_types (list): List of keys in expected inputs.\n is_mask (dict): Whether this data type is discrete masks?\n keypoint_data_types (list): List of keys which are keypoints.\n \"\"\"\n\n self.aug_list = aug_list\n self.individual_video_frame_aug_list = individual_video_frame_aug_list\n self.image_data_types = image_data_types\n self.is_mask = is_mask\n self.crop_h, self.crop_w = None, None\n self.resize_h, self.resize_w = None, None\n self.resize_smallest_side = None\n self.max_time_step = 1\n self.keypoint_data_types = keypoint_data_types\n self.interpolator = interpolator\n\n self.augment_ops = self._build_augmentation_ops()\n self.individual_video_frame_augmentation_ops = self._build_individual_video_frame_augmentation_ops()\n # Both crop and resize can't be none at the same time.\n if self.crop_h is None and self.resize_smallest_side is None and \\\n self.resize_h is None:\n raise ValueError('resize_smallest_side, resize_h_w, '\n 'and crop_h_w cannot all be missing.')\n # If resize_smallest_side is given, resize_h_w should not be give.\n if self.resize_smallest_side is not None:\n assert self.resize_h is None, \\\n 'Cannot have both `resize_smallest_side` and `resize_h_w` set.'\n if self.resize_smallest_side is None and self.resize_h is None:\n self.resize_h, self.resize_w = self.crop_h, self.crop_w\n\n def _build_individual_video_frame_augmentation_ops(self):\n r\"\"\"Builds sequence of augmentation ops that will be applied to each frame in the video independently.\n Returns:\n (list of alb.ops): List of augmentation ops.\n \"\"\"\n augs = []\n for key, value in self.individual_video_frame_aug_list.items():\n if key == 'random_scale_limit':\n if type(value) == float:\n scale_limit_lb = scale_limit_ub = value\n p = 1\n else:\n scale_limit_lb = value['scale_limit_lb']\n scale_limit_ub = value['scale_limit_ub']\n p = value['p']\n augs.append(alb.RandomScale(scale_limit=(-scale_limit_lb, scale_limit_ub), p=p))\n elif key == 'random_crop_h_w':\n h, w = value.split(',')\n h, w = int(h), int(w)\n self.crop_h, self.crop_w = h, w\n augs.append(alb.PadIfNeeded(min_height=h, min_width=w))\n augs.append(alb.RandomCrop(h, w, always_apply=True, p=1))\n return augs\n\n def _build_augmentation_ops(self):\n r\"\"\"Builds sequence of augmentation ops.\n Returns:\n (list of alb.ops): List of augmentation ops.\n \"\"\"\n augs = []\n for key, value in self.aug_list.items():\n if key == 'resize_smallest_side':\n if isinstance(value, int):\n self.resize_smallest_side = value\n else:\n h, w = value.split(',')\n h, w = int(h), int(w)\n self.resize_smallest_side = (h, w)\n elif key == 'resize_h_w':\n h, w = value.split(',')\n h, w = 
int(h), int(w)\n self.resize_h, self.resize_w = h, w\n elif key == 'random_resize_h_w_aspect':\n aspect_start, aspect_end = value.find('('), value.find(')')\n aspect = value[aspect_start+1:aspect_end]\n aspect_min, aspect_max = aspect.split(',')\n h, w = value[:aspect_start].split(',')[:2]\n h, w = int(h), int(w)\n aspect_min, aspect_max = float(aspect_min), float(aspect_max)\n augs.append(alb.RandomResizedCrop(\n h, w, scale=(1, 1),\n ratio=(aspect_min, aspect_max), always_apply=True, p=1))\n self.resize_h, self.resize_w = h, w\n elif key == 'rotate':\n augs.append(alb.Rotate(\n limit=value, always_apply=True, p=1))\n elif key == 'random_rotate_90':\n augs.append(alb.RandomRotate90(always_apply=False, p=0.5))\n elif key == 'random_scale_limit':\n augs.append(alb.RandomScale(scale_limit=(0, value), p=1))\n elif key == 'random_crop_h_w':\n h, w = value.split(',')\n h, w = int(h), int(w)\n self.crop_h, self.crop_w = h, w\n augs.append(alb.RandomCrop(h, w, always_apply=True, p=1))\n elif key == 'center_crop_h_w':\n h, w = value.split(',')\n h, w = int(h), int(w)\n self.crop_h, self.crop_w = h, w\n augs.append(alb.CenterCrop(h, w, always_apply=True, p=1))\n elif key == 'horizontal_flip':\n # This is handled separately as we need to keep track if this\n # was applied in order to correctly modify keypoint data.\n if value:\n augs.append(alb.HorizontalFlip(always_apply=False, p=0.5))\n # The options below including contrast, blur, motion_blur, compression, gamma\n # were used during developing face-vid2vid.\n elif key == 'contrast':\n brightness_limit = value['brightness_limit']\n contrast_limit = value['contrast_limit']\n p = value['p']\n augs.append(alb.RandomBrightnessContrast(\n brightness_limit=brightness_limit, contrast_limit=contrast_limit, p=p))\n elif key == 'blur':\n blur_limit = value['blur_limit']\n p = value['p']\n augs.append(alb.Blur(blur_limit=blur_limit, p=p))\n elif key == 'motion_blur':\n blur_limit = value['blur_limit']\n p = value['p']\n augs.append(alb.MotionBlur(blur_limit=blur_limit, p=p))\n elif key == 'compression':\n quality_lower = value['quality_lower']\n p = value['p']\n augs.append(alb.ImageCompression(quality_lower=quality_lower, p=p))\n elif key == 'gamma':\n gamma_limit_lb = value['gamma_limit_lb']\n gamma_limit_ub = value['gamma_limit_ub']\n p = value['p']\n augs.append(alb.RandomGamma(gamma_limit=(gamma_limit_lb, gamma_limit_ub), p=p))\n elif key == 'max_time_step':\n self.max_time_step = value\n assert self.max_time_step >= 1, \\\n 'max_time_step has to be at least 1'\n else:\n raise ValueError('Unknown augmentation %s' % (key))\n return augs\n\n def _choose_image_key(self, inputs):\n r\"\"\"Choose key to replace with 'image' for input to albumentations.\n\n Returns:\n key (str): Chosen key to be replace with 'image'\n \"\"\"\n if 'image' in inputs:\n return 'image'\n for data_type in inputs:\n if data_type in self.image_data_types:\n return data_type\n\n def _choose_keypoint_key(self, inputs):\n r\"\"\"Choose key to replace with 'keypoints' for input to albumentations.\n Returns:\n key (str): Chosen key to be replace with 'keypoints'\n \"\"\"\n if not self.keypoint_data_types:\n return None\n if 'keypoints' in inputs:\n return 'keypoints'\n for data_type in inputs:\n if data_type in self.keypoint_data_types:\n return data_type\n\n def _create_augmentation_targets(self, inputs):\n r\"\"\"Create additional targets as required by the albumentation library.\n\n Args:\n inputs (dict): Keys are from self.augmentable_data_types. 
Values can\n be numpy.ndarray or list of numpy.ndarray\n (image or list of images).\n Returns:\n (dict):\n - targets (dict): Dict containing mapping of keys to image/mask types.\n - new_inputs (dict): Dict containing mapping of keys to data.\n \"\"\"\n # Get additional target list.\n targets, new_inputs = {}, {}\n for data_type in inputs:\n if data_type in self.keypoint_data_types:\n # Keypoint-type.\n target_type = 'keypoints'\n elif data_type in self.image_data_types:\n # Image-type.\n # Find the target type (image/mask) based on interpolation\n # method.\n if self.is_mask[data_type]:\n target_type = 'mask'\n else:\n target_type = 'image'\n else:\n raise ValueError(\n 'Data type: %s is not image or keypoint' % (data_type))\n\n current_data_type_inputs = inputs[data_type]\n if not isinstance(current_data_type_inputs, list):\n current_data_type_inputs = [current_data_type_inputs]\n\n # Create additional_targets and inputs when there are multiples.\n for idx, new_input in enumerate(current_data_type_inputs):\n key = data_type\n if idx > 0:\n key = '%s::%05d' % (key, idx)\n targets[key] = target_type\n new_inputs[key] = new_input\n\n return targets, new_inputs\n\n def _collate_augmented(self, augmented):\n r\"\"\"Collate separated images back into sequence, grouped by keys.\n\n Args:\n augmented (dict): Dict containing frames with keys of the form\n 'key', 'key::00001', 'key::00002', ..., 'key::N'.\n Returns:\n (dict):\n - outputs (dict): Dict with list of collated inputs, i.e. frames of\n same key are arranged in order ['key', 'key::00001', ..., 'key::N'].\n \"\"\"\n full_keys = sorted(augmented.keys())\n outputs = {}\n for full_key in full_keys:\n if '::' not in full_key:\n # First occurrence of this key.\n key = full_key\n outputs[key] = []\n else:\n key = full_key.split('::')[0]\n outputs[key].append(augmented[full_key])\n return outputs\n\n def _get_resize_h_w(self, height, width):\n r\"\"\"Get height and width to resize to, given smallest side.\n\n Args:\n height (int): Input image height.\n width (int): Input image width.\n Returns:\n (dict):\n - height (int): Height to resize image to.\n - width (int): Width to resize image to.\n \"\"\"\n if self.resize_smallest_side is None:\n return self.resize_h, self.resize_w\n\n if isinstance(self.resize_smallest_side, int):\n resize_smallest_height, resize_smallest_width = self.resize_smallest_side, self.resize_smallest_side\n else:\n resize_smallest_height, resize_smallest_width = self.resize_smallest_side\n\n if height * resize_smallest_width <= width * resize_smallest_height:\n new_height = resize_smallest_height\n new_width = int(np.round(new_height * width / float(height)))\n else:\n new_width = resize_smallest_width\n new_height = int(np.round(new_width * height / float(width)))\n return new_height, new_width\n\n def _perform_unpaired_augmentation(self, inputs, augment_ops):\n r\"\"\"Perform different data augmentation on different image inputs. Note that this operation only works\n on image data types.\n\n Args:\n inputs (dict): Keys are from self.image_data_types. 
Values are list\n of numpy.ndarray (list of images).\n augment_ops (list): The augmentation operations.\n Returns:\n (dict):\n - augmented (dict): Augmented inputs, with same keys as inputs.\n - is_flipped (dict): Flag which tells if images have been LR flipped.\n \"\"\"\n # Process each data type separately as this is unpaired augmentation.\n is_flipped = {}\n for data_type in inputs:\n assert data_type in self.image_data_types\n augmented, flipped_flag = self._perform_paired_augmentation(\n {data_type: inputs[data_type]}, augment_ops)\n inputs[data_type] = augmented[data_type]\n is_flipped[data_type] = flipped_flag\n return inputs, is_flipped\n\n def _perform_paired_augmentation(self, inputs, augment_ops):\n r\"\"\"Perform same data augmentation on all inputs.\n\n Args:\n inputs (dict): Keys are from self.augmentable_data_types. Values are\n list of numpy.ndarray (list of images).\n augment_ops (list): The augmentation operations.\n\n Returns:\n (dict):\n - augmented (dict): Augmented inputs, with same keys as inputs.\n - is_flipped (bool): Flag which tells if images have been LR flipped.\n \"\"\"\n # Different data types may have different sizes and we use the largest one as the original size.\n # Convert PIL images to numpy array.\n self.original_h, self.original_w = 0, 0\n for data_type in inputs:\n if data_type in self.keypoint_data_types or \\\n data_type not in self.image_data_types:\n continue\n for idx in range(len(inputs[data_type])):\n value = inputs[data_type][idx]\n # Get resize h, w.\n w, h = get_image_size(value)\n self.original_h, self.original_w = max(self.original_h, h), max(self.original_w, w)\n # self.original_h, self.original_w = h, w\n # self.resize_h, self.resize_w = self._get_resize_h_w(h, w)\n # Convert to numpy array with 3 dims (H, W, C).\n value = np.array(value)\n if value.ndim == 2:\n value = value[..., np.newaxis]\n inputs[data_type][idx] = value\n self.resize_h, self.resize_w = self._get_resize_h_w(self.original_h, self.original_w)\n\n # Add resize op to augmentation ops.\n aug_ops_with_resize = [alb.Resize(\n self.resize_h, self.resize_w, interpolation=getattr(cv2, self.interpolator), always_apply=1, p=1\n )] + augment_ops\n\n # Create targets.\n targets, new_inputs = self._create_augmentation_targets(inputs)\n extra_params = {}\n\n # Albumentation requires a key called 'image' and\n # a key called 'keypoints', if any keypoints are being passed in.\n # Arbitrarily choose one key of image type to be 'image'.\n chosen_image_key = self._choose_image_key(inputs)\n new_inputs['image'] = new_inputs.pop(chosen_image_key)\n targets['image'] = targets.pop(chosen_image_key)\n # Arbitrarily choose one key of keypoint type to be 'keypoints'.\n chosen_keypoint_key = self._choose_keypoint_key(inputs)\n if chosen_keypoint_key is not None:\n new_inputs['keypoints'] = new_inputs.pop(chosen_keypoint_key)\n targets['keypoints'] = targets.pop(chosen_keypoint_key)\n extra_params['keypoint_params'] = alb.KeypointParams(\n format='xy', remove_invisible=False)\n\n # Do augmentation.\n augmented = alb.ReplayCompose(\n aug_ops_with_resize, additional_targets=targets,\n **extra_params)(**new_inputs)\n augmentation_params = augmented.pop('replay')\n\n # Check if flipping has occurred.\n is_flipped = False\n for augmentation_param in augmentation_params['transforms']:\n if 'HorizontalFlip' in augmentation_param['__class_fullname__']:\n is_flipped = augmentation_param['applied']\n self.is_flipped = is_flipped\n\n # Replace the key 'image' with chosen_image_key, same for 'keypoints'.\n 
augmented[chosen_image_key] = augmented.pop('image')\n if chosen_keypoint_key is not None:\n augmented[chosen_keypoint_key] = augmented.pop('keypoints')\n\n # Pack images back into a sequence.\n augmented = self._collate_augmented(augmented)\n\n # Convert keypoint types to np.array from list.\n for data_type in self.keypoint_data_types:\n augmented[data_type] = np.array(augmented[data_type])\n\n return augmented, is_flipped\n\n def perform_augmentation(self, inputs, paired, augment_ops):\n r\"\"\"Entry point for augmentation.\n\n Args:\n inputs (dict): Keys are from self.augmentable_data_types. Values are\n list of numpy.ndarray (list of images).\n paired (bool): Apply same augmentation to all input keys?\n augment_ops (list): The augmentation operations.\n \"\"\"\n # Make sure that all inputs are of same size, else trouble will\n # ensue. This is because different images might have different\n # aspect ratios.\n # Check within data type.\n for data_type in inputs:\n if data_type in self.keypoint_data_types or \\\n data_type not in self.image_data_types:\n continue\n for idx in range(len(inputs[data_type])):\n if idx == 0:\n w, h = get_image_size(inputs[data_type][idx])\n else:\n this_w, this_h = get_image_size(inputs[data_type][idx])\n # assert this_w == w and this_h == h\n # assert this_w / (1.0 * this_h) == w / (1.0 * h)\n # Check across data types.\n if paired and self.resize_smallest_side is not None:\n for idx, data_type in enumerate(inputs):\n if data_type in self.keypoint_data_types or \\\n data_type not in self.image_data_types:\n continue\n if paired:\n return self._perform_paired_augmentation(inputs, augment_ops)\n else:\n return self._perform_unpaired_augmentation(inputs, augment_ops)\n\n\ndef load_from_lmdb(keys, lmdbs):\n r\"\"\"Load keys from lmdb handles.\n\n Args:\n keys (dict): This has data_type as key, and a list of paths into LMDB as\n values.\n lmdbs (dict): This has data_type as key, and LMDB handle as value.\n Returns:\n data (dict): This has data_type as key, and a list of decoded items from\n LMDBs as value.\n \"\"\"\n data = {}\n for data_type in keys:\n if data_type not in data:\n data[data_type] = []\n data_type_keys = keys[data_type]\n if not isinstance(data_type_keys, list):\n data_type_keys = [data_type_keys]\n for key in data_type_keys:\n data[data_type].append(lmdbs[data_type].getitem_by_path(\n key.encode(), data_type))\n return data\n\n\ndef load_from_folder(keys, handles):\n r\"\"\"Load keys from lmdb handles.\n\n Args:\n keys (dict): This has data_type as key, and a list of paths as\n values.\n handles (dict): This has data_type as key, and Folder handle as value.\n Returns:\n data (dict): This has data_type as key, and a list of decoded items from\n folders as value.\n \"\"\"\n data = {}\n for data_type in keys:\n if data_type not in data:\n data[data_type] = []\n data_type_keys = keys[data_type]\n if not isinstance(data_type_keys, list):\n data_type_keys = [data_type_keys]\n for key in data_type_keys:\n data[data_type].append(handles[data_type].getitem_by_path(\n key.encode(), data_type))\n return data\n\n\ndef load_from_object_store(keys, handles):\n r\"\"\"Load keys from AWS S3 handles.\n\n Args:\n keys (dict): This has data_type as key, and a list of paths as\n values.\n handles (dict): This has data_type as key, and Folder handle as value.\n Returns:\n data (dict): This has data_type as key, and a list of decoded items from\n folders as value.\n \"\"\"\n data = {}\n for data_type in keys:\n if data_type not in data:\n data[data_type] = []\n 
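# Each data type may map to a single path or a list of paths.\n 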
data_type_keys = keys[data_type]\n if not isinstance(data_type_keys, list):\n data_type_keys = [data_type_keys]\n for key in data_type_keys:\n while True:\n try:\n data[data_type].append(handles[data_type].getitem_by_path(key, data_type))\n except Exception as e:\n print(e)\n print(key, data_type)\n print('Retrying in 30 seconds')\n time.sleep(30)\n continue\n break\n return data\n\n\ndef get_paired_input_image_channel_number(data_cfg):\n r\"\"\"Get number of channels for the input image.\n\n Args:\n data_cfg (obj): Data configuration structure.\n Returns:\n num_channels (int): Number of input image channels.\n \"\"\"\n num_channels = 0\n for ix, data_type in enumerate(data_cfg.input_types):\n for k in data_type:\n if k in data_cfg.input_image:\n num_channels += data_type[k].num_channels\n print('Concatenate %s for input.' % data_type)\n print('\\tNum. of channels in the input image: %d' % num_channels)\n return num_channels\n\n\ndef get_paired_input_label_channel_number(data_cfg, video=False):\n r\"\"\"Get number of channels for the input label map.\n\n Args:\n data_cfg (obj): Data configuration structure.\n video (bool): Whether we are dealing with video data.\n Returns:\n num_channels (int): Number of input label map channels.\n \"\"\"\n num_labels = 0\n if not hasattr(data_cfg, 'input_labels'):\n return num_labels\n for ix, data_type in enumerate(data_cfg.input_types):\n for k in data_type:\n if k in data_cfg.input_labels:\n if hasattr(data_cfg, 'one_hot_num_classes') and k in data_cfg.one_hot_num_classes:\n num_labels += data_cfg.one_hot_num_classes[k]\n if getattr(data_cfg, 'use_dont_care', False):\n num_labels += 1\n else:\n num_labels += data_type[k].num_channels\n print('Concatenate %s for input.' % data_type)\n\n if video:\n num_time_steps = getattr(data_cfg.train, 'initial_sequence_length',\n None)\n num_labels *= num_time_steps\n num_labels += get_paired_input_image_channel_number(data_cfg) * (\n num_time_steps - 1)\n\n print('\\tNum. 
of channels in the input label: %d' % num_labels)\n return num_labels\n\n\ndef get_class_number(data_cfg):\n r\"\"\"Get number of classes for class-conditional GAN model\n\n Args:\n data_cfg (obj): Data configuration structure.\n\n Returns:\n (int): Number of classes.\n \"\"\"\n return data_cfg.num_classes\n\n\ndef get_crop_h_w(augmentation):\n r\"\"\"Get height and width of crop.\n\n Args:\n augmentation (dict): Dict of applied augmentations.\n\n Returns:\n (dict):\n - crop_h (int): Height of the image crop.\n - crop_w (int): Width of the image crop.\n \"\"\"\n print(augmentation.__dict__.keys())\n for k in augmentation.__dict__.keys():\n if 'crop_h_w' in k:\n filed = augmentation[k]\n crop_h, crop_w = filed.split(',')\n crop_h = int(crop_h)\n crop_w = int(crop_w)\n # assert crop_w == crop_h, 'This implementation only ' \\\n # 'supports square-shaped images.'\n print('\\tCrop size: (%d, %d)' % (crop_h, crop_w))\n return crop_h, crop_w\n raise AttributeError\n\n\ndef get_image_size(x):\n try:\n w, h = x.size\n except Exception:\n h, w, _ = x.shape\n return w, h\n","repo_name":"NVlabs/imaginaire","sub_path":"imaginaire/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":25028,"program_lang":"python","lang":"en","doc_type":"code","stars":3891,"dataset":"github-code","pt":"81"} +{"seq_id":"22066424567","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nclass extended_WCG_node:\n conflict_state = [\n [\"c\",\"g\",\"s\",\"t\",\"w\"],\n [\"g\",\"s\",\"t\",\"w\"],\n [\"c\",\"g\",\"s\",\"w\"],\n [\"c\",\"g\",\"t\",\"w\"],\n [\"g\",\"s\",\"w\"],\n [\"g\",\"t\",\"w\"],\n [\"c\",\"g\",\"w\"],\n [\"g\",\"w\"],\n [\"c\",\"s\",\"t\", \"w\"],\n [\"s\",\"t\",\"w\"],\n [\"c\",\"s\",\"w\"],\n [\"s\",\"w\"],\n [\"g\",\"s\",\"t\"],\n [\"c\",\"s\",\"t\"],\n [\"s\",\"t\"]\n ]\n \n def __init__(self, left=[\"w\", \"g\", \"c\", \"s\", \"t\"], right=[], boat_side=False, children=[]):\n self.left = left\n self.right = right\n self.boat_side = boat_side\n self.children = children\n \n def gen_brand(self, visited, parent_map):\n children = []\n # the boat is on the left\n if not self.boat_side:\n for i in self.left:\n new_left = self.left[:]\n new_left.remove(i)\n new_right = self.right[:]\n new_right.append(i)\n for j in new_left:\n next_left = new_left[:]\n next_left.remove(j)\n next_right = new_right[:]\n next_right.append(j)\n # considering instance when bringing 2 things at a time\n if sorted(next_left) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, next_left, next_right, not self.boat_side):\n child = extended_WCG_node(next_left, next_right, not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n # considering instance when bringing 1 thing at a time\n if sorted(new_left) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, new_left, new_right, not self.boat_side):\n child = extended_WCG_node(new_left, new_right, not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n # considering instance when the shepherd travel alone\n if sorted(self.left) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, self.left[:], self.right[:], not self.boat_side):\n child = extended_WCG_node(self.left, self.right, not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n # the boat is on the right\n else:\n for i in self.right:\n new_left = self.left[:]\n new_left.append(i)\n new_right = self.right[:]\n 
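# item i rides the boat back left: it was appended to new_left above and is removed from the right copy here\n 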
new_right.remove(i)\n for j in new_right:\n next_left = new_left[:]\n next_left.append(j)\n next_right = new_right[:]\n next_right.remove(j)\n if sorted(next_right) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, next_left, next_right, not self.boat_side):\n child = extended_WCG_node(next_left, next_right, not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n if sorted(new_right) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, new_left, new_right, not self.boat_side):\n child = extended_WCG_node(new_left, new_right, not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n if sorted(self.right) not in extended_WCG_node.conflict_state and not extended_WCG_node.check_visited(visited, self.left[:], self.right[:], not self.boat_side):\n child = extended_WCG_node(self.left[:], self.right[:], not self.boat_side, [])\n children.append(child)\n parent_map[child] = self\n self.children = children\n \n def __str__(self):\n return str(self.left) + \"~~~\" + str(self.right) + \"- The boat is on the \" + (\"Left\" if not self.boat_side else \"Right\")\n \n def check_visited(visited, left, right, boat_side):\n return any(\n sorted(left) == sorted(i.left) and\n sorted(right) == sorted(i.right) and\n boat_side == i.boat_side\n for i in visited\n )\n \ntime_complex = 0\nspace_complex = 1 \ndef seq_action(start, dfs=True):\n global time_complex\n global space_complex\n visit = [start]\n node = start\n visited = []\n parent_map = {start: None}\n while visit:\n node = visit.pop()\n time_complex = time_complex + 1 # take time when a node is taked out of the array\n if not extended_WCG_node.check_visited(visited, node.left, node.right, node.boat_side):\n visited.append(node)\n node.gen_brand(visited, parent_map)\n if dfs:\n visit = visit + node.children \n else:\n visit = node.children + visit\n space_complex = space_complex + len(node.children) # count the space when a node is generated \n if sorted(node.right) == sorted([\"c\", \"g\", \"s\", \"t\",\"w\"]):\n solution = []\n while node is not None:\n solution = [node] + solution\n node = parent_map[node]\n return solution\n return \"Solution not found\"\n\nif __name__ == \"__main__\":\n start = extended_WCG_node()\n\n # DFS\n RUN_DFS = seq_action(start)\n print(\"DFS solution : \")\n for node in RUN_DFS:\n print(node, ', ', end='')\n print(\"\\nTime complexity: \",time_complex)\n print(\"Space complexity: \",space_complex)\n \n # BFS\n time_complex = 0\n space_complex = 1\n RUN_BFS = seq_action(start, dfs= False)\n print(\"BFS solution : \")\n for node in RUN_BFS:\n print(node, ', ', end='')\n print(\"\\nTime complexity: \",time_complex)\n print(\"Space complexity: \",space_complex)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"huyhoang240101/extended-wolf-goat-cabbage","sub_path":"Capstone_project_final.py","file_name":"Capstone_project_final.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1647040849","text":"\ndef load_input(filename):\n res = []\n with open(filename, 'r') as f:\n for line in f:\n if line.startswith('noop'):\n res.append(None)\n elif line.startswith('addx'):\n res.append(int(line.split()[1]))\n return res\n\n\ndef signal_strengh(signal, nth):\n return signal[nth-1] * nth\n\n\ndef play_instruction(inst, val):\n if inst is None:\n return [val], val\n return [val, val], val+inst\n\n\nif __name__ == '__main__':\n 
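# Part 1 sums the signal strengths sampled at cycles 20, 60, ..., 220; part 2\n # renders the CRT, lighting a pixel whenever the 3-wide sprite overlaps it.\n 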
instructions = load_input('input')\n\n signal = []\n register = 1\n\n for inst in instructions:\n t, register = play_instruction(inst, register)\n signal += t\n\n print('part1', sum(signal_strengh(signal, n) for n in range(20, 240, 40)))\n\n CRT = []\n current = ''\n for ipos, pos in enumerate(signal):\n relpos = pos % 40\n if ipos % 40 == 0:\n CRT.append(current)\n current = ''\n drawpos = len(current)\n\n if relpos-1 <= drawpos <= relpos+1:\n current += '#'\n else:\n current += '.'\n\n CRT.append(current)\n\n print('\\n'.join(CRT))\n","repo_name":"Lawes/adventofcode","sub_path":"2022/day10/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73657039626","text":"from multiprocessing import Pool,Manager\nfrom functools import partial\nfrom PIL import Image\nfrom hilbert import *\n\n# Index to color\ndef fillColor(color,dim,order,i):\n color[i] = hilbertIndexInverse(dim,order,i)\n\ndef RGBTrav():\n manager = Manager()\n color = manager.dict()\n dim = 3\n order = 8\n p = Pool(8)\n p.map(partial(fillColor,color,dim,order), range(2** (dim*order)))\n return color\n\ndef makeRGBImageHilbert():\n dim = 2\n order = 12\n\n img = Image.new('RGB', (2**order, 2**order), \"white\")\n pixels = img.load()\n\n color = RGBTrav()\n\n count = 0\n for i in range(2 ** (order * dim)):\n coords = hilbertIndexInverse(dim,order,i)\n if i % (2 ** (order * dim - 5)) == 0:\n img.save(\"hilbertRGB\" + str(count) + \".png\")\n count = count+1\n pixels[coords[0],coords[1]] = (color[i][0], color[i][1], color[i][2])\n\n img.save(\"hilbertRGB.png\")\n\ndef makeRGBImageVanilla():\n dim = 2\n order = 12\n\n img = Image.new('RGB', (2**order, 2**order), \"white\")\n pixels = img.load()\n\n for i in range(img.size[0]):\n for j in range(img.size[1]):\n index = i*(2**order) + j\n b = (index) % 256\n g = ((index)/256) % 256\n r = ((index)/256/256) % 256\n pixels[i,j] = (b,g,r)\n\n img.save(\"vanillaRGB.png\")\n\ndef makeRGBHilbertImage2DVanilla():\n dim = 2\n order = 12\n\n img = Image.new('RGB', (2**order, 2**order), \"white\")\n pixels = img.load()\n\n for index in range(2 ** (order+order)):\n b = (index) % 256\n g = ((index)/256) % 256\n r = ((index)/256/256) % 256\n coords = hilbertIndexInverse(dim,order,index)\n pixels[coords[0], coords[1]] = (b,g,r)\n\n img.save(\"vanillaRGBhilbert.png\")\n\nmakeRGBImageHilbert()\n# makeRGBImageVanilla()\n# makeRGBHilbertImage2DVanilla()\n","repo_name":"magebeans/HilbertCurves","sub_path":"colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4306888545","text":"import os\n\nfrom setuptools import setup\n\nexec(open(\"pynwsradar/version.py\").read())\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"pynwsradar\",\n version=__version__,\n license=\"MIT License\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/MatthewFlamm/pynwsradar\",\n author=\"Matthew Flamm\",\n author_email=\"matthewflamm0@gmail.com\",\n description=\"Python library to retrieve radar from NWS/NOAA\",\n packages=[\"pynwsradar\"],\n include_package_data=True,\n install_requires=[\n \"requests\",\n \"Pillow\",\n \"numpy\",\n ],\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming 
Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\n \"console_scripts\": [\"pynwsradar=pynwsradar.console_script:main\"],\n },\n)\n","repo_name":"MatthewFlamm/pynwsradar","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70568929546","text":"from drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nfrom django.urls import path\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Employees Management API\",\n default_version='v1',\n description=\"Employees Management API System\",\n ),\n public=True,\n)\n\nurlpatterns = [\n path('', schema_view.with_ui('swagger', cache_timeout=0),\n name='schema-swagger'),\n]\n","repo_name":"civilcoder55/django-rest-framework-api","sub_path":"app/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17395301356","text":"import bisect, collections, copy, functools, heapq, itertools, math, random, string\nfrom operator import xor\nfrom typing import List, Optional, Tuple\nfrom heapq import heappushpop, heapreplace\n\nfrom sortedcontainers import SortedList, SortedDict, SortedSet\n\nfrom test_tool import ListNode, TreeNode, null, tree2array, parseInput\n\n\n\"\"\"\nSome hint\n\n10**6 = 1000000\n1 << 20 = 1048576\n10**9 = 1000000000\n1 << 30 = 1073741824\n\"\"\"\n\n\n# https://leetcode.cn/contest/weekly-contest-328/problems/difference-between-maximum-and-minimum-price-sum/\nclass Solution:\n def solve(self, n: int, edges: List[List[int]], price: List[int]) -> int:\n g = [[] for _ in range(n)]\n for x, y in edges:\n g[x].append(y)\n g[y].append(x)\n ans = 0\n\n def dfs(x: int, fa: int) -> Tuple[int, int]: # 返回带叶子的最大路径和, 不带叶子的最大路径和\n nonlocal ans\n mx_s1 = p = price[x]\n mx_s2 = 0\n for y in g[x]:\n if y == fa:\n continue\n s1, s2 = dfs(y, x)\n # 已遍历过的最大带叶子的路径和(s1) + 当前不带叶子的路径和\n # 已遍历过的最大不带叶子的路径和(s2) + 当前带叶子的路径和\n ans = max(ans, mx_s1 + s2, mx_s2 + s1)\n mx_s1 = max(mx_s1, s1 + p)\n mx_s2 = max(mx_s2, s2 + p) # 这里加上 p 是因为 x 必然不是叶子\n return mx_s1, mx_s2\n\n dfs(0, -1)\n return ans\n\n\n# print(Solution().solve())\n# return\n\ntestcase = \"\"\"\nn = 6, edges = [[0,1],[1,2],[1,3],[3,4],[3,5]], price = [9,8,7,6,10,5]\nn = 3, edges = [[0,1],[1,2]], price = [1,1,1]\n\"\"\"\n\nobj = Solution()\nfor i, args in enumerate(parseInput(testcase)):\n print(f\"\\nTestcase {i}: {args}\\n\")\n print(obj.solve(*args))\n","repo_name":"bcvi/leetcode","sub_path":"lc_Python/contest/T3.py","file_name":"T3.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"6657690312","text":"import discord\nfrom discord.ext import commands\nfrom discord import SlashCommandGroup, Option, OptionChoice, SlashCommandOptionType\n\n\ndef hacker_chugakuse(name: str, server: str):\n return \\\n f\"\"\"\nこんにちは、Twitterを始めてみました。{name}です。\n僕は{server}の住人と会話した事があり、かつ{server}のメンバーです。\nちなみに好きなパソコンは使えれば何でもいいです。\nよろしくお願いします🙏\n\"\"\"\n\n\nclass NankaCog(commands.Cog):\n\n def __init__(self, bot):\n print(\"start Nanka init\")\n self.bot = bot\n\n nanka = SlashCommandGroup(\"nanka\", \"なんか\")\n\n @nanka.command(name=\"hacker\", description=\"ハッカーになれるよ\")\n async def adding(\n self,\n ctx: discord.ApplicationContext,\n name: Option(str, description=\"名前\", 
default=\"ハッカー中学生\"),\n server: Option(str, description=\"サーバー名\", default=\"ふぃぼ鯖\")\n ):\n await ctx.respond(content=hacker_chugakuse(name, server))\n\n\ndef setup(bot):\n bot.add_cog(NankaCog(bot))\n","repo_name":"nikawamikan/bankan","sub_path":"bot/cogs/nanka.py","file_name":"nanka.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29148530223","text":"import numpy as np\r\n\r\nclass Bandit_sanity:\r\n def __init__(self, dim, noise, arms, size):\r\n self.dim = dim\r\n self.theta = np.random.uniform(low=-1, high=1, size=(dim,))\r\n self.arms = arms\r\n self.noise = noise * np.eye(self.arms)\r\n self.size = size\r\n \r\n def step(self):\r\n x = np.random.uniform(low=-1, high=1, size=(self.arms, self.dim))\r\n r = np.dot(x, self.theta)\r\n r_noise = np.random.multivariate_normal(r, self.noise)\r\n return x, r\r\n\r\n \r\nif __name__ == '__main__':\r\n b = Bandit_sanity(100, 1, 3)\r\n b.step()","repo_name":"ZeroWeight/NeuralTS","sub_path":"data_sanity.py","file_name":"data_sanity.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"39088903192","text":"\"\"\"A simple wrapper around the OAuth2 credentials library.\"\"\"\n\nfrom oauth2client import client\n\n\ndef get_for_service_account(client_email, private_key_path, scope=None):\n \"\"\"Gets the credentials for a service account.\n\n .. note::\n You should not need to use this function directly.\n Instead, use the helper methods provided in\n :func:`gcloud.datastore.__init__.get_connection`\n and\n :func:`gcloud.datastore.__init__.get_dataset`\n which use this method under the hood.\n\n :type client_email: string\n :param client_email: The e-mail attached to the service account.\n\n :type private_key_path: string\n :param private_key_path: The path to a private key file (this file was\n given to you when you created the service\n account).\n\n :type scope: string or tuple of strings\n :param scope: The scope against which to authenticate. 
(Different services\n require different scopes, check the documentation for which\n scope is required for the different levels of access to any\n particular API.)\n\n :rtype: :class:`oauth2client.client.SignedJwtAssertionCredentials`\n :returns: A new SignedJwtAssertionCredentials instance with the\n needed service account settings.\n \"\"\"\n return client.SignedJwtAssertionCredentials(\n service_account_name=client_email,\n private_key=open(private_key_path, 'rb').read(),\n scope=scope)\n","repo_name":"dhermes/test-gcloud-on-gae","sub_path":"application/vendor/gcloud/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"22649083947","text":"# !/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom run import PATH\nfrom PIL import Image\nimport math\nimport operator\nfrom functools import reduce\n\nimport cv2\nimport numpy as np\n\n\n# 均值哈希算法\ndef aHash(img):\n # 缩放为8*8\n img = cv2.resize(img, (8, 8), interpolation=cv2.INTER_CUBIC)\n # 转换为灰度图\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # s为像素和初值为0,hash_str为hash值初值为''\n s = 0\n hash_str = ''\n # 遍历累加求像素和\n for i in range(8):\n for j in range(8):\n s = s + gray[i, j]\n # 求平均灰度\n avg = s / 64\n # 灰度大于平均值为1相反为0生成图片的hash值\n for i in range(8):\n for j in range(8):\n if gray[i, j] > avg:\n hash_str = hash_str + '1'\n else:\n hash_str = hash_str + '0'\n return hash_str\n\n\n# 差值感知算法\ndef dHash(img):\n # 缩放8*8\n img = cv2.resize(img, (9, 8), interpolation=cv2.INTER_CUBIC)\n # 转换灰度图\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n hash_str = ''\n # 每行前一个像素大于后一个像素为1,相反为0,生成哈希\n for i in range(8):\n for j in range(8):\n if gray[i, j] > gray[i, j + 1]:\n hash_str = hash_str + '1'\n else:\n hash_str = hash_str + '0'\n return hash_str\n\n\n# Hash值对比\ndef cmpHash(hash1, hash2):\n n = 0\n # hash长度不同则返回-1代表传参出错\n if len(hash1) != len(hash2):\n return -1\n # 遍历判断\n for i in range(len(hash1)):\n # 不相等则n计数+1,n最终为相似度\n if hash1[i] != hash2[i]:\n n = n + 1\n return n\n\n\ndef getusedCarBanner():\n exp1 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_1.png\")\n exp2 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_2.png\")\n exp5 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_5.png\")\n exp6 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_6.png\")\n\n img1 = cv2.imread(exp1)\n img2 = cv2.imread(exp2)\n img5 = cv2.imread(exp5)\n img6 = cv2.imread(exp6)\n\n hash1 = aHash(img1)\n hash2 = aHash(img2)\n hash5 = aHash(img5)\n hash6 = aHash(img6)\n\n result = [hash1, hash2, hash5, hash6]\n result.sort()\n\n return result\n\n\ndef getElementImgHashById(driver, imgPath, elementId):\n element = driver.find_element_by_id(elementId)\n location = element.location\n size = element.size\n box = (location[\"x\"], location[\"y\"], location[\"x\"] + size[\"width\"], location[\"y\"] + size[\"height\"])\n driver.get_screenshot_as_file(imgPath)\n # 截取图片\n image = Image.open(imgPath)\n newImage = image.crop(box)\n newImage.save(imgPath)\n\n targetImg = cv2.imread(imgPath)\n hashValue = aHash(targetImg)\n print(hashValue)\n return hashValue\n\n\ndef getElementImgHashByXpath(driver, imgPath, elementPath):\n element = driver.find_element_by_xpath(elementPath)\n\n location = element.location\n size = element.size\n box = (location[\"x\"], location[\"y\"], location[\"x\"] + size[\"width\"], location[\"y\"] + size[\"height\"])\n\n driver.get_screenshot_as_file(imgPath)\n # 截取图片\n image = Image.open(imgPath)\n newImage = image.crop(box)\n 
def getusedCarBanner():\n exp1 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_1.png\")\n exp2 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_2.png\")\n exp5 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_5.png\")\n exp6 = PATH(\"./tmp/usedCar_screenshot/exp_topBanner_6.png\")\n\n img1 = cv2.imread(exp1)\n img2 = cv2.imread(exp2)\n img5 = cv2.imread(exp5)\n img6 = cv2.imread(exp6)\n\n hash1 = aHash(img1)\n hash2 = aHash(img2)\n hash5 = aHash(img5)\n hash6 = aHash(img6)\n\n result = [hash1, hash2, hash5, hash6]\n result.sort()\n\n return result\n\n\ndef getElementImgHashById(driver, imgPath, elementId):\n element = driver.find_element_by_id(elementId)\n location = element.location\n size = element.size\n box = (location[\"x\"], location[\"y\"], location[\"x\"] + size[\"width\"], location[\"y\"] + size[\"height\"])\n driver.get_screenshot_as_file(imgPath)\n # Crop the element region out of the screenshot\n image = Image.open(imgPath)\n newImage = image.crop(box)\n newImage.save(imgPath)\n\n targetImg = cv2.imread(imgPath)\n hashValue = aHash(targetImg)\n print(hashValue)\n return hashValue\n\n\ndef getElementImgHashByXpath(driver, imgPath, elementPath):\n element = driver.find_element_by_xpath(elementPath)\n\n location = element.location\n size = element.size\n box = (location[\"x\"], location[\"y\"], location[\"x\"] + size[\"width\"], location[\"y\"] + size[\"height\"])\n\n driver.get_screenshot_as_file(imgPath)\n # Crop the element region out of the screenshot\n image = Image.open(imgPath)\n newImage = image.crop(box)\n newImage.save(imgPath)\n\n targetImg = cv2.imread(imgPath)\n hashValue = aHash(targetImg)\n print(hashValue)\n return hashValue\n\ndef getElementImgHashByPredicate(driver, imgPath, elementValue):\n element = driver.find_element_by_ios_predicate(elementValue)\n\n location = element.location\n size = element.size\n box = (location[\"x\"], location[\"y\"], location[\"x\"] + size[\"width\"], location[\"y\"] + size[\"height\"])\n\n driver.get_screenshot_as_file(imgPath)\n # Crop the element region out of the screenshot\n image = Image.open(imgPath)\n newImage = image.crop(box)\n newImage.save(imgPath)\n\n targetImg = cv2.imread(imgPath)\n hashValue = aHash(targetImg)\n print(hashValue)\n return hashValue\n\n\nif __name__ == '__main__':\n # targetImage1 = PATH(\"./tmp/usedCar_screenshot/temp_screen9.png\")\n # targetImage2 = PATH(\"./tmp/usedCar_screenshot/temp_screen8.png\")\n #\n # img1 = cv2.imread(targetImage1)\n # img2 = cv2.imread(targetImage2)\n # hash1 = aHash(img1)\n # hash2 = aHash(img2)\n # print(hash1)\n # print(hash2)\n # n = cmpHash(hash1, hash2)\n # print('aHash distance: ' + str(n))\n #\n # hash1 = dHash(img1)\n # hash2 = dHash(img2)\n # print(hash1)\n # print(hash2)\n # n = cmpHash(hash1, hash2)\n # print(type(n))\n # print('dHash distance: ' + str(n))\n\n print(getusedCarBanner())\n","repo_name":"lk236225128/UIautocn","sub_path":"Base/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"81"} +{"seq_id":"23938503255","text":"from typing import Sequence, Literal\nfrom collections import OrderedDict\nimport numpy as np\n\nimport gym.spaces\n\nfrom intraday.frame import Frame\nfrom intraday.feature import Feature\n\n\nclass Snapshot(Feature):\n \"\"\"\n Saves prices of the n past frames in relation to the latest price\n \"\"\"\n \n def __init__(self,\n period: int = 10,\n write_to: Literal['frame', 'state', 'both'] = 'state'):\n assert isinstance(period, int) and (period > 0)\n super().__init__(write_to=write_to, period=period)\n prefix = f'snapshot_{self.period}_'\n self.names = [\n prefix + 'price',\n prefix + 'proxy',\n prefix + 'iou',\n prefix + 'volume',\n prefix + 'tr',\n ]\n if write_to in {'state', 'both'}:\n self.spaces = OrderedDict({\n self.names[0]: gym.spaces.Box(-1, 1, shape=(period,)),\n self.names[1]: gym.spaces.Box(0, 1, shape=(period,)),\n self.names[2]: gym.spaces.Box(-1, 1, shape=(period,)),\n self.names[3]: gym.spaces.Box(0, 1, shape=(period,)),\n self.names[4]: gym.spaces.Box(0, 1, shape=(period,)),\n })\n else:\n self.spaces = OrderedDict()\n self.values = np.zeros((period, 5), dtype=np.float32)\n self.empty = True\n \n def reset(self):\n 
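# Zero the rolling window and mark it empty so that process() re-seeds\n # every row from the first frame it receives after the reset.\n self.values[...] 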
= 0\n self.empty = True\n \n def process(self, frames: Sequence[Frame], state: OrderedDict):\n frame = frames[-1]\n close, high, low, volume, tr = frame.close, frame.high, frame.low, frame.volume, frame.true_range\n if self.empty:\n self.values[:, 0] = close\n self.values[:, 1] = high\n self.values[:, 2] = low\n self.values[:, 3] = volume\n self.values[:, 4] = tr\n self.empty = False\n else:\n self.values[1:, :] = self.values[:-1, :]\n self.values[0, 0] = close\n self.values[0, 1] = high\n self.values[0, 2] = low\n self.values[0, 3] = volume\n self.values[0, 4] = tr\n \n p = self.values[:, 0]\n delta = p - close\n m = max(abs(delta.max()), abs(delta.min()))\n price = (delta / m) if (m > 1e-8) else delta\n \n abs_delta = np.abs(delta)\n d = abs_delta.max() - abs_delta.min()\n proxy = (1.0 - (abs_delta / d)) if (d > 1e-8) else (1.0 - abs_delta)\n \n highs, lows = self.values[:, 1], self.values[:, 2]\n intersection = np.minimum(highs, high) - np.maximum(lows, low)\n union = np.maximum(highs, high) - np.minimum(lows, low)\n iou = np.clip(intersection / union, -1.0, 1.0)\n \n v = self.values[:, 3]\n m = v.max() - v.min()\n volume = ((v - v.min()) / m) if (m > 1e-8) else np.ones(self.period, dtype=np.float32)\n \n trs = self.values[:, 4]\n m = trs.max() - trs.min()\n tr = ((trs - trs.min()) / m) if (m > 1e-8) else np.ones(self.period, dtype=np.float32)\n \n if self.write_to_frame:\n setattr(frame, self.names[0], price)\n setattr(frame, self.names[1], proxy)\n setattr(frame, self.names[2], iou)\n setattr(frame, self.names[3], volume)\n setattr(frame, self.names[4], tr)\n if self.write_to_state:\n state[self.names[0]] = price\n state[self.names[1]] = proxy\n state[self.names[2]] = iou\n state[self.names[3]] = volume\n state[self.names[4]] = tr\n return price, proxy, iou, volume, tr\n \n def __repr__(self):\n return f'{self.__class__.__name__}(period={self.period}, write_to={self.write_to})'\n","repo_name":"diovisgood/intraday","sub_path":"intraday/features/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"81"} +{"seq_id":"22353188852","text":"from typing import Any, Dict, List\nimport torch\nimport numpy as np\n\nclass DatasetMapperByVideo:\n\n def __init__(\n self,\n video_pipe,\n text_pipe,\n ) -> None:\n self.video_pipe = video_pipe\n self.text_pipe = text_pipe\n\n def __call__(self, batch: List[Dict[str, Any]]) -> Dict[str, Any]:\n video_ids = [item[\"video_id\"] for item in batch]\n sentences = [sent for item in batch for sent in item[\"sentences\"]]\n\n gt = torch.stack([torch.Tensor(_) / item[\"duration\"] for item in batch for _ in item[\"timestamps\"]], dim=0)\n\n video_result = self.video_pipe(video_ids=video_ids)\n text_result = self.text_pipe(sentences=sentences)\n\n batch_split_size = [len(item[\"sentences\"]) for item in batch]\n\n return dict(**video_result, **text_result, batch_split_size=batch_split_size, gt=gt, batch=batch)\n\nclass DatasetMapperByPair:\n \n def __init__(self, video_pipe, text_pipe):\n self.video_pipe = video_pipe\n self.text_pipe = text_pipe\n \n def __call__(self, batch: List[Dict[str, Any]]) -> Dict[str, Any]:\n video_ids = [item[\"video_id\"] for item in batch]\n sentences = [item[\"sentence\"] for item in batch]\n \n gt = torch.stack([torch.Tensor(item[\"timestamp\"]) / item[\"duration\"] for item in batch], dim=0)\n\n video_result = self.video_pipe(video_ids=video_ids)\n text_result = self.text_pipe(sentences=sentences)\n\n return 
dict(**video_result, **text_result, gt=gt, batch=batch)\n","repo_name":"K-Nick/NLVL-benchmark","sub_path":"data/dataset_mapper.py","file_name":"dataset_mapper.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"15340627685","text":"# ------------------------------------------\n# CDCD for Dance-to-Music\n# Licensed under the MIT License.\n# written by Ye ZHU\n# ------------------------------------------\n\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../'))\n\nimport torch\nimport cv2\nimport argparse\nimport numpy as np\nimport torchvision\nfrom PIL import Image\nimport librosa\nfrom librosa.core import load\nfrom librosa.util import normalize\nimport soundfile as sf\nimport noisereduce as nr\nimport time\n\n\nfrom synthesis.utils.io import load_yaml_config\nfrom synthesis.modeling.build import build_model\nfrom synthesis.utils.misc import get_model_parameters_info\n\nclass VQ_Diffusion():\n def __init__(self, config, path):\n self.info = self.get_model(ema=True, model_path=path, config_path=config)\n self.model = self.info['model']\n self.epoch = self.info['epoch']\n self.model_name = self.info['model_name']\n self.model = self.model.cuda()\n self.model.eval()\n for param in self.model.parameters(): \n param.requires_grad=False\n\n def get_model(self, ema, model_path, config_path):\n if 'OUTPUT' in model_path: # pretrained model\n model_name = model_path.split(os.path.sep)[-3]\n else: \n model_name = os.path.basename(config_path).replace('.yaml', '')\n\n config = load_yaml_config(config_path)\n model = build_model(config)\n model_parameters = get_model_parameters_info(model)\n \n print(model_parameters)\n if os.path.exists(model_path):\n ckpt = torch.load(model_path, map_location=\"cpu\")\n\n if 'last_epoch' in ckpt:\n epoch = ckpt['last_epoch']\n elif 'epoch' in ckpt:\n epoch = ckpt['epoch']\n else:\n epoch = 0\n\n missing, unexpected = model.load_state_dict(ckpt[\"model\"], strict=False)\n print('Model missing keys:\\n', missing)\n print('Model unexpected keys:\\n', unexpected)\n\n if ema==True and 'ema' in ckpt:\n print(\"Evaluate EMA model\")\n ema_model = model.get_ema_model()\n missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)\n \n return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}\n\n def inference_generate_sample_with_class(self, text, truncation_rate, save_root, batch_size,fast=False):\n os.makedirs(save_root, exist_ok=True)\n\n data_i = {}\n data_i['label'] = [text]\n data_i['image'] = None\n condition = text\n\n str_cond = str(condition)\n save_root_ = os.path.join(save_root, str_cond)\n os.makedirs(save_root_, exist_ok=True)\n\n with torch.no_grad():\n model_out = self.model.generate_content(\n batch=data_i,\n filter_ratio=0,\n replicate=batch_size,\n content_ratio=1,\n return_att_weight=False,\n sample_type=\"top\"+str(truncation_rate)+'r',\n ) # B x C x H x W\n\n # save results\n content = model_out['content']\n content = content.permute(0, 2, 3, 1).to('cpu').numpy().astype(np.uint8)\n for b in range(content.shape[0]):\n cnt = b\n save_base_name = '{}'.format(str(cnt).zfill(6))\n save_path = os.path.join(save_root_, save_base_name+'.jpg')\n im = Image.fromarray(content[b])\n im.save(save_path)\n\n def inference_generate_sample_with_condition(self, text, truncation_rate, save_root, batch_size,fast=False):\n os.makedirs(save_root, exist_ok=True)\n\n data_i = {}\n 
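# note: the text condition goes in as a single-element list, i.e. a batch of one prompt\n        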
data_i['text'] = [text]\n data_i['image'] = None\n condition = text\n\n str_cond = str(condition)\n save_root_ = os.path.join(save_root, str_cond)\n os.makedirs(save_root_, exist_ok=True)\n\n if fast != False:\n add_string = 'r,fast'+str(fast-1)\n else:\n add_string = 'r'\n with torch.no_grad():\n model_out = self.model.generate_content(\n batch=data_i,\n filter_ratio=0,\n replicate=batch_size,\n content_ratio=1,\n return_att_weight=False,\n sample_type=\"top\"+str(truncation_rate)+add_string,\n ) # B x C x H x W\n\n # save results\n content = model_out['content']\n content = content.permute(0, 2, 3, 1).to('cpu').numpy().astype(np.uint8)\n for b in range(content.shape[0]):\n cnt = b\n save_base_name = '{}'.format(str(cnt).zfill(6))\n save_path = os.path.join(save_root_, save_base_name+'.png')\n im = Image.fromarray(content[b])\n im.save(save_path)\n\n\n def inference_music(self, music, motion, video, genre, mask, truncation_rate, save_root, batch_size,fast=False):\n os.makedirs(save_root, exist_ok=True)\n\n data_i = {}\n data_i['music'] = music\n data_i['motion'] = motion\n data_i['video'] = video\n data_i['genre'] = genre\n data_i['condiation_mask'] = mask\n data_i['negative_music'] = None\n # save_root_ = os.path.join(save_root)\n os.makedirs(save_root, exist_ok=True)\n if fast != False:\n add_string = 'r,fast'+str(fast-1)\n else:\n add_string = 'r'\n with torch.no_grad():\n model_out = self.model.generate_content(\n batch=data_i,\n filter_ratio=0.1, # ensure that it actually generate from full mask\n replicate=batch_size,\n content_ratio=0.5,\n return_att_weight=False,\n sample_type=\"top\"+str(truncation_rate)+add_string,\n )\n content = model_out['content']\n print(\"Check content:\", content.size())\n # file_audio = os.path.join(save_root,'generated_sample.wav')\n generated_audio = content.squeeze().detach().cpu().numpy()\n # sf.write(file_audio, generated_audio, 22050)\n\n return generated_audio\n\n\n\n\ndef beat_detect(x, sr=22050):\n onsets = librosa.onset.onset_detect(x, sr=sr, wait=1, delta=0.2, pre_avg=1, post_avg=1, post_max=1, units='time')\n n = np.ceil( len(x) / sr)\n beats = [0] * int(n)\n for time in onsets:\n beats[int(np.trunc(time))] = 1\n return beats\n\n\ndef beat_scores(gt, syn):\n assert len(gt) == len(syn)\n total_beats = sum(gt)\n cover_beats = sum(syn)\n\n hit_beats = 0\n for i in range(len(gt)):\n if gt[i] == 1 and gt[i] == syn[i]:\n hit_beats += 1\n\n return cover_beats/total_beats, hit_beats/total_beats\n\n\n\n\nif __name__ == '__main__':\n \n # modify the path to your config file and model checkpoint\n VQ_Diffusion = VQ_Diffusion('./config/config.yaml', path='./checkpoint/last.pth')\n sr = 22050\n\n testing_music = [line.rstrip() for line in open('./data/aist_audio_test_segment.txt')]\n cond_motion = [line.rstrip() for line in open('./data/aist_motion_test_segment.txt')]\n cond_video = [line.rstrip() for line in open('./data/aist_video_test_segment.txt')]\n genres = np.load('./data/test_genre.npy')\n total_cover_score = 0\n total_hit_score = 0\n start_time = time.time()\n for i, f in enumerate(testing_music):\n print(i)\n print(testing_music[i])\n print(cond_motion[i])\n print(cond_video[i])\n #start_time = time.time()\n music, sampling_rate = load(testing_music[i]) \n motion = np.load(cond_motion[i])\n video = np.load(cond_video[i])\n genre = genres[i]\n gt_beats = beat_detect(music)\n music = torch.from_numpy(music).float()#.unsqueeze(0).unsqueeze(1)\n motion = torch.from_numpy(motion).float().unsqueeze(0)\n video = 
torch.from_numpy(video).float().unsqueeze(0)\n        genre = torch.from_numpy(genre).unsqueeze(0)\n        generated_audio = VQ_Diffusion.inference_music(music.unsqueeze(0).unsqueeze(1), motion, video, genre, mask=None, truncation_rate=0.86, save_root='RESULT', batch_size=1)\n        generated_audio = nr.reduce_noise(y=generated_audio, sr=22050)\n        file_audio = 'generated_sample_' + str(i) + '.wav'\n        file_audio = os.path.join('./RESULT_aist', file_audio)\n        sf.write(file_audio, generated_audio, 22050)\n\n\n\n\n\n\n    \n    \n\n\n\n\n\n\n","repo_name":"L-YeZhu/CDCD","sub_path":"inference/inference_aist.py","file_name":"inference_aist.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"62"}
{"seq_id":"7655429661","text":"from django.shortcuts import render\nfrom .models import Products,Order\nfrom django.core.paginator import Paginator\nfrom django.http import JsonResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\n\ndef index(request):\n    product_objects = Products.objects.all()\n\n\n    item_name = request.GET.get('item_name')\n    if item_name != '' and item_name is not None:\n        product_objects = product_objects.filter(title__icontains=item_name)\n\n\n    paginator = Paginator(product_objects,4)\n    page = request.GET.get(\"page\")\n    product_objects = paginator.get_page(page)\n\n    return render(request,'shop/index.html',{'product_objects':product_objects})\n\n    \n    \n\n\ndef detail(request,id):\n    product_object = Products.objects.get(pk=id)\n    return render(request,'shop/detail.html',{'product_object':product_object})\n\n\n@csrf_exempt\ndef checkout(request):\n    if request.method == 'POST':\n        try:\n\n            body_unicode = request.body.decode('utf-8')\n            post_data = json.loads(body_unicode)\n\n            name = post_data.get('name')\n            email = post_data.get('email')\n            address = post_data.get('address')\n            total_price = post_data.get('total_price')\n\n\n            response_data = {\n                'message': 'Order placed successfully.',\n                'name': name,\n                'email': email,\n                'address': address,\n                'total_price': total_price\n            }\n\n            return JsonResponse(response_data)\n\n        except json.JSONDecodeError:\n\n            return JsonResponse({'error': 'Invalid JSON data'}, status=400)\n\n\n    return render(request, 'shop/checkout.html')\n\ndef get_product_name(request, product_id):\n    try:\n        product = Products.objects.get(pk=product_id)\n        product_data = {\n            'product_name': product.title,\n            'product_price': product.price\n        }\n        return JsonResponse(product_data)\n    except Products.DoesNotExist:\n        return JsonResponse({'error': 'Product not found'}, status=404)\n\n    \n","repo_name":"Uricorn99/Fast_Food_Shopping_Cart-Django-","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"12486145038","text":"\"\"\"\nSometimes you need to limit an array to a range of values -- for example, you\n only need the values over 10, or only the values under 100. This algorithm\n trims an array down to the values inside the given bounds.\n\nGiven an array and Min/Max values, it returns an array containing the values of\n the given array that are at least Min and at most Max. Pass None as Min or\n Max to limit by only one bound.\n\nex) limit([1,2,3,4,5], None, 3) = [1,2,3]\n\nComplexity = O(n)\n\"\"\"\n
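# e.g. limit([1,2,3,4,5], 3, None) keeps values >= 3 -> [3,4,5]\n# e.g. limit([1,5,10], 2, 8) keeps 2 <= v <= 8 -> [5]\n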
\n# tl:dr -- array slicing by value\ndef limit(arr, min_lim = None, max_lim = None):\n    result = []\n    if min_lim == None:\n        for i in arr:\n            if i <= max_lim:\n                result.append(i)\n    elif max_lim == None:\n        for i in arr:\n            if i >= min_lim:\n                result.append(i)\n    else:\n        for i in arr:\n            if i >= min_lim and i <= max_lim:\n                result.append(i)\n\n    return result\n","repo_name":"mishnit/mishnit.github.io","sub_path":"algorithms/arrays/limit.py","file_name":"limit.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"62"}
{"seq_id":"10410500596","text":"from lxml import html \r\nimport csv,os,json\r\nimport requests\r\nimport builtins\r\nfrom time import sleep\r\n \r\n\r\nurl_dict = dict()\r\n\r\ndef find_region(url_string):\r\n    region=url_string[19:22]\r\n    if(region[2]=='/'): region = region[:2]\r\n    elif region[2]=='.' : region = 'co.uk'\r\n    return region\r\n\r\n\r\ndef find_dp(url_string):\r\n    before_dp = url_string.find(\"dp\")\r\n    return url_string[before_dp+3:before_dp+13]\r\n\r\n\r\nfile_url = open('gpurls.txt', 'r')\r\nstring_list = file_url.readlines()\r\nfor strings in string_list:\r\n    region = find_region(strings)\r\n    if region in url_dict.keys():\r\n        url_dict[region].append(find_dp(strings))\r\n    else: url_dict[region]=[find_dp(strings)]\r\n\r\n\r\nurl_list = []\r\nfor key, values in url_dict.items():\r\n    for dps in values:\r\n        url_list.append(\"http://www.amazon.\"+key+\"/dp/\"+dps)\r\n        \r\n\r\ndef AmzonParser(url):\r\n    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}\r\n    page = requests.get(url,headers=headers)\r\n    while True:\r\n        sleep(2)\r\n        try:\r\n            doc = html.fromstring(page.content)\r\n            XPATH_NAME = '//h1[@id=\"title\"]//text()'\r\n            XPATH_SALE_PRICE = '//span[contains(@id,\"ourprice\") or contains(@id,\"saleprice\")]/text()'\r\n            XPATH_ORIGINAL_PRICE = '//td[contains(text(),\"List Price\") or contains(text(),\"M.R.P\") or contains(text(),\"Price\")]/following-sibling::td/text()'\r\n            \r\n            RAW_NAME = doc.xpath(XPATH_NAME)\r\n            RAW_SALE_PRICE = doc.xpath(XPATH_SALE_PRICE)\r\n            RAW_ORIGINAL_PRICE = doc.xpath(XPATH_ORIGINAL_PRICE)\r\n            \r\n            NAME = ' '.join(''.join(RAW_NAME).split()) if RAW_NAME else None\r\n            SALE_PRICE = ' '.join(''.join(RAW_SALE_PRICE).split()).strip() if RAW_SALE_PRICE else None\r\n            ORIGINAL_PRICE = ''.join(RAW_ORIGINAL_PRICE).strip() if RAW_ORIGINAL_PRICE else None\r\n            \r\n            if not ORIGINAL_PRICE:\r\n                ORIGINAL_PRICE = SALE_PRICE\r\n            \r\n            if page.status_code!=200:\r\n                raise ValueError('captha')\r\n            data = {\r\n                'NAME':NAME,\r\n                'SALE_PRICE':SALE_PRICE,\r\n                'ORIGINAL_PRICE':ORIGINAL_PRICE,\r\n                'URL':url,\r\n            }\r\n            \r\n            return data\r\n        except Exception as e:\r\n            print(e)\r\n            \r\ndef ReadAsin():\r\n    extracted_data = []\r\n    for url in url_list:\r\n        print(\"Processing: \"+url)\r\n        elem = AmzonParser(url)\r\n        extracted_data.append(elem)\r\n        #extracted_data.append(AmzonParser(url))\r\n        if elem['SALE_PRICE']==elem['ORIGINAL_PRICE']==None: continue\r\n        print(elem['NAME']+' '+elem['SALE_PRICE']) if elem['SALE_PRICE']!=None else print(elem['NAME']+' '+elem['ORIGINAL_PRICE'])\r\n    #f=open('data.json','w')\r\n    #json.dump(extracted_data,f,indent=4)\r\n    \r\nif __name__ == \"__main__\":\r\n    
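# hit every product URL collected from gpurls.txt and print each name and price\r\n    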
ReadAsin()","repo_name":"Clincius/AmazonPriceScraper","sub_path":"AmazonScraper.py","file_name":"AmazonScraper.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
{"seq_id":"5131058176","text":"# Link to problem : https://leetcode.com/problems/remove-element/\n\nclass Solution:\n    def removeElement(self, nums ,val):\n        count = 0\n        for i in range(len(nums)):\n            if(nums[i] != val):\n                nums[count] = nums[i]\n                count += 1\n        return count ","repo_name":"dsrao711/DSA-Together-HacktoberFest","sub_path":"twopointer/RemoveElement.py","file_name":"RemoveElement.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"62"}
{"seq_id":"31688535186","text":"import random\r\nprompt = '-->'\r\n\r\ndef playRound(budget: int) -> tuple:\r\n    sum = sumOfDice(random.randint(1,6), random.randint(1,6))\r\n    if sum == 7:\r\n        budget += 4\r\n        return (\"Win\",budget)\r\n    else:\r\n        budget -= 1\r\n        return (\"Loss\",budget)\r\n\r\ndef sumOfDice(die1: int, die2: int) -> int:\r\n    return die1 + die2\r\n\r\ndef haveMoney(budget: int) -> bool:\r\n    return True if budget > 0 else False\r\n\r\ndef main():\r\n    numRolls = 0\r\n    outputString = \"\\t{0}\\t\\t{1}\\t\\t{2}\"\r\n    print(\"Gambling Budget\")\r\n    budget = int(input(prompt))\r\n    print(\"Number of rolls\\t\\tWin or Loss\\tCurrent value of the pot\")\r\n    print(outputString.format(numRolls, \"Put\", budget))\r\n    while haveMoney(budget):\r\n        numRolls += 1\r\n        output = playRound(budget)\r\n        budget = output[1]\r\n        print(outputString.format(numRolls, output[0], output[1]))\r\n\r\n    print(\"Sorry you're out of money\") \r\n    print(\"Here's your number of rolls\", numRolls)\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"linkluky/HomeWork-Programing-python","sub_path":"chapter 3.11.py","file_name":"chapter 3.11.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"70920210119","text":"import sys\nsys.path.insert(0,\".\")\nimport os\nimport pymatting\n#$import matting.utils.config as config\nimport numpy as np\nimport cv2\n\ndef main():\n\n    #train_fg_path = config.fg_path\n    #train_alpha_path = config.alpha_path\n    train_fg_path = \"/mnt/d/DataSets/Adobe_Deep_Matting_Dataset/all_fg\"\n    train_alpha_path = \"/mnt/d/DataSets/Adobe_Deep_Matting_Dataset/all_alpha\"\n\n    my_sum = 0\n    for f in os.listdir(train_fg_path):\n        if f.endswith(\".jpg\") or f.endswith(\".png\") or f.endswith(\".jpeg\") or f.endswith(\".JPG\"):\n            img = cv2.imread(os.path.join(train_fg_path, f))\n            alpha = cv2.imread(os.path.join(train_alpha_path, f), 0)\n            my_sum += np.sum((alpha < 255) & (alpha > 0))\n    print(my_sum)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kfeng123/LSA-Matting","sub_path":"tools/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"62"}
{"seq_id":"27716107351","text":"from Organism import Organism\nfrom World import World\n\nclass Animal(Organism):\n    def __init__(self, power, initiative, world, x, y):\n        super().__init__(power, initiative, world, x, y)\n        self.SetOld()\n    \n    def SetOld(self):\n        self.oldX = self.x\n        self.oldY = self.y\n    \n    def Back(self):\n        self.x = self.oldX\n        self.y = self.oldY\n    \n    def Action(self):\n        self.SetOld()\n        move = self.world.RandomMove(self.x,self.y,False)\n        self.x += 
move.GetX()\n        self.y += move.GetY()\n    \n    def Collision(self,attacker):\n        if type(attacker) == type(self):\n            attacker.Back()\n            attacker.NewAnimal()\n        elif self.HasTargetLowerPower(attacker):\n            attacker.Die(self)\n        else:\n            self.Die(attacker)\n    def NewAnimal(self):\n        cMove = self.world.RandomMove(self.x, self.y, True)\n        if not cMove.IsNone():\n            cX = self.x + cMove.GetX()\n            cY = self.y + cMove.GetY()\n            self.world.AddNewOrganism(self.NewOrganism(cX,cY))","repo_name":"grzybear/po3","sub_path":"po3/Animal.py","file_name":"Animal.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"71910616838","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom blog.views import index, show_post, create_post, delete_post, publish_post\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^$', index),\n    url(r'^post/(?P<id>\d+)/$', show_post),\n    url(r'^post/create/$', create_post),\n    url(r'^post/delete/(?P<id>\d+)/$', delete_post),\n    url(r'^post/publish/(?P<id>\d+)/$', publish_post),\n]\n","repo_name":"chexca/django-example","sub_path":"webpage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"28316257037","text":"import sys\n\nclass Character:\n    def __init__(self, name, damage, hp):\n        self.name = name\n        self.damage = damage\n        self.hp = hp\n\n    def display_stats(self):\n        print(f\"Character: {self.name}\")\n        print(f\"HP: {self.hp}\")\n        print(f\"Damage: {self.damage}\\n\")\n\n    def set_damage(self, damage):\n        self.damage = damage\n\n    def set_hp(self, hp):\n        self.hp = hp\n\n    def get_name(self):\n        return self.name\n\ndef print_choices():\n    print(\"Choose your character\")\n    characters = {\n        \"1\": \"wizard\",\n        \"2\": \"elf\",\n        \"3\": \"human\",\n        \"4\": \"orc\",\n        \"5\": \"exit\"\n    }\n    for key, value in characters.items():\n        print(f\"{key}) {value.capitalize()}\")\n\ndef select_character():\n    choice = input(\"Selection:\").lower()\n    characters = {\n        \"1\": Character(\"wizard\", 150, 70),\n        \"wizard\": Character(\"wizard\", 150, 70),\n        \"2\": Character(\"elf\", 200, 100),\n        \"elf\": Character(\"elf\", 200, 100),\n        \"3\": Character(\"human\", 20, 150),\n        \"human\": Character(\"human\", 20, 150),\n        \"4\": Character(\"orc\", \"1/2 of Dragon's Current HP\", 350),\n        \"orc\": Character(\"orc\", \"1/2 of Dragon's Current HP\", 350),\n        \"exit\": \"exit\",\n        \"5\": \"5\"\n    }\n    if choice in characters:\n        if choice == \"5\" or choice == \"exit\":\n            print(\"Exiting...\")\n            sys.exit()\n        player_character = characters[choice]\n        print(f\"You chose {player_character.get_name()}\")\n        if choice == \"orc\":\n            player_character.display_stats()\n        else:\n            characters[\"orc\"].display_stats()  # Display Orc stats separately\n        return player_character\n    else:\n        print(\"Unknown character. Please enter a valid choice.\")\n        return select_character()\n\ndef battle(player_character, dragon):\n    while True:\n        if player_character.name == \"orc\" and dragon.hp <= 5:\n            player_character.set_damage(5)\n            dragon.hp -= player_character.damage\n        elif player_character.name == \"orc\":\n            half_dragon_hp = dragon.hp // 2\n            player_character.set_damage(half_dragon_hp)\n            dragon.hp -= half_dragon_hp\n        else:\n            dragon.hp -= player_character.damage\n\n        print(f\"The {player_character.name} damaged the Dragon!\")\n        print(f\"The Dragon's hitpoints are now: {dragon.hp}\\n\")\n\n        if dragon.hp <= 0:\n            print(\"You won! 
The Dragon has lost the battle\")\n break\n\n player_character.hp -= dragon.damage\n print(f\"The Dragon damaged the {player_character.name}\")\n print(f\"The {player_character.name} hitpoints are now: {player_character.hp}\\n\")\n\n if player_character.hp <= 0:\n print(\"You lost!\")\n break\n\ndef main():\n while True:\n dragon = Character(\"Dragon\", 50, 300)\n print_choices()\n player_character = select_character()\n battle(player_character, dragon)\n print(\"Play again? Y/N\")\n choice = input().lower()\n if choice == \"n\":\n sys.exit()\n elif choice != \"y\":\n print(\"Invalid choice. Please enter 'Y' or 'N'.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HookedOnPhones/My-Code","sub_path":"Projects/battlegame2_1.py","file_name":"battlegame2_1.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21171119388","text":"import pandas as pd\nimport numpy as np\nimport statistics\n\n\nWINDOW_SIZE = 9\n\n#codon table that maps codon to its amino acid\ncodon_table = {'TCA': 'S', 'AAT': 'N', 'TGG': 'W', 'GAT': 'D', 'GAA': 'E', 'TTC': 'F', 'CCG': 'P',\n 'ACT': 'T', 'GGG': 'G', 'ACG': 'T', 'AGA': 'R', 'TTG': 'L', 'GTC': 'V', 'GCA': 'A',\n 'TGA': '*', 'CGT': 'R', 'CAC': 'H', 'CTC': 'L', 'CGA': 'R', 'GCT': 'A', 'ATC': 'I',\n 'ATA': 'I', 'TTT': 'F', 'TAA': '*', 'GTG': 'V', 'GCC': 'A', 'GAG': 'E', 'CAT': 'H',\n 'AAG': 'K', 'AAA': 'K', 'GCG': 'A', 'TCC': 'S', 'GGC': 'G', 'TCT': 'S', 'CCT': 'P',\n 'GTA': 'V', 'AGG': 'R', 'CCA': 'P', 'TAT': 'Y', 'ACC': 'T', 'TCG': 'S', 'ATG': 'M',\n 'TTA': 'L', 'TGC': 'C', 'GTT': 'V', 'CTT': 'L', 'CAG': 'Q', 'CCC': 'P', 'ATT': 'I',\n 'ACA': 'T', 'AAC': 'N', 'GGT': 'G', 'AGC': 'S', 'CGG': 'R', 'TAG': '*', 'CGC': 'R',\n 'AGT': 'S', 'CTA': 'L', 'CAA': 'Q', 'CTG': 'L', 'GGA': 'G', 'TGT': 'C', 'TAC': 'Y',\n 'GAC': 'D'}\n\n#amino acid table that maps amino acids to a list of codons that creates them\namino_acid_table = {'S': ['TCA', 'TCC', 'TCT', 'TCG', 'AGC', 'AGT'], 'N': ['AAT', 'AAC'], 'W': ['TGG'],\n 'D': ['GAT', 'GAC'], 'E': ['GAA', 'GAG'], 'F': ['TTC', 'TTT'], 'P': ['CCG', 'CCT', 'CCA', 'CCC'],\n 'T': ['ACT', 'ACG', 'ACC', 'ACA'], 'G': ['GGG', 'GGC', 'GGT', 'GGA'],\n 'R': ['AGA', 'CGT', 'CGA', 'AGG', 'CGG', 'CGC'], 'L': ['TTG', 'CTC', 'TTA', 'CTT', 'CTA', 'CTG'],\n 'V': ['GTC', 'GTG', 'GTA', 'GTT'], 'A': ['GCA', 'GCT', 'GCC', 'GCG'], '*': ['TGA', 'TAA', 'TAG'],\n 'H': ['CAC', 'CAT'], 'I': ['ATC', 'ATA', 'ATT'], 'K': ['AAG', 'AAA'], 'Y': ['TAT', 'TAC'],\n 'M': ['ATG'], 'C': ['TGC', 'TGT'], 'Q': ['CAG', 'CAA']}\n\ncodon_freq_ecoli = {'TTT': 22.38, 'TCT': 8.61, 'TAT': 16.36, 'TGT': 5.19, 'TTC': 16.21,\n 'TCC': 8.81, 'TAC': 12.15, 'TGC': 6.34, 'TTA': 13.83, 'TCA': 7.57,\n 'TAA': 2.03, 'TGA': 1.04, 'TTG': 13.37, 'TCG': 8.79, 'TAG': 0.3,\n 'TGG': 15.21, 'CTT': 11.44, 'CCT': 7.22, 'CAT': 12.84, 'CGT': 20.7,\n 'CTC': 10.92, 'CCC': 5.56, 'CAC': 9.44, 'CGC': 21.48, 'CTA': 3.93,\n 'CCA': 8.44, 'CAA': 15.1, 'CGA': 3.67, 'CTG': 52.1, 'CCG': 22.65,\n 'CAG': 29.21, 'CGG': 5.72, 'ATT': 30.21, 'ACT': 9.02, 'AAT': 18.26,\n 'AGT': 9.08, 'ATC': 24.6, 'ACC': 22.88, 'AAC': 21.47, 'AGC': 15.89,\n 'ATA': 4.88, 'ACA': 7.63, 'AAA': 33.94, 'AGA': 2.43, 'ATG': 27.59,\n 'ACG': 14.47, 'AAG': 10.7, 'AGG': 1.48, 'GTT': 18.39, 'GCT': 15.54,\n 'GAT': 32.43, 'GGT': 24.45, 'GTC': 15.07, 'GCC': 25.45, 'GAC': 19.14,\n 'GGC': 28.65, 'GTA': 10.97, 'GCA': 20.61, 'GAA': 39.55, 'GGA': 8.44,\n 'GTG': 25.9, 'GCG': 32.79, 'GAG': 18.24, 'GGG': 11.29}\n\ncodon_freq_scerevisiae = { 'TTT': 26.18, 
'TCT': 23.35, 'TAT': 19.05, 'TGT': 7.82, 'TTC': 17.88,\n 'TCC': 14.07, 'TAC': 14.6, 'TGC': 4.75, 'TTA': 26.33, 'TCA': 19.05,\n 'TAA': 0.95, 'TGA': 0.6, 'TTG': 26.5, 'TCG': 8.71, 'TAG': 0.46,\n 'TGG': 10.35, 'CTT': 12.27, 'CCT': 13.57, 'CAT': 13.89, 'CGT': 6.26,\n 'CTC': 5.52, 'CCC': 6.91, 'CAC': 7.74, 'CGC': 2.63, 'CTA': 13.52,\n 'CCA': 17.81, 'CAA': 27.1, 'CGA': 3.1, 'CTG': 10.65, 'CCG': 5.42,\n 'CAG': 12.42, 'CGG': 1.82, 'ATT': 30.1, 'ACT': 20.24, 'AAT': 36.61,\n 'AGT': 14.6, 'ATC': 16.99, 'ACC': 12.48, 'AAC': 24.8, 'AGC': 9.96,\n 'ATA': 18.29, 'ACA': 18.18, 'AAA': 42.83, 'AGA': 21.05, 'ATG': 20.68,\n 'ACG': 8.15, 'AAG': 30.52, 'AGG': 9.45, 'GTT': 21.47, 'GCT': 20.28,\n 'GAT': 38.09, 'GGT': 22.59, 'GTC': 11.23, 'GCC': 12.14, 'GAC': 20.39,\n 'GGC': 9.78, 'GTA': 12.07, 'GCA': 16.26, 'GAA': 45.81, 'GGA': 11.19,\n 'GTG': 10.72, 'GCG': 6.17, 'GAG': 19.55, 'GGG': 6.06}\n\n\n#---------------------------------------------------------------------------------------------------------------#\n\ndef get_aa_frequencies_list(codon_freqs, amino_acid_table, codon_table):\n \"\"\"\n Parameters: codon_freqs(dictionary), amino_acid_table(dictionary), codon_table(dictionary)\n Description: Creates a list for each amino acid of all associated codon frequencies\n Return: dictionary of lists ({key=amino_acid, val=list(codon_freq))\n Todo: NONE\n \"\"\"\n #n = len(codon_freqs)\n aa_freqs = {}\n aa_map = {}\n\n # for i in range(n):\n # aa_freqs[i] = {}\n for aa, l in amino_acid_table.items():\n aa_freqs[aa] = []\n aa_map[aa] = []\n for codon in l:\n if codon in codon_freqs.keys():\n aa_freqs[aa].append(codon_freqs[codon])\n aa_map[aa].append(codon + ' ' + str(codon_freqs[codon]))\n\n return aa_freqs, aa_map\n\n#---------------------------------------------------------------------------------------------------------------#\n\ndef calc_minMax_percent(seq, aa_avg_freq, codon_freq, codon_table, window_size):\n \"\"\"\n Parameters: seq(String), aa_avg_freq(dictionary), codon_freq(dictionary), codon_table(dictionary), window_size(int)\n Description: Generates a list of %minMax values based on window_size for a given sequence\n Return: list\n Todo: NONE\n \"\"\"\n codon_seq = [seq[s:s+3] for s in range(0, len(seq), 3)]\n n = len(codon_seq)\n skip = int(window_size/2)\n minMax_by_row = [0.0] * n\n\n actual = 0.0\n maximum = 0.0\n minimum = 0.0\n average = 0.0\n percent_max = 0.0\n percent_min = 0.0\n\n for i in range(skip-1, skip+n-window_size+1):\n codons_in_window = codon_seq[i-skip:i+window_size-skip]\n actual = maximum = minimum = average = percent_max = percent_min = 0.0\n\n for codon in codons_in_window:\n if len(codon) != 3:\n continue\n freq_for_window = aa_avg_freq[codon_table[codon]]\n\n if not freq_for_window:\n freq_for_window.append(0)\n\n m = len(freq_for_window)\n\n actual += codon_freq[codon]\n maximum += max(freq_for_window)\n minimum += min(freq_for_window)\n average += sum(freq_for_window) / m\n\n actual /= window_size\n maximum /= window_size\n minimum /= window_size\n average /= window_size\n\n if (maximum - average) != 0:\n percent_max = ((actual-average)/(maximum-average))*100\n if (average - minimum) != 0:\n percent_min = ((average-actual)/(average-minimum))*100\n\n if percent_max >= 0:\n minMax_by_row[i] = percent_max\n else:\n minMax_by_row[i] = -1 * percent_min\n\n return minMax_by_row\n\n#---------------------------------------------------------------------------------------------------------------#\n\ndef add_col_to_df(MM_list, df):\n MM_min_list = []\n MM_max_list = []\n MM_avg_list = []\n 
MM_median_list = []\n\n for l in MM_list:\n if not l:\n MM_min_list.append(None)\n MM_avg_list.append(None)\n MM_max_list.append(None)\n MM_median_list.append(None)\n else:\n MM_min_list.append(min(l))\n MM_avg_list.append(sum(l) / len(l))\n MM_max_list.append(max(l))\n MM_median_list.append(statistics.median(l))\n\n #print(MM_min_list)\n df['MM_min'] = MM_min_list\n df['MM_avg'] = MM_avg_list\n df['MM_max'] = MM_max_list\n df['MM_median'] = MM_median_list\n\n return df\n\n#---------------------------------------------------------------------------------------------------------------#\n\ndef reduce_matrix(df_path, vals_path, codon_freq, colname_seq, colname_id, out_path, perc):\n\n df = pd.read_csv(df_path)\n vals = pd.read_csv(vals_path)\n vals.drop(vals.columns[vals.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\n\n vals.columns = df[colname_id]\n vals.index = df[colname_id]\n\n aa_freq, aa_map = get_aa_frequencies_list(codon_freq, amino_acid_table, codon_table)\n\n #get minMax for full data set\n minMax_by_row_full = []\n for seq in df[colname_seq]:\n minMax_by_row_full.append(calc_minMax_percent(seq, aa_freq, codon_freq, codon_table, WINDOW_SIZE))\n\n df = add_col_to_df(minMax_by_row_full, df)\n df_middle = df.sort_values(by='MM_avg', ascending=False)[round(perc*len(df)):round((1-perc)*len(df))]\n\n genes = df_middle[colname_id].values.tolist()\n remove_list = [r for r in df[colname_id] if r not in genes]\n\n vals.drop(index=remove_list, axis=0, inplace=True)\n vals.drop(labels=remove_list, axis=1, inplace=True)\n\n vals.to_csv(out_path)\n\n#script\n# reduce_matrix('../Data/Ecoli/full.csv', '../Files/MM/Full/Plain//HD_ecoli.csv', codon_freq_ecoli, 'mRNA', 'Gene', '../Files/MM/Omit10th/HD_ecoli.csv', 0.1)\n# reduce_matrix('../Data/Ecoli/rand_seqs_ecoli.csv', '../Files/MM/Full/Plain//HD_ecoli_rand.csv', codon_freq_ecoli, 'rand_seqs', 'Gene', '../Files/MM/Omit10th/HD_ecoli_rand.csv', 0.1)\n# reduce_matrix('../Data/Ecoli/shuf_seqs_ecoli.csv', '../Files/MM/Full/Plain//HD_ecoli_shuf.csv', codon_freq_ecoli, 'shuf_seqs', 'Gene', '../Files/MM/Omit10th/HD_ecoli_shuf.csv', 0.1)\n#\n# reduce_matrix('../Data/Ecoli/full.csv', '../Files/MM/Full/Plain//KS_stats_ecoli.csv', codon_freq_ecoli, 'mRNA', 'Gene', '../Files/MM/Omit10th/KS_stats_ecoli.csv', 0.1)\n# reduce_matrix('../Data/Ecoli/rand_seqs_ecoli.csv', '../Files/MM/Full/Plain//KS_stats_ecoli_rand.csv', codon_freq_ecoli, 'rand_seqs', 'Gene', '../Files/MM/Omit10th/KS_stats_ecoli_rand.csv', 0.1)\n# reduce_matrix('../Data/Ecoli/shuf_seqs_ecoli.csv', '../Files/MM/Full/Plain//KS_stats_ecoli_shuf.csv', codon_freq_ecoli, 'shuf_seqs', 'Gene', '../Files/MM/Omit10th/KS_stats_ecoli_shuf.csv', 0.1)\n\n# reduce_matrix('../Data/Yeast/YeastSeqs.csv', '../Files/MM/Full/Plain//HD_yeast.csv', codon_freq_scerevisiae, 'seq', 'locus_tag', '../Files/MM/Omit10th/HD_yeast.csv', 0.1)\n# reduce_matrix('../Data/Yeast/rand_seqs_yeast.csv', '../Files/MM/Full/Plain//HD_yeast_rand.csv', codon_freq_scerevisiae, 'rand_seqs', 'locus_tag', '../Files/MM/Omit10th/HD_yeast_rand.csv', 0.1)\n# reduce_matrix('../Data/Yeast/shuf_seqs_yeast.csv', '../Files/MM/Full/Plain//HD_yeast_shuf.csv', codon_freq_scerevisiae, 'shuf_seqs', 'locus_tag', '../Files/MM/Omit10th/HD_yeast_shuf.csv', 0.1)\n#\n# reduce_matrix('../Data/Yeast/YeastSeqs.csv', '../Files/MM/Full/Plain//KS_stats_yeast.csv', codon_freq_scerevisiae, 'seq', 'locus_tag', '../Files/MM/Omit10th/KS_stats_yeast.csv', 0.1)\n# reduce_matrix('../Data/Yeast/rand_seqs_yeast.csv', 
'../Files/MM/Full/Plain//KS_stats_yeast_rand.csv', codon_freq_scerevisiae, 'rand_seqs', 'locus_tag', '../Files/MM/Omit10th/KS_stats_yeast_rand.csv', 0.1)\n# reduce_matrix('../Data/Yeast/shuf_seqs_yeast.csv', '../Files/MM/Full/Plain//KS_stats_yeast_shuf.csv', codon_freq_scerevisiae, 'shuf_seqs', 'locus_tag', '../Files/MM/Omit10th/KS_stats_yeast_shuf.csv', 0.1)\n\n\n#script\nreduce_matrix('../Data/Ecoli/full.csv', '../Files/MM/Full/Plain//HD_ecoli.csv', codon_freq_ecoli, 'mRNA', 'Gene', '../Files/MM/Omit30/HD_ecoli.csv', 0.3)\nreduce_matrix('../Data/Ecoli/rand_seqs_ecoli.csv', '../Files/MM/Full/Plain//HD_ecoli_rand.csv', codon_freq_ecoli, 'rand_seqs', 'Gene', '../Files/MM/Omit30/HD_ecoli_rand.csv', 0.3)\nreduce_matrix('../Data/Ecoli/shuf_seqs_ecoli.csv', '../Files/MM/Full/Plain//HD_ecoli_shuf.csv', codon_freq_ecoli, 'shuf_seqs', 'Gene', '../Files/MM/Omit30/HD_ecoli_shuf.csv', 0.3)\n\nreduce_matrix('../Data/Ecoli/full.csv', '../Files/MM/Full/Plain//KS_stats_ecoli.csv', codon_freq_ecoli, 'mRNA', 'Gene', '../Files/MM/Omit30/KS_stats_ecoli.csv', 0.3)\nreduce_matrix('../Data/Ecoli/rand_seqs_ecoli.csv', '../Files/MM/Full/Plain//KS_stats_ecoli_rand.csv', codon_freq_ecoli, 'rand_seqs', 'Gene', '../Files/MM/Omit30/KS_stats_ecoli_rand.csv', 0.3)\nreduce_matrix('../Data/Ecoli/shuf_seqs_ecoli.csv', '../Files/MM/Full/Plain//KS_stats_ecoli_shuf.csv', codon_freq_ecoli, 'shuf_seqs', 'Gene', '../Files/MM/Omit30/KS_stats_ecoli_shuf.csv', 0.3)\n\nreduce_matrix('../Data/Yeast/YeastSeqs.csv', '../Files/MM/Full/Plain//HD_yeast.csv', codon_freq_scerevisiae, 'seq', 'locus_tag', '../Files/MM/Omit30/HD_yeast.csv', 0.3)\nreduce_matrix('../Data/Yeast/rand_seqs_yeast.csv', '../Files/MM/Full/Plain//HD_yeast_rand.csv', codon_freq_scerevisiae, 'rand_seqs', 'locus_tag', '../Files/MM/Omit30/HD_yeast_rand.csv', 0.3)\nreduce_matrix('../Data/Yeast/shuf_seqs_yeast.csv', '../Files/MM/Full/Plain//HD_yeast_shuf.csv', codon_freq_scerevisiae, 'shuf_seqs', 'locus_tag', '../Files/MM/Omit30/HD_yeast_shuf.csv', 0.3)\n\nreduce_matrix('../Data/Yeast/YeastSeqs.csv', '../Files/MM/Full/Plain//KS_stats_yeast.csv', codon_freq_scerevisiae, 'seq', 'locus_tag', '../Files/MM/Omit30/KS_stats_yeast.csv', 0.3)\nreduce_matrix('../Data/Yeast/rand_seqs_yeast.csv', '../Files/MM/Full/Plain//KS_stats_yeast_rand.csv', codon_freq_scerevisiae, 'rand_seqs', 'locus_tag', '../Files/MM/Omit30/KS_stats_yeast_rand.csv', 0.3)\nreduce_matrix('../Data/Yeast/shuf_seqs_yeast.csv', '../Files/MM/Full/Plain//KS_stats_yeast_shuf.csv', codon_freq_scerevisiae, 'shuf_seqs', 'locus_tag', '../Files/MM/Omit30/KS_stats_yeast_shuf.csv', 0.3)\n","repo_name":"ababjac/rare-codon-clustering","sub_path":"Code/Scripts/reduce_matrix.py","file_name":"reduce_matrix.py","file_ext":"py","file_size_in_byte":13326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"496123711","text":"import base64\nimport hashlib\nimport hmac\nimport os.path\nimport time\nimport urllib.parse\nfrom functools import reduce\nfrom pathlib import Path\n\nimport requests\n\n__author__ = 'Mrli'\n\nCONFIG_INI_FILENAME = \"push_config.ini\"\nCONFIG_INI_PATH = Path(__file__).resolve().parent.with_name(\"push_config.ini\")\n\nREMIND_MSG = \"\"\"\n[pusher]\npusher_type = pushplus\n\n[serverchan]\nsec_key =\n\n[dingding]\naccess_token =\nsecret =\n\n[pushplus]\npushplus_token = \n\"\"\"\n\n\nclass PusherException(Exception):\n def __init__(self, message) -> None:\n super().__init__(message)\n\n\nclass IPushUtil:\n def push(self, content: str, title: str = \"\") -> 
bool:\n        pass\n\n\nclass PushplusPush(IPushUtil):\n    def __init__(self, push_title, options: dict):\n        self.pushplus_token = options.get(\"pushplus_token\")\n        self.push_title = push_title\n\n    def push(self, content: str, title: str = \"\") -> bool:\n        d = {\n            \"token\": self.pushplus_token,\n            \"template\": \"markdown\",\n            \"title\": \"{push_title}-{title}\".format(push_title=self.push_title, title=title),\n            \"content\": content\n        }\n        res = requests.post(\"http://www.pushplus.plus/send\", data=d)\n        if not (200 <= res.json().get(\"code\") < 300):\n            print(res.json())\n        return 200 <= res.json().get(\"code\") < 300\n\n\nclass ServerChanPush(IPushUtil):\n    def __init__(self, push_title, options: dict):\n        self.sec_key = options.get(\"sec_key\")\n        self.push_title = push_title\n\n    def push(self, content: str, title: str = \"\") -> bool:\n        data = {\n            'text': \"{push_title}-{title}\".format(push_title=self.push_title, title=title),\n            'desp': content\n        }\n        res = requests.post(url='https://sc.ftqq.com/{}.send'.format(self.sec_key), data=data)\n        if not (res.json().get(\"errmsg\") == \"success\"):\n            print(res.json())\n        return res.json().get(\"errmsg\") == \"success\"\n\n\nclass DingDingPush(IPushUtil):\n    URL = \"https://oapi.dingtalk.com/robot/send\"\n\n    def __init__(self, push_title, options: dict):\n        self.access_token = options.get(\"access_token\")\n        self.secret = options.get(\"secret\")\n        self.target_url = self.get_url()\n        self.push_title = push_title\n\n    def get_url(self):\n        timestamp = round(time.time() * 1000)\n        secret_enc = bytes(self.secret, encoding=\"utf-8\")\n        string_to_sign = \"{}\\n{}\".format(timestamp, self.secret)\n        string_to_sign_enc = bytes(string_to_sign, encoding=\"utf-8\")\n        hmac_code = hmac.new(\n            secret_enc, string_to_sign_enc, digestmod=hashlib.sha256\n        ).digest()\n        sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))\n        return self.URL + \"?access_token={access_token}&timestamp={timestamp}&sign={sign}\".format(\n            access_token=self.access_token, timestamp=timestamp, sign=sign)\n\n    def push(self, content: str, title: str = \"\") -> bool:\n        msg = self.gen_markdown_msg(title, content)\n        return self.send(msg)\n\n    def send(self, message):\n        resp = requests.post(self.target_url, json=message)\n        return resp.json()\n\n    @staticmethod\n    def gen_text_msg(content, at=None, at_all=False):\n        if at is None:\n            at = []\n        return {\n            \"msgtype\": \"text\",\n            \"text\": {\"content\": content},\n            \"at\": {\"atMobiles\": at, \"isAtAll\": at_all},\n        }\n\n    def gen_markdown_msg(self, title, text, at=None, at_all=False):\n        def generateText():\n            res = \"\"\n            # Title goes on the top line\n            res += \"# \" + \"{}-\".format(self.push_title) + title + \"\\n\"\n            # Message body\n            res += text\n            # Users to @-mention\n            res += reduce(lambda x, y: x + \"@\" + y, at, \"\")\n            return res\n\n        return {\n            \"msgtype\": \"markdown\",\n            \"markdown\": {\n                \"title\": title,\n                \"text\": generateText()\n            },\n            \"at\": {\"atMobiles\": at, \"isAtAll\": at_all},\n        }\n\n\nclass Pusher:\n    def __init__(self, logger=None):\n        if logger:\n            self.cout = logger.info\n        else:\n            self.cout = print\n        self._pusher = self.init()\n\n    def init(self):\n        \"\"\"\n        Instantiate the configured pusher\n        :return:\n        \"\"\"\n        from configparser import RawConfigParser\n        cp = RawConfigParser()\n        if not os.path.exists(CONFIG_INI_PATH):\n            raise PusherException(\n                \"Please create the {filename} config file\nExpected pusher config:\n{msg}\".format(filename=CONFIG_INI_FILENAME, msg=REMIND_MSG))\n        cp.read(CONFIG_INI_PATH, encoding=\"utf8\")\n        pusher_type = cp.get(\"pusher\", \"pusher_type\").lower()\n        push_title = cp.get(\"pusher\", \"push_title\")\n        # Was a pusher configured at all?\n        
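# supported pusher_type values: serverchan, dingding, pushplus (see the dispatch below)\n        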
if not pusher_type:\n            self.cout(\"Pusher init: no pusher is configured; to enable push notifications, set one up in {filename}\".format(filename=CONFIG_INI_FILENAME))\n            return None\n\n        generator_info = dict(cp.items(pusher_type))\n        # Validate the pusher config\n        if pusher_type and not self._valid(generator_info):\n            raise PusherException(\"Invalid {}_pusher config: fields must not be empty\".format(pusher_type))\n\n        if pusher_type == \"serverchan\":\n            return ServerChanPush(push_title, generator_info)\n        elif pusher_type == \"dingding\":\n            return DingDingPush(push_title, generator_info)\n        elif pusher_type == \"pushplus\":\n            return PushplusPush(push_title, generator_info)\n        else:\n            raise PusherException(\"Unknown pusher type\")\n\n    @staticmethod\n    def _valid(config_dict: dict):\n        \"\"\"\n        Check whether any value in the dict is empty\n        :param dict:\n        :return:\n        \"\"\"\n        for v in config_dict.values():\n            if not v:\n                return False\n        return True\n\n    def push(self, content: str, title: str = \"\") -> bool:\n        if not self._pusher:\n            self.cout(\"No pusher is configured, so the message cannot be sent\")\n            return False\n        return self._pusher.push(content, title)\n","repo_name":"Freedomisgood/Zju_health_checkin_helper","sub_path":"helper/pusher.py","file_name":"pusher.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"62"}
{"seq_id":"29576236810","text":"from typing import List\nfrom enum import Enum, auto\nfrom random import *\nfrom math import sqrt\n\nimport pygame as pg\n\n\n# Program to simulate segregation.\n# See : http://nifty.stanford.edu/2014/mccown-schelling-model-segregation/\n#\n\n# Enumeration type for the Actors\nclass Actor(Enum):\n    BLUE = auto()\n    RED = auto()\n    NONE = auto()  # NONE used for empty locations\n\n\n# Enumeration type for the state of an Actor\nclass State(Enum):\n    UNSATISFIED = auto()\n    SATISFIED = auto()\n    NA = auto()  # Not applicable (NA), used for NONEs\n\nclass Person:\n    \"\"\"Object for storing all cell data and counters\"\"\"\n    def __init__(self, state: State, color:Actor) -> None:\n        self.state = state\n        self.color = color\n        self.threshold = NeighborsModel.THRESHOLD\n\n        self.friend_count = 0\n        self.foe_count = 0\n\n    def check_neighbour_percentage(self) -> None:\n        \"\"\"checks the neighbour percentage and updates the state accordingly\n        Args:\n            self\n        \"\"\"\n        if self.friend_count/(self.friend_count+self.foe_count) < self.threshold and has_neighbours(self) and is_person(self):\n            set_unsatisfied(self)\n        self.friend_count = 0\n        self.foe_count = 0\n        \n\n    
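# e.g. with THRESHOLD = 0.7, a check at 3 friends vs 2 foes gives 3/5 = 0.6 < 0.7,\n    # flipping the cell to UNSATISFIED; at 4 friends vs 1 foe the ratio is 0.8 and it stays satisfied\n    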
def poked_by(self, external_color:Actor) -> None:\n        \"\"\"When cell is poked. Compares the color of the poking cell with this cell's own\n        Args: \n            color of the poking cell \n        \"\"\"\n        if external_color == self.color: \n            self.friend_count += 1\n        else:\n            self.foe_count += 1\n        self.check_neighbour_percentage()\n\n        pass\nWorld = List[List[Actor]]  # Type alias\n\n\nSIZE = 100\n\n\ndef neighbours():\n    pg.init()\n    model = NeighborsModel(SIZE)\n    _view = NeighboursView(model)\n    model.run()\n\n\nclass NeighborsModel:\n\n    # Tune these numbers to test different distributions or update speeds\n    FRAME_RATE = 2  # Increase number to speed simulation up\n    DIST = [0.25, 0.25, 0.50]  # % of RED, BLUE, and NONE\n    THRESHOLD = 0.7  # % of surrounding neighbours that should be like me for satisfaction\n\n    # ########### These following two methods are what you're supposed to implement  ###########\n    # In this method you should generate a new world\n    # using randomization according to the given arguments.\n    @staticmethod\n    def __create_world(self, size:int) -> World:\n        \n        n_locations = size**2\n        seed = generate_seed(n_locations, self.DIST)\n\n        brave_new_world = make_matrix(seed, size)\n        return brave_new_world\n\n    #updates world\n    def __update_world(self):\n        poke_cells_around(self.world)\n        self.world = move_cells(self.world)\n        \n        pass\n\n    # ########### the rest of this class is already defined, to handle the simulation clock  ###########\n    def __init__(self, size:int):\n        self.world: World = self.__create_world(self, size)\n        # self.world: World = test()\n        self.observers = []  # for enabling decoupled updating of the view, ignore\n\n    def run(self):\n        clock = pg.time.Clock()\n        running = True\n        while running:\n            running = self.__on_clock_tick(clock)\n        # stop running\n        print(\"Goodbye!\")\n        pg.quit()\n\n    def __on_clock_tick(self, clock):\n        clock.tick(self.FRAME_RATE)  # update no faster than FRAME_RATE times per second\n        self.__update_and_notify()\n        return self.__check_for_exit()\n\n    # What to do each frame\n    def __update_and_notify(self):\n        self.__update_world()\n        self.__notify_all()\n\n    @staticmethod\n    def __check_for_exit() -> bool:\n        keep_going = True\n        for event in pg.event.get():\n            # Did the user click the window close button?\n            if event.type == pg.QUIT:\n                keep_going = False\n        return keep_going\n\n    # Use an Observer pattern for views\n    def add_observer(self, observer):\n        self.observers.append(observer)\n\n    def __notify_all(self):\n        for observer in self.observers:\n            observer.on_world_update()\n\n\n# ---------------- Helper methods ---------------------\n\n\n\n#---------------World building methods----------------\ndef create_person(color: Actor):\n    \"\"\"creates a person from the given arguments\n    Args:\n        Color of the person\n    Returns:\n        the created person\n    \"\"\"\n    if color == \"red\":\n        person = Person(State.SATISFIED, Actor.RED)\n    elif color == \"blue\":\n        person = Person(State.SATISFIED, Actor.BLUE)\n    else:\n        person = Person(State.NA, Actor.NONE)\n    return person\n\ndef generate_seed(n_locations:int, odds:list[float]) -> list[Person]:\n    \"\"\"Generates a seed from the probability map\n    Args:\n        size of the map and the probability map\n    Returns:\n        flat list of where everything is going to be\n    \"\"\"\n    red_amount = int(odds[0]*n_locations)\n    blue_amount = int(odds[1]*n_locations)\n    empty_amount = int(odds[2]*n_locations)\n\n    flat_list = create_flat_list(red_amount, blue_amount, empty_amount)\n    shuffle(flat_list)\n    seed = flat_list\n\n    return seed\ndef create_flat_list(red_amount:int, blue_amount:int, empty_amount:int) -> list[Person]:\n    \"\"\"creates a random flat list with all the objects in it according to the given percentages\n    
Args:\n        amount of red, blue and empty cells\n    Returns:\n        a list of Person objects with all the amounts above\n    \"\"\"\n    reds = [create_person(\"red\")] * red_amount\n    blues = [create_person(\"blue\")] * blue_amount\n    empties = [create_person(\"none\")] * empty_amount\n\n    return reds+blues+empties\n\ndef make_matrix(flat_list:list[Person], size:int) -> list[list[Person]]:\n    \"\"\"Makes a matrix with a specific width from a flat list\n    Args:\n        the flat list, the width of the matrix\n    Returns:\n        The newly created matrix\n    \"\"\"\n    output_matrix = [flat_list[row_num*size: (row_num+1)*size] for row_num in range(size)]\n    return output_matrix\n\n#--------------Changing methods -------------------\n\ndef poke_cells_around(world:list[list[Person]]):\n    \"\"\"Pokes all cells around all cells\n    Args:\n        A matrix of the world\n    \"\"\"\n    size = len(world)\n    for y, row in enumerate(world, start=0):\n        #checks the row length in case the canvas is not square\n        for x in range(len(row)):\n            current = world[y][x]\n            if is_person(current): #checks that the index actually has an actor\n                indexes_to_be_poked = set_poke_indexes(x, y)\n\n                for index in indexes_to_be_poked: #checks if indexes are valid and pokes the valid ones\n                    if is_valid_location(size, index[1], index[0]): #checks if location is valid\n                        neighbour = world[index[1]][index[0]]\n                        neighbour.poked_by(current.color)\n    \ndef is_person(person:Person) -> bool:\n    \"\"\"checks if the input item is an empty square or a person\n    Args:\n        person object\n    Returns:\n        Bool for if it is empty or a person\n    \"\"\"\n    if person.color == Actor.NONE:\n        return False\n    return True\n    \n\ndef set_satisfied(person:Person):\n    \"\"\"sets person state to satisfied\n    Args:\n        person object\n    \"\"\"\n    person.state = State.SATISFIED\n\ndef set_unsatisfied(person):\n    \"\"\"sets person state to unsatisfied\n    Args:\n        person object\n    \"\"\"\n    person.state = State.UNSATISFIED\n\n\ndef has_neighbours(item:Person) -> bool:\n    \"\"\"Checks if there are any neighbours around the given item\n    Args:\n        a Person object to be checked\n    Returns:\n        A bool of whether the object has neighbours or not\n    \"\"\"\n    if item.friend_count+item.foe_count == 0: \n        return False\n    else:\n        return True\n\ndef move_cells(world:list[list[Person]]) -> list[list[Person]]:\n    \"\"\"Moves cells that are unsatisfied to new empty spaces\n    Args:\n        The world matrix\n    Returns:\n        The new updated world matrix\n    \"\"\"\n    empty_indexes = find_empty_indexes(world)\n    for i, row in enumerate(world, start = 0):\n        for j, item in enumerate(row, start = 0):\n            if item.state == State.UNSATISFIED:\n                color = item.color\n                random_empty_place = find_random_empty_place(empty_indexes)\n                #creates a new object at an empty index\n                world = create_new_object_at_empty_index(random_empty_place, world, color)\n                #add an empty object at the old index\n                world[i][j] = clear()\n                # adds the newly cleared index to the empty list\n                empty_indexes.append([i,j])\n    return world\n    \ndef find_random_empty_place(empty_indexes:list[list[int]]):\n    \"\"\"checks through the empty indexes and finds a random one\n    Args:\n        List of empty indexes\n    Returns:\n        a random one in the list\n    \"\"\"\n    i = randint(0, len(empty_indexes)-1)\n    random_empty_place = empty_indexes[i]\n    del empty_indexes[i]  # removes the used index from the empty indexes list\n    return random_empty_place\n\ndef create_new_object_at_empty_index(empty_place:list[int], world:list[list[Person]], color:Actor) -> list[list[Person]]:\n    \"\"\"Creates a new object at an index\n    Args:\n        Empty index (x and y), the world matrix, color of the 
actor to be created\n    \"\"\"\n    world[empty_place[0]][empty_place[1]] = Person(State.SATISFIED, color)\n    return world\n\ndef clear() -> Person:\n    \"\"\"Creates an empty person\n    Returns: empty person\n    \"\"\"\n    return Person(State.NA, Actor.NONE)\n\ndef find_empty_indexes(world:list[list[Person]]) -> list[list[int]]:\n    \"\"\"Finds empty indexes in the world\n    Args:\n        The world matrix\n    Returns:\n        All empty indexes in world\n    \"\"\"\n    output = []\n    for i, row in enumerate(world, start = 0):\n        for j, item in enumerate(row, start = 0):\n            if item.color == Actor.NONE:\n                output.append([i,j])\n    return output\n\ndef set_poke_indexes(current_x: int, current_y: int) -> list[list[int]]:\n    \"\"\"Sets the indexes that are to be poked\n    \"\"\"\n    x1 = current_x + 1\n    x2 = current_x - 1\n    y1 = current_y + 1\n    y2 = current_y - 1\n    output = [\n        [x1, current_y],\n        [x2, current_y],\n        [current_x, y1],\n        [current_x, y2],\n        [x1, y1],\n        [x2, y1],\n        [x1, y2],\n        [x2, y2] \n    ]\n    return output\n\n\n    # Check if inside world\ndef is_valid_location(size: int, row: int, col: int) -> bool:\n    return 0 <= row < size and 0 <= col < size\n\n\n# ------- Testing -------------------------------------\n\n# Here you run your tests i.e. call your logic methods\n# to see that they really work\ndef test():\n    # A small hard coded world for testing\n    test_world = [\n        [Person(State.SATISFIED, Actor.RED), Person(State.SATISFIED, Actor.RED), Person(State.NA, Actor.NONE)],\n        [Person(State.NA, Actor.NONE), Person(State.SATISFIED, Actor.BLUE), Person(State.NA, Actor.NONE)],\n        [Person(State.SATISFIED, Actor.RED), Person(State.NA, Actor.NONE), Person(State.SATISFIED, Actor.BLUE)]\n    ]\n    \n    th = 0.5  # Simpler threshold used for testing\n\n    size = len(test_world)\n    print(is_valid_location(size, 0, 0))\n    print(not is_valid_location(size, -1, 0))\n    print(not is_valid_location(size, 0, 3))\n    print(is_valid_location(size, 2, 2))\n\n    # TODO More tests\n    return test_world\n    exit(0)\n\n\n# Helper method for testing\ndef count(a_list, to_find):\n    the_count = 0\n    for a in a_list:\n        if a == to_find:\n            the_count += 1\n    return the_count\n\n\n# ########### NOTHING to do below this row, it's pygame display stuff ###########\n# ... 
but by all means have a look at it, it's fun!\nclass NeighboursView:\n # static class variables\n WIDTH = 700 # Size for window\n HEIGHT = 700\n MARGIN = 10\n\n WHITE = (255, 255, 255)\n RED = (255, 0, 0)\n BLUE = ( 0, 0, 255)\n\n # Instance methods\n\n def __init__(self, model: NeighborsModel):\n pg.init() # initialize pygame, in case not already done\n self.dot_size = self.__calculate_dot_size(len(model.world))\n self.screen = pg.display.set_mode([self.WIDTH, self.HEIGHT])\n self.model = model\n self.model.add_observer(self)\n\n def render_world(self):\n # # Render the state of the world to the screen\n self.__draw_background()\n self.__draw_all_actors()\n self.__update_screen()\n\n # Needed for observer pattern\n # What do we do every time we're told the model had been updated?\n def on_world_update(self):\n self.render_world()\n\n # private helper methods\n def __calculate_dot_size(self, size):\n return max((self.WIDTH - 2 * self.MARGIN) / size, 2)\n\n @staticmethod\n def __update_screen():\n pg.display.flip()\n\n def __draw_background(self):\n self.screen.fill(NeighboursView.WHITE)\n\n def __draw_all_actors(self):\n for row in range(len(self.model.world)):\n for col in range(len(self.model.world[row])):\n self.__draw_actor_at(col, row)\n\n def __draw_actor_at(self, col, row):\n color = self.__get_color(self.model.world[row][col].color)\n xy = self.__calculate_coordinates(col, row)\n pg.draw.circle(self.screen, color, xy, self.dot_size / 2)\n\n # This method showcases how to nicely emulate 'switch'-statements in python\n @staticmethod\n def __get_color(actor):\n return {\n Actor.RED: NeighboursView.RED,\n Actor.BLUE: NeighboursView.BLUE\n }.get(actor, NeighboursView.WHITE)\n\n def __calculate_coordinates(self, col, row):\n x = self.__calculate_coordinate(col)\n y = self.__calculate_coordinate(row)\n return x, y\n\n def __calculate_coordinate(self, offset):\n x: float = self.dot_size * offset + self.MARGIN\n return x\n\n\nif __name__ == \"__main__\":\n neighbours()\n","repo_name":"casperHansenTheEnemyOfAustria/GruProgLabb","sub_path":"neighbours/neighbours_scetch.py","file_name":"neighbours_scetch.py","file_ext":"py","file_size_in_byte":14062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2909936098","text":"from tkinter import *\n\nwindow = Tk()\nwindow.title(\"Mile to Km Converter\")\nwindow.config(padx=40, pady=20, bg=\"white\")\n\n\ndef conversion():\n result.config(text=str(round(float(mile.get())*1.609, 2)))\n\n\ntext = Label(text=\"is equal to \", bg=\"white\")\ntext.grid(column=0, row=2)\n\nresult = Label(text=\"0\", bg=\"white\")\nresult.grid(column=1, row=2)\nresult.config(padx=30, pady=10)\n\nkm_symbol = Label(text=\"Km\", bg=\"white\")\nkm_symbol.grid(column=2, row=2)\n\nmile = Entry(width=10)\nmile.grid(column=1, row=1)\n\nmile_symbol = Label(text=\"Miles\", bg=\"white\")\nmile_symbol.grid(column=2, row=1)\n\ncalculate = Button(text=\"Calculate\", command=conversion)\ncalculate.grid(column=1, row=3)\n\nwindow.mainloop()\n","repo_name":"lfalcaolopes/100-Days-of-Code--Python","sub_path":"Day 27 - Mile to Kilometers Converter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"31215878053","text":"from turtle import position\nfrom p5 import *\nfrom y_py import YDoc, YArray, AfterTransactionEvent\nfrom client import YDocWSClient\n\ndoc: YDoc\nstrokes: YArray\nclient: 
YDocWSClient\n\n\ndef setup():\n    \"\"\"\n    Initialization logic that runs before the `draw()` loop.\n    \"\"\"\n    global strokes\n    global doc\n    global client\n    title(\"Ypy Drawing Demo\")\n    size(720, 480)\n    doc = YDoc(0)\n    strokes = doc.get_array(\"strokes\")\n    client = YDocWSClient()\n    doc.observe_after_transaction(client.send_updates)\n    \n\n\ndef draw():\n    \"\"\"\n    Handles user input and updates the canvas.\n    \"\"\"\n    global strokes\n    global doc\n    global client\n    client.apply_updates(doc)\n    rect_mode(CENTER)\n    background(255)\n    if mouse_is_pressed:\n        with doc.begin_transaction() as txn:\n            strokes.append(txn, [mouse_x, mouse_y])\n    fill(0)\n    no_stroke()\n    for x,y in strokes:\n        ellipse((x, y), 33, 33)\n\nrun(frame_rate=60)\n","repo_name":"y-crdt/ypy","sub_path":"examples/drawing/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"62"}
{"seq_id":"35731371070","text":"\n\ndef isValid(A):\n    _map = {}\n\n    for char in A:\n        if char in _map:\n            _map[char] = _map.get(char)+1\n        else:\n            _map[char] = 1\n    value_map = {}\n\n    for key in _map:\n        value_map.setdefault(_map[key],[]).append(key)\n    \n    len_keys = len(value_map.keys())\n    if len_keys > 2:\n        return \"NO\"\n    if len_keys == 1:\n        return \"YES\"\n    print(value_map)\n    a = list(value_map.keys())[0]\n    b = list(value_map.keys())[1]\n    if not (a - b == 1 or a-b == -1):\n        return \"NO\"\n    a = len(value_map.get(a))\n    b = len(value_map.get(b))\n    if a == 1 or b == 1:\n        return \"YES\"\n    return \"NO\"\n\nprint(isValid(\"aabbcc\"))\n\n","repo_name":"abu-abraham/misc","sub_path":"Trials/hackerrank.py","file_name":"hackerrank.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"21138105088","text":"# Merging sorted arrays 2\n# Sorted arrays num1 and num2 are given, with sizes m and n respectively. 
\n# Fill from num1 first while keeping sorted order, extending into num2.\n# * Additional notes\n# - This time there is no merged buffer of size m+n.\n# - Fill the num1 array with the smallest elements of num1 and num2, and put the remaining elements into num2, still sorted.\n# - No extra array allocation.\n\n# # test case 1\n# num1 = [1,3,5,7]\n# m = 4\n# num2 = [2,4,8]\n# n = 3\n# # test case 2 \n# num1 = [2,8,10]\n# m=3\n# num2 = [5]\n# n=1\n# test case 3\nnum1 = [2,8,10]\nm = 3\nnum2 = [1,2,5,8,9,12]\nn = 6\n\n\ndef mergeSortedList2(num1, m, num2, n) :\n    i = m-1\n    j = n-1\n    while (i >= 0) : \n        while j >= 0 :\n            if num1[i] > num2[j] :\n                num1[i],num2[j] = num2[j],num1[i]\n            else :\n                j -= 1 \n        i -= 1\n        j = n-1 \n\n    num1[:] = sorted(num1)\n    \n\nmergeSortedList2(num1, m, num2, n)\nprint(f\"{num1} {num2}\")\n","repo_name":"hgkim00/Algorithm","sub_path":"other/mergeSortedList2.py","file_name":"mergeSortedList2.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"41223361235","text":"import sys\nimport os\nimport itertools\n\ndef main(taskid):\n\n    experiment_name = 'submission'\n\n    taskid = int(taskid[0])\n    hyperparameter_config = {\n        'X-test-std': [1.0, 3.0],\n        'realizable': [0, 1],\n        'use-minibatch': [0, 1],\n        'use-rr-relu': [0, 1],\n        'ffrelu-layers': [1, 5],\n    }\n    keys, values = zip(*hyperparameter_config.items())\n    hyperparameter_experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]\n    temp = hyperparameter_experiments[taskid]\n\n    os.system(\"python3 lastlayerbayesian.py \"\n              \"--num-n 10 --MCs 30 \"\n              \"--experiment-name %s \"\n              \"--taskid %s \"\n              \"--X-test-std %s \"\n              \"--realizable %s \"\n              \"--use-rr-relu %s \"\n              \"--ffrelu-layers %s \"\n              \"--use-minibatch %s \"\n              %(experiment_name,\n                taskid,\n                temp['X-test-std'],\n                temp['realizable'],\n                temp['use-rr-relu'],\n                temp['ffrelu-layers'],\n                temp['use-minibatch']))\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n\n","repo_name":"suswei/RLCT","sub_path":"sweep.py","file_name":"sweep.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"}
{"seq_id":"17665850239","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport propagators\nimport utilities\nfrom tqdm import trange\n\n# Constants definition\nG = 6.6743 * pow(10, -11)\nM_earth = 5.977 * pow(10, 24)\nmu_earth = G * M_earth\nR_earth = 6378140  # [meters]\n\n# Orbit definition\nH = 500000  # Altitude, [meters]\na = R_earth + H  # Semi-major axis, [meters]\ni = 97.4065  # Inclination, [deg]\ne = 3.73173 * pow(10, -16)  # Eccentricity, []\nOmega = 345  # Longitude of the Ascending Node [deg]\nomega = 0  # Argument of pericenter [deg]\nM = 0  # Mean anomaly, [deg]\nT = 2 * np.pi * np.sqrt(pow(a, 3) / mu_earth)  # Orbital period, [seconds]\n\n# Compute initial position and velocity vector from orbital elements\nr_vector, v_vector = utilities.orbital_elements_to_cartesian(a, e, i, Omega, omega, M, mu_earth)\nx = r_vector[0]\ny = r_vector[1]\nz = r_vector[2]\nvx = v_vector[0]\nvy = v_vector[1]\nvz = v_vector[2]\n\n# Propagate orbit (all times in seconds)\nstart_time = 0\nend_time = 604800\ntime_step = 60\nx_sc_vector, y_sc_vector, z_sc_vector, vx_sc_vector, vy_sc_vector, vz_sc_vector, t_vector = \\\n    propagators.runge_kutta_4(x, y, z, vx, vy, vz, mu_earth, start_time, end_time, time_step)\n\n# Compute sun position\nx_sun_vector = [139861683376.804]\ny_sun_vector = [2359460644.848]\nz_sun_vector = [59677952894.078]\ncounter = 0\nfor time in trange(start_time, end_time, time_step, desc=\"Sun position\"):\n    x_sun, 
# Compute generated power\nsolar_flux = 1367\npower = [0] * np.size(t_vector)\nfor i in trange(0, np.size(t_vector), desc="Power"):\n    sunlight = utilities.advanced_shadow_check(x_sc_vector[i], y_sc_vector[i], z_sc_vector[i], x_sun_vector[i],\n                                               y_sun_vector[i], z_sun_vector[i], R_earth)\n    power[i] = sunlight * solar_flux * solar_panel_area * solar_panel_efficiency\n\n# Plot results\nplt.figure()\nplt.plot(t_vector, power)\nplt.xlabel("Time, t, [sec]")\nplt.ylabel("Power Generated, $P_{gen}$, [W]")\nplt.grid()\nplt.show()\n","repo_name":"aretselis/retseprop","sub_path":"src/power_generation.py","file_name":"power_generation.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"28069169848","text":"# import the function defined in spider_room.py\nfrom spider_room import spider_room\n\n# import the function defined in deathclaw_room.py\nfrom deathclaw_room import deathclaw_room\n\n# import the function defined in vampire_room.py\nfrom vampire_room import vampire_room\n\n# import the function defined in game_over.py\nfrom game_over import game_over\n\ndef start():\n    # give the initial prompts.\n    print("\\nYou are standing in a dark and creepy hallway.")\n    print("There is a door to your left, another to the right and a hallway straight ahead. Which way do you go? (l, r or s)")\n\n    # convert the player's input() to lower_case\n    answer = input(">").lower()\n\n    if "l" in answer:\n        # if the player typed "left" or "l" lead him to the spider_room()\n        spider_room()\n    \n    elif "r" in answer:\n        # else if the player typed "right" or "r" lead him to the deathclaw_room()\n        deathclaw_room()\n\n    elif "s" in answer:\n        # else if the player typed "straight" or "s" lead him to the vampire_room()\n        vampire_room()\n\n    else:\n        # else call game_over() function with the "reason" argument\n        game_over("Don't you know how to type something properly?")\n\n\n# starting the game\nstart()","repo_name":"lemondrop81/python-game","sub_path":"adventure.py","file_name":"adventure.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"22594718411","text":"#!/usr/bin/python3\n"""Extend your Python script to export data in the JSON format."""\nimport json\nimport requests\n\n\ndef export_all_employee_tasks_to_json():\n    """Exports all tasks from all employees to JSON."""\n    url = "https://jsonplaceholder.typicode.com"\n    users_url = f"{url}/users"\n    todos_url = f"{url}/todos"\n\n    users = requests.get(users_url).json()\n    todos = requests.get(todos_url).json()\n\n    user_tasks = {}\n    for user in users:\n        user_id = user['id']\n        username = user['username']\n        user_todos = [todo for todo in todos if todo['userId'] == user_id]\n        task_list = []\n        for todo in user_todos:\n            task = {\n                "username": username,\n                "task": todo['title'],\n                "completed": todo['completed']\n            }\n            task_list.append(task)\n        user_tasks[user_id] = task_list\n\n    with open('todo_all_employees.json', 'w') as file:\n        json.dump(user_tasks, file)\n\n\nif __name__ == "__main__":\n
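    # run the full export when executed as a script\n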
    export_all_employee_tasks_to_json()\n","repo_name":"connortrue/holbertonschool-back-end","sub_path":"api/3-dictionary_of_list_of_dictionaries.py","file_name":"3-dictionary_of_list_of_dictionaries.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"38432386704","text":"from typing import Sequence\n\ndef reverse_array(A:Sequence) -> Sequence:\n    length = len(A)\n    for i in range(length//2):\n        A[i], A[length - i - 1] = A[length - i - 1], A[i]\n    return A\n\nif __name__ == '__main__':\n    print('The numbers you enter are collected into an array and printed in reverse.')\n    print('Entering \\'-q\\' ends input and starts the output.')\n    seq = []\n    while True:\n        number = input('Enter a value : ')\n        if(number == '-q'):\n            break\n        seq.append(number)\n    \n    print(f'{reverse_array(seq)}')\n","repo_name":"jedi-hoodie12/algorithm-training-python","sub_path":"Chap02/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"42772150162","text":"# -*- coding: utf-8 -*-\nimport random\n\n\ndef best_of(bo, iterator_):\n    """Function used for deciding something in Best of Something matches.\n\n    Parameters\n    ----------\n    bo : int\n        Best Of (Number of Decider "matches").\n\n    iterator_ : iterator\n        list or tuple with player names.\n\n    Returns\n    -------\n\n    """\n    for i in range(bo):\n        print(random.choice(iterator_))\n\n\ndef furigoma():\n    """Shogi starting player method.\n\n    Parameters\n    ----------\n\n    Returns\n    -------\n\n    """\n    pawn_state = ('normal', 'promoted')\n\n    player_1 = []\n    player_2 = []\n    for pawn in range(5):\n        player_1.append(random.choice(pawn_state))\n        player_2.append(random.choice(pawn_state))\n\n    return player_1.count(pawn_state[-1]), player_2.count(pawn_state[-1])\n\n\nif __name__ == '__main__':\n    list_ = []\n    for i in range(10):\n        list_.clear()\n        for j in range(100000):\n            play = furigoma()\n            list_.append(play.index(max(play)) + 1)\n\n        print('player 1: {}\\nplayer 2: {}\\n-------------'.format(\n            list_.count(1), list_.count(2)\n        ))\n","repo_name":"Brunopaes/python-sandbox","sub_path":"src/misc/furigoma.py","file_name":"furigoma.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"}
{"seq_id":"25600115619","text":"\n\nfrom colorspace import lighten, darken\nfrom pytest import raises\nimport numpy as np\n\n# ------------------------------------------\n# Wrong usage\n# ------------------------------------------\ndef test_wrong_usage(col = "#BB7784"):\n    raises(TypeError, lighten, col = 123)\n    raises(TypeError, lighten, col = col, method = ["foo"])\n    raises(ValueError, lighten, col = col, method = "foo")\n    raises(TypeError, lighten, col = col, space = ["foo"])\n    raises(ValueError, lighten, col = col, space = "foo")\n    raises(TypeError, lighten, col = col, fixup = "foo")\n\n\n# ------------------------------------------\n# Lighten colors\n# ------------------------------------------\ndef test_lighten_relative_HCL(col = "#BB7784"):\n    tmp = {"#C7828F": 0.1, "#D28E9A": 0.2, "#F6B0BD": 0.5}\n    for sol,amount in tmp.items():\n        x = lighten(col = col, amount = amount, method = "relative", space = "HCL")\n        assert x == sol\ndef test_lighten_absolute_HCL(col = "#BB7784"):\n    tmp = {"#D7929E": 0.1, "#F3ADB9": 0.2, "#FFFFFF": 0.5}\n    for sol,amount in tmp.items():\n        x = lighten(col = col, amount = amount, method = "absolute", 
space = \"HCL\")\n assert x == sol\n\n## Seems I still have an issue with HLS -> hex\n#def test_lighten_relative_HLS(col = \"#BB7784\"):\n# tmp = {\"#C28590\": 0.1, \"#C9929D\": 0.2, \"#DDBBC2\": 0.5}\n# for sol,amount in tmp.items():\n# x = lighten(col = col, amount = amount, method = \"relative\", space = \"HLS\")\n# assert x == sol\n#def test_lighten_absolute_HLS(col = \"#BB7784\"):\n# tmp = {\"#CC99A3\": 0.1, \"#DDBBC2\": 0.2, \"#FFFFFF\": 0.5}\n# for sol,amount in tmp.items():\n# x = lighten(col = col, amount = amount, method = \"absolute\", space = \"HLS\")\n# assert x == sol\n\n# TODO(R): missing 'combined' where I do have a similar/the same problem\n# as with absolute HLS -> seems my HLS to hex does not work as intended.\n\n\n\n# ------------------------------------------\n# Darkening colors\n# ------------------------------------------\ndef test_darken_relative_HCL(col = \"#BB7784\"):\n tmp = {\"#AC6875\": 0.1, \"#9C5967\": 0.2, \"#722D3D\": 0.5}\n for sol,amount in tmp.items():\n x = darken(col = col, amount = amount, space = \"HCL\")\n assert x == sol\n\n## Seems I still have an issue with HLS -> hex\n#def test_darken_relative_HLS(col = \"#BB7784\"):\n# tmp = {\"#C28590\": 0.1, \"#C9929D\": 0.2, \"#DDBBC2\": 0.5}\n# for sol,amount in tmp.items():\n# x = darken(col = col, amount = amount, space = \"HLS\")\n# assert x == sol\n\n# TODO(R): missing 'combined' where I do have a similar/the same problem\n# as with absolute HLS -> seems my HLS to hex does not work as intended.\n\n# ------------------------------------------\n# Testing returns\n# ------------------------------------------\ndef test_return_str():\n col = \"#BB7784\"\n res = lighten(col)\n assert isinstance(res, str)\n\ndef test_return_list():\n from colorspace.palettes import palette\n pal = palette([\"#023FA5\", \"#E2E2E2\", \"#8E063B\"], \"test_palette\")\n res = lighten(pal)\n assert isinstance(res, palette)\n assert pal.name() == \"test_palette\"\n\ndef test_return_colorobject():\n from colorspace import lighten\n from colorspace.colorlib import colorobject, hexcols, RGB, HCL\n\n # Testing hexcols object\n col = hexcols([\"#023FA5\", \"#E2E2E2\", \"#8E063B\"])\n res = lighten(col)\n assert isinstance(res, colorobject)\n assert isinstance(res, type(col))\n assert col.length() == res.length()\n\n # RGB test .. note: lighten() returns colorobject of class hexcols\n col = RGB(R = [0.5, 0.8, 0], G = [0.5, 0, 0.8], B = [0, 0.8, 0.5])\n res = lighten(col)\n assert isinstance(res, colorobject)\n assert col.length() == res.length()\n\n # HCL test .. 
note: lighten() returns colorobject of class hexcols\n col = HCL(H = [-180, 0, 180], C = [30, 50, 30], L = [60, 40, 60])\n res = lighten(col)\n assert isinstance(res, colorobject)\n assert col.length() == res.length()\n\n","repo_name":"retostauffer/python-colorspace","sub_path":"colorspace/tests/test_utils_lighten_darken.py","file_name":"test_utils_lighten_darken.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"62"} +{"seq_id":"42705252553","text":"import json\nimport logging\nimport os\nimport random\nfrom copy import deepcopy\nfrom typing import List\n\nimport yaml\n\nlogging.basicConfig(\n level=os.environ.get(\"LOGLEVEL\", \"DEBUG\").upper(),\n format=\"%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\ndef get_objects_and_locations(\n total_objects: list,\n maximum_num_objects_per_human: int,\n maxiumum_days_period: int,\n maximum_num_locations_per_object: int,\n commonsense_prob: float,\n possible_object_locations: list,\n semantic_knowledge: dict,\n) -> List:\n \"\"\"Get objects and their locations for one human.\n\n Args\n ----\n total_objects: total possible objects.\n maximum_num_objects_per_human: maximum number of objects per human\n maxiumum_days_period: maximum number of days per period.\n maximum_num_locations_per_object: maximum number of locations per object.\n commonsense_prob: the probability of an object being located at a commonsense\n location.\n possible_object_locations: possible object locations,\n semantic_knowledge: commonsense knowledge,\n\n Returns\n -------\n objects and their locations (e.g., [[\"laptop\", \"desk\"], [\"laptop\", \"desk\"],\n [\"laptop\", \"table\"], [\"laptop\", \"desk\"]])\n\n \"\"\"\n logging.debug(\"Getting objects and their locations for one human ...\")\n\n random.shuffle(total_objects)\n num_objects = random.randint(1, maximum_num_objects_per_human)\n objs = total_objects[:num_objects]\n\n object_locations = []\n\n for obj in objs:\n num_days_period = random.randint(1, maxiumum_days_period)\n num_locations_per_object = random.randint(1, maximum_num_locations_per_object)\n\n probs = [commonsense_prob] + [\n (1 - commonsense_prob) / (num_locations_per_object - 1)\n for _ in range(num_locations_per_object - 1)\n ]\n\n obj_locs = [semantic_knowledge[obj]]\n count = 0\n while len(obj_locs) != num_locations_per_object:\n # print(obj_locs)\n\n loc = random.choice(possible_object_locations)\n if loc not in obj_locs:\n obj_locs.append(loc)\n count += 1\n\n assert len(obj_locs) == len(probs)\n\n obj_locs = random.choices(obj_locs, probs, k=num_days_period)\n for loc in obj_locs:\n object_locations.append([obj, loc])\n\n return object_locations\n\n\ndef main(\n semantic_knowledge_path: str,\n human_names_path: str,\n save_path: str,\n num_humans: int,\n num_total_objects: int,\n maximum_num_objects_per_human: int,\n maximum_num_locations_per_object: int,\n commonsense_prob: float,\n maxiumum_days_period: int,\n last_timestep: int,\n seed: int,\n) -> None:\n \"\"\"Run!\n\n Args\n ----\n semantic_knowledge_path: e.g., \"./room_env/data/semantic-knowledge.json\"\n human_names_path: e.g., \"./room_env/data/human-names\"\n save_path: e.g., \"./room_env/data/des-config-m.json\"\n num_humans: e.g., 8\n num_total_objects: e.g., 8\n maximum_num_locations_per_object: maximum number of locations per object (e.g., 8)\n commonsense_prob: commonsense probability (e.g., 0.8)\n maxiumum_days_period: 
maximum number of days per period.\n    last_timestep: the last day when the DES stops (e.g., 1000).\n    seed: random seed\n\n    """\n    assert num_total_objects >= maximum_num_objects_per_human\n\n    config = {"components": {}, "resources": {}, "last_timestep": last_timestep}\n\n    assert maximum_num_locations_per_object <= maxiumum_days_period\n\n    # for reproducibility\n    random.seed(seed)\n    with open(human_names_path, "r") as stream:\n        human_names = [foo.strip() for foo in stream.readlines()]\n\n    with open(semantic_knowledge_path, "r") as stream:\n        semantic_knowledge = json.load(stream)\n\n    assert num_humans <= len(human_names)\n\n    logging.debug(\n        f"There were {len(semantic_knowledge)} objects before removing the duplicate "\n        "object locations."\n    )\n    unique_locations = []\n\n    for key, val in deepcopy(semantic_knowledge).items():\n        if "_" in key:\n            del semantic_knowledge[key]\n            continue\n        if val["AtLocation"][0]["tail"] in unique_locations:\n            del semantic_knowledge[key]\n            continue\n        if "_" in val["AtLocation"][0]["tail"]:\n            del semantic_knowledge[key]\n            continue\n        # This avoids locations being same as object names.\n        if val["AtLocation"][0]["tail"] in list(semantic_knowledge):\n            del semantic_knowledge[key]\n            continue\n\n        unique_locations.append(val["AtLocation"][0]["tail"])\n\n    logging.info(\n        f"There are now {len(semantic_knowledge)} objects after removing the duplicate "\n        "object locations."\n    )\n\n    semantic_knowledge = {\n        key: val["AtLocation"][0]["tail"] for key, val in semantic_knowledge.items()\n    }\n\n    assert num_total_objects <= len(semantic_knowledge)\n    assert maximum_num_objects_per_human <= len(semantic_knowledge)\n    assert maximum_num_locations_per_object <= len(semantic_knowledge)\n\n    random.shuffle(human_names)\n    total_humans = human_names[:num_humans]\n\n    total_objects = list(semantic_knowledge)\n    random.shuffle(total_objects)\n    total_objects = total_objects[:num_total_objects]\n\n    possible_object_locations = list(semantic_knowledge.values())\n    possible_object_locations = [\n        loc\n        for loc in possible_object_locations\n        if loc not in human_names and loc not in total_objects\n    ]\n\n    semantic_knowledge = {obj: semantic_knowledge[obj] for obj in total_objects}\n\n    assert len(semantic_knowledge) == num_total_objects\n\n    random.shuffle(total_humans)\n\n    for human in total_humans:\n\n        config["components"][human] = get_objects_and_locations(\n            total_objects,\n            maximum_num_objects_per_human,\n            maxiumum_days_period,\n            maximum_num_locations_per_object,\n            commonsense_prob,\n            possible_object_locations,\n            semantic_knowledge,\n        )\n\n    config["semantic_knowledge"] = semantic_knowledge\n\n    config["resources"] = {}\n    config["complexity"] = (\n        num_humans\n        * num_total_objects\n        * maximum_num_objects_per_human\n        * maximum_num_locations_per_object\n        * maxiumum_days_period\n    )\n    with open(save_path, "w") as stream:\n        json.dump(config, stream, indent=4, sort_keys=False)\n\n    logging.info(f"DES config done! 
they are saved at {save_path}\")\n\n\nif __name__ == \"__main__\":\n with open(\"./create_des_config.yaml\", \"r\") as stream:\n config = yaml.safe_load(stream)\n print(\"Arguments:\")\n for k, v in config.items():\n print(f\" {k:>21} : {v}\")\n\n main(**config)\n","repo_name":"tae898/erc","sub_path":"create_des_config.py","file_name":"create_des_config.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"62"} +{"seq_id":"22229271144","text":"import ConfigParser\nimport argparse\nimport os\nimport subprocess\nfrom shutil import copyfile\nfrom threading import Thread, Timer\n\nimport pexpect\nimport sys\nimport logging\n\nimport time\n\nfrom rumps import rumps\n\n# Default values\n\nIKEC_PATH = '/usr/local/bin/ikec'\nIKED_PATH = '/usr/local/sbin/iked'\nRETRY_SLEEP_DURATION = 2\nPING_HOST = None\nPING_ENABLED = False\nNOGUI = False\nDEBUG_ENABLED = False\nSELECTED_PROFILE = None\nRUMPS_DEBUG = False\n\n\n# DO NOT EDIT BELOW THIS LINE\n\nCONFIG_LOADED = '.*config loaded for site.*'\nFAIL_TO_LOAD = '.*failed to load'\nTUNNEL_ENABLED = 'tunnel enabled'\nDETACHED = 'detached from key daemon'\n\nDEFAULT_ICON = 'resources/images/q1.png'\nCONNECTING_ICON = 'resources/images/q3.png'\nCONNECTED_ICON = 'resources/images/q2.png'\nPLAY_ICON = 'resources/images/play.png'\nCROSS_ICON = 'resources/images/exit.png'\nSTOP_ICON = 'resources/images/stop.png'\nABOUT_ICON = 'resources/images/user.png'\nSETTINGS_ICON = 'resources/images/sliders.png'\nSHOWLOG_ICON = 'resources/images/half.png'\n\nLOGNAME = 'MacShrew.log'\n\nlogger = None\ngui = None\n\nclass APP_STATES:\n STARTED = 0b0001\n CONNECTED = 0b0010\n CONNECTING = 0b0100\n STOPPING = 0b1000\n\nclass ShrewHelperWorker(Thread):\n\n def __init__(self, profile_name):\n global logger\n Thread.__init__(self)\n self._monitor_timer = None\n self._child = None\n self.set_state(0)\n self.profile_name = profile_name\n self.logger = logger\n\n def __execute_binary(self):\n \"\"\"\n Spawns the ikec binary with specified profile name\n :return:\n \"\"\"\n self.set_state(APP_STATES.CONNECTING)\n self.logger.info(\"Starting ikec binary with %s -r \\\"%s\\\"\" % (IKEC_PATH, self.profile_name))\n self._child = pexpect.spawn('%s -r \\\"%s\\\"' % (IKEC_PATH, self.profile_name))\n self._child.logfile_read = StreamProxy()\n self.__step_initialisation()\n\n def __step_initialisation(self):\n \"\"\"\n We expect that the profile name specified for iked was successfully loaded\n :return:\n \"\"\"\n\n i = self._child.expect([pexpect.TIMEOUT, CONFIG_LOADED, FAIL_TO_LOAD])\n\n if i == 0:\n fatal(\"Timeout while executing ikec\")\n self.disconnect()\n\n if i == 1:\n self.logger.info(\"Config loaded\")\n self.__step_send_connect()\n\n if i == 2:\n fatal(\"Fail to load site configuration for %s\" % self.profile_name)\n self.disconnect()\n\n def state(self):\n return self._state\n\n def set_state(self, value):\n \"\"\"\n If the state of the worker changes, we notify the GUI as well (if enabled)\n :param value: APP_STATES bitmask value\n :return:\n \"\"\"\n global gui\n if not NOGUI:\n if value & APP_STATES.CONNECTING:\n gui.icon = CONNECTING_ICON\n else:\n if value & APP_STATES.CONNECTED:\n gui.icon = CONNECTED_ICON\n if not value & APP_STATES.STARTED:\n gui.icon = DEFAULT_ICON\n gui.set_state(value)\n self._state = value\n\n def __step_send_connect(self):\n \"\"\"\n Config loaded, let's create the tunnel\n :return:\n \"\"\"\n\n self.logger.info(\"Sending C command to connect\")\n 
self._child.sendline('c')\n i = self._child.expect([pexpect.TIMEOUT, TUNNEL_ENABLED, DETACHED])\n\n if i == 0:\n self.logger.error(\"Ikec timeout. Cannot establish tunnel. Retrying\")\n self.__retry_with_sleep(RETRY_SLEEP_DURATION)\n\n if i == 1:\n self.logger.info(\"Tunnel established\")\n self.__monitor_loop()\n\n if i == 2:\n self.logger.info(\"Detached from key daemon\")\n self.__retry_with_sleep(RETRY_SLEEP_DURATION)\n\n def __monitor_loop(self):\n \"\"\"\n Continuously looping every 30 seconds if some new data has come\n :return:\n \"\"\"\n self.logger.info('Monitoring changes of the tunnel')\n self.set_state(APP_STATES.STARTED | APP_STATES.CONNECTED)\n try:\n while True:\n if self._monitor_timer == None and PING_ENABLED:\n self.__create_monitor_thread()\n i = self._child.expect([pexpect.TIMEOUT, DETACHED], timeout=10)\n if i == 0:\n continue\n if i == 1:\n self.set_state(APP_STATES.STARTED | APP_STATES.CONNECTING)\n self.logger.info(\"Tunnel has been closed. Retrying to establish the connection\")\n self.__retry_with_sleep(RETRY_SLEEP_DURATION)\n except Exception as e:\n if not self._state & APP_STATES.STOPPING:\n self.logger.error(\"Quiting monitoring loop due to exception %s\" % e)\n self.disconnect()\n\n def __retry_with_sleep(self, sleep_duration):\n self.logger.debug(\"Waiting %d seconds\" % (sleep_duration))\n time.sleep(sleep_duration)\n self.__step_send_connect()\n\n def __create_monitor_thread(self):\n \"\"\"\n If pinging enabled, this just pings every 20 seconds the host specified in PING_HOST. Works only in nogui mode\n :return:\n \"\"\"\n try:\n self.__ping_host()\n except Exception:\n self.logger.error(\"Exception while pinging host\")\n if self._state & APP_STATES.CONNECTED:\n self._monitor_timer = Timer(20, self.__create_monitor_thread)\n self._monitor_timer.start()\n\n def __ping_host(self):\n\n self.logger.debug(\"Pinging host %s\" % (PING_HOST))\n proc = subprocess.Popen(\"ping -c 1 %s\" % PING_HOST, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n if proc.returncode == 0:\n self.logger.debug(\"Successfully pinged host\")\n else:\n self.logger.error(\"Host %s unreachable\" % (PING_HOST))\n\n def run(self):\n self.set_state(APP_STATES.STARTED)\n self.__execute_binary()\n\n def disconnect(self):\n \"\"\"\n Let's cancel the worker, pexpect and set state to STOPPED\n :return:\n \"\"\"\n self.set_state(APP_STATES.STOPPING)\n if self._child != None:\n self._child.close(force=True)\n if self._monitor_timer != None:\n self._monitor_timer.cancel()\n self.set_state(0)\n\nclass ShrewHelperApp(rumps.App):\n\n def __init__(self, *args, **kwargs):\n super(ShrewHelperApp, self).__init__(*args, **kwargs)\n self._selected_profile = SELECTED_PROFILE\n self.__create_menu_callbacks()\n self.shrew_helper = None\n\n def _create_profile_entry(self, profile):\n \"\"\"\n Prepares menu item for the Selected profile SubMenu\n :param profile:String value of the profile\n :return: rumps.MenuItem\n \"\"\"\n item = rumps.MenuItem(profile)\n item.set_callback(self.profile_callback, profile)\n if profile == self._selected_profile: item.state = True\n return item\n\n def profile_callback(self, sender):\n \"\"\"\n Callback for every profile entry in submenu\n :param sender: profile rumps.MenuItem\n :return:\n \"\"\"\n global SELECTED_PROFILE\n self._selected_profile = sender.key\n for profile, menu_item in self.profiles_entries.items():\n menu_item.state = profile == sender.key\n SELECTED_PROFILE = sender.key\n self.connect_menu_item.title = 'Connect %s' % (sender.key)\n 
self.connect_menu_item.set_callback(self.connect)\n write_config()\n\n def disable_profiles(self):\n # self.menu[\"Select profile\"][\"Import profile\"].set_callback(None)\n for profile, menu_item in self.profiles_entries.items():\n menu_item.set_callback(None)\n\n def enable_profiles(self):\n # self.menu[\"Select profile\"][\"Import profile\"].set_callback(self.import_profile)\n for profile, menu_item in self.profiles_entries.items():\n menu_item.set_callback(self.profile_callback, profile)\n\n def __create_menu_callbacks(self):\n \"\"\"\n Creates menu in taskbar and sets callbacks\n :return:\n \"\"\"\n\n profiles_dict = {profile: self._create_profile_entry(profile) for profile in self.get_available_profiles()}\n self.profiles_entries = profiles_dict\n profiles = profiles_dict.values()\n\n self.connect_menu_item = rumps.MenuItem(\"Connect %s\" % (self._selected_profile), icon=PLAY_ICON, dimensions=(16, 16))\n self.disconnect_menu_item = rumps.MenuItem(\"Disconnect\", icon=STOP_ICON, dimensions=(16, 16))\n\n self.menu = [\n self.connect_menu_item,\n self.disconnect_menu_item,\n None,\n [rumps.MenuItem(\"Select profile\", icon=SETTINGS_ICON, dimensions=(16, 16)), profiles],\n None,\n rumps.MenuItem(\"About\", icon=ABOUT_ICON, dimensions=(16, 16)),\n [rumps.MenuItem(\"Logging\", icon=SHOWLOG_ICON, dimensions=(16, 16)), [rumps.MenuItem(\"Show log\"), rumps.MenuItem(\"Verbose logging\")]],\n rumps.MenuItem(\"Quit\", icon=CROSS_ICON, dimensions=(16, 16))\n ]\n # self.menu[\"Select profile\"][\"Import profile\"].set_callback(self.import_profile)\n\n if SELECTED_PROFILE is not None and len(SELECTED_PROFILE) > 0:\n self.connect_menu_item.set_callback(self.connect)\n\n self.menu[\"Logging\"][\"Verbose logging\"].state = DEBUG_ENABLED\n\n def connect(self, sender):\n if self.shrew_helper is not None:\n self.shrew_helper.disconnect()\n self.shrew_helper.join()\n self.shrew_helper = ShrewHelperWorker(self._selected_profile)\n self.shrew_helper.start()\n\n def disconnect(self, sender):\n self.shrew_helper.disconnect()\n\n @rumps.clicked(\"About\")\n def about(self, _):\n rumps.alert(\"MacShrew 2016\\nDevelMagic s.r.o.\\nMartin Formanko\\n\\nhttp://github.com/mejmo/mac-shrew\")\n\n @rumps.clicked(\"Quit\")\n def exit(self, _):\n rumps.quit_application()\n\n @rumps.clicked(\"Logging\", \"Show log\")\n def openlog(self, _):\n if os.path.exists(LOGNAME):\n subprocess.call(['open', '-a', 'TextEdit', LOGNAME])\n else:\n fatal('Cannot find the log file')\n\n @rumps.clicked(\"Logging\", \"Verbose logging\")\n def set_debug(self, sender):\n global DEBUG_ENABLED\n sender.state = not sender.state\n if sender.state:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n DEBUG_ENABLED = sender.state\n write_config()\n\n def get_available_profiles(self):\n \"\"\"\n Searches through ~/.ike folder and it tries to find any available profiles in sites subfolder\n :return: sorted list with string names\n \"\"\"\n from os.path import expanduser, isfile, join\n home = expanduser(\"~\")\n if not os.path.isdir(home+\"/.ike\") or not os.path.isdir(home+\"/.ike/sites\"):\n return []\n profiles = [f for f in os.listdir(home + \"/.ike/sites\") if isfile(join(home + \"/.ike/sites\", f))]\n profiles.sort()\n return profiles\n\n def set_state(self, value):\n\n if value & APP_STATES.STARTED or value & APP_STATES.CONNECTING:\n self.menu['Disconnect'].set_callback(self.disconnect)\n self.connect_menu_item.set_callback(None)\n self.disable_profiles()\n else:\n self.disconnect_menu_item.set_callback(None)\n 
self.connect_menu_item.set_callback(self.connect)\n            self.enable_profiles()\n\nclass IkedRunner:\n\n    @staticmethod\n    def is_running():\n        """\n        Checks if iked is not already running\n        :return:\n        """\n        import commands\n        return False if len(commands.getoutput("pgrep iked")) == 0 else True\n\n    @staticmethod\n    def run_iked():\n        """\n        Creates popup box with login prompt for the administrator username/password. We need iked to run under root\n        :return:\n        """\n        os.system("osascript -e 'do shell script \\"%s\\" with administrator privileges'" % (IKED_PATH))\n\nclass StreamProxy:\n\n    def write(self, msg):\n        """\n        Proxy for pexpect so that we can forward logging into our logger class\n        :param msg:\n        :return:\n        """\n        logger.debug("Ikec output: \\n%s" % msg)\n\n    def flush(self):\n        pass\n\ndef signal_handler(signal, frame):\n    """\n    Kills the pinging thread and UI thread as well, when CTRL+C received\n    :param signal:\n    :param frame:\n    :return:\n    """\n    global shrew_helper\n    if shrew_helper is not None:\n        shrew_helper.disconnect()\n    if gui is not None:\n        gui.quit_application()\n\n\ndef create_logger():\n    """\n    Logs into stdout and MacShrew.log file simultaneously\n    :return: logger object\n    """\n    logger = logging.getLogger('shrew_helper')\n    logger.setLevel(logging.DEBUG)\n    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\n    fh = logging.FileHandler(LOGNAME)\n    fh.setLevel(logging.DEBUG)\n    fh.setFormatter(formatter)\n\n    ch = logging.StreamHandler()\n    ch.setFormatter(formatter)\n    ch.setLevel(logging.DEBUG)\n\n    logger.addHandler(fh)\n    logger.addHandler(ch)\n\n    return logger\n\ndef fatal(msg):\n\n    logger.fatal(msg)\n    if not NOGUI:\n        rumps.alert(msg)\n\ndef parse_arguments():\n    """\n    Values from config, can be overridden by application arguments\n    :return:\n    """\n    global IKEC_PATH, IKED_PATH, NOGUI, SELECTED_PROFILE, PING_HOST, PING_ENABLED\n
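    # PING_HOST and PING_ENABLED are assigned below and must refer to the module-level globals\n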
\n    parser = argparse.ArgumentParser(description='Client for ShrewSoft VPN with reconnect feature and GUI')\n    parser.add_argument("-n", "--nogui", action='store_true', default=NOGUI, dest='nogui', help='disable GUI and run in console')\n    parser.add_argument("-r", "--profile", dest='profile', default=SELECTED_PROFILE, help='(only with --nogui) profile name to be used. It must be located under ~/.ike/sites/')\n    parser.add_argument("-ic", "--ikec", default=IKEC_PATH, dest='ikecpath', help='path to ikec binary')\n    parser.add_argument("-id", "--iked", default=IKED_PATH, dest='ikedpath', help='path to iked binary')\n    parser.add_argument("-p", "--pinghost", dest='pinghost', help='ping this host every 20secs when tunnel is established')\n\n    args = parser.parse_args()\n\n    if args.nogui and args.profile == '':\n        parser.error('You must specify profile name when --nogui is active')\n        sys.exit(22)\n\n    IKEC_PATH = args.ikecpath\n    IKED_PATH = args.ikedpath\n    PING_HOST = args.pinghost\n    NOGUI = args.nogui\n    SELECTED_PROFILE = args.profile\n\n    # pinging is only active when a host was supplied\n    PING_ENABLED = PING_HOST is not None\n\ndef read_config():\n    """\n    If the file ~/.macshrew/MacShrew.conf does not exist, just copy the default values and load the config\n    :return:\n    """\n    global IKEC_PATH, IKED_PATH, SELECTED_PROFILE, DEBUG_ENABLED\n\n    config = ConfigParser.ConfigParser()\n    if not os.path.isdir(os.path.expanduser("~/.macshrew")):\n        os.makedirs(os.path.expanduser("~/.macshrew"))\n    if not os.path.isfile(os.path.expanduser("~/.macshrew")+"/MacShrew.conf"):\n        copyfile("resources/conf/MacShrew.conf", os.path.expanduser("~/.macshrew/MacShrew.conf"))\n    config.readfp(open(os.path.expanduser("~/.macshrew/MacShrew.conf")))\n\n    SELECTED_PROFILE = config.get("UI", "Profile")\n    DEBUG_ENABLED = config.getboolean("UI", "VerboseLogging")\n\n    if len(config.get("IKE", "ikedpath", IKED_PATH)) > 0:\n        IKED_PATH = config.get("IKE", "ikedpath", IKED_PATH)\n\n    if len(config.get("IKE", "ikecpath", IKEC_PATH)) > 0:\n        IKEC_PATH = config.get("IKE", "ikecpath", IKEC_PATH)\n\ndef write_config():\n\n    config = ConfigParser.RawConfigParser()\n    config.add_section("UI")\n    config.set("UI", "Profile", SELECTED_PROFILE)\n    config.set("UI", "VerboseLogging", DEBUG_ENABLED)\n    config.add_section("IKE")\n    config.set("IKE", "ikedpath", IKED_PATH)\n    config.set("IKE", "ikecpath", IKEC_PATH)\n\n    with open(os.path.expanduser('~/.macshrew/MacShrew.conf'), 'wb') as configfile:\n        config.write(configfile)\n\nif __name__ == "__main__":\n\n    logger = create_logger()\n\n    read_config()\n
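    # config file values load first; parse_arguments() lets command-line flags override them\n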
Starting daemon\")\n IkedRunner.run_iked()\n except Exception as e:\n fatal(\"Cannot start iked daemon: %s\" % (e))\n sys.exit(3)\n\n if not IkedRunner.is_running():\n fatal(\"Cannot start iked\")\n sys.exit(3)\n\n rumps.debug_mode(RUMPS_DEBUG)\n\n if NOGUI:\n app = ShrewHelperWorker()\n app.run()\n else:\n gui = ShrewHelperApp(\"ShrewMac\", icon=DEFAULT_ICON, quit_button=None)\n gui.run()\n\n\n\n\n\n\n\n","repo_name":"mejmo/mac-shrew","sub_path":"MacShrew.py","file_name":"MacShrew.py","file_ext":"py","file_size_in_byte":17201,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"62"} +{"seq_id":"23969569335","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom datetime import date\nfrom fontTools import ttLib\n\nimport io\nimport json\nimport os\nimport sys\nimport gzip\nfrom bakery_cli.fonts_public_pb2 import FontProto, FamilyProto\nfrom google.protobuf import text_format\nfrom bakery_cli.nameid_values import *\n\nif sys.version < '3':\n import codecs\n def u(x):\n if not x:\n return ''\n return codecs.unicode_escape_decode(x)[0]\nelse:\n def u(x):\n return x\n\n# This is only here to have the JSON file data written in a predictable way\n# We only care about the the json object being able to iterate over the keys, so\n# other stuff might be broken...\nMETADATA_PB = 'METADATA.pb'\nMETADATA_PB_NEW = 'METADATA.pb.new'\n\n\ndef check_regular(filename):\n fontdata = fontToolsOpenFont(filename)\n isRegular = True\n\n if fontdata['OS/2'].fsSelection & 0b10001:\n isRegular = False\n if fontdata['head'].macStyle & 0b11:\n isRegular = False\n\n return fontdata['OS/2'].usWeightClass == 400 and isRegular\n\nif sys.version_info[0] < 3:\n def unicode(str):\n return str.decode('utf-8')\n\n\ndef listdir(familydir):\n files = []\n for dirpath, dirnames, filenames in os.walk(familydir):\n files += [os.path.join(dirpath, fn)\n for fn in filenames if unicode(fn.lower()).endswith('.ttf')]\n return files\n\n\nclass InsertOrderedDict(dict):\n\n def __init__(self):\n dict.__init__(self)\n self.orderedKeys = []\n\n def __setitem__(self, key, item):\n dict.__setitem__(self, key, item)\n if key not in self.orderedKeys:\n self.orderedKeys.append(key)\n\n def __delitem__(self, key):\n dict.__delitem__(self, key)\n self.orderedKeys.remove(key)\n\n def clear(self):\n dict.clear(self)\n self.orderedKeys = []\n\n def copy(self):\n dictCopy = InsertOrderedDict()\n for key in self.orderedKeys:\n dictCopy[key] = dict.get(self, key)\n return dictCopy\n\n def keys(self):\n return self.orderedKeys\n\n def items(self):\n return [(key, dict.get(self, key)) for key in self.orderedKeys]\n\n def iteritems(self):\n return iter(list(self.items()))\n\n def iterkeys(self):\n return iter(self.orderedKeys)\n\n # That's definitely a mess, but doing our best\n def update(self, dictionary=None, **kwargs):\n for key in dictionary.keys():\n if key not in self.orderedKeys:\n self.orderedKeys.append(key)\n if len(kwargs):\n for key in kwargs:\n if key not in self.orderedKeys:\n self.orderedKeys.append(key)\n dict.update(self, dictionary, **kwargs)\n\n def pop(self, key, *args):\n self.orderedKeys.remove(key)\n return dict.pop(self, key, *args)\n\n def __getattr__(self, key):\n return dict.get(self, key)\n\n def popitem(self):\n if self.orderedKeys:\n return self.pop(self.orderedKeys[0])\n return dict.popitem(self) # should raise KeyError\n\n\nSUPPORTED_SUBSETS = frozenset([\n \"menu\",\n \"arabic\",\n \"armenian\",\n \"balinese\",\n \"bengali\",\n \"burmese\",\n 
\"cherokee\",\n \"cyrillic\",\n \"cyrillic-ext\",\n \"ethiopic\",\n \"georgian\",\n \"greek\",\n \"greek-ext\",\n \"gujarati\",\n \"hebrew\",\n \"hindi\",\n \"japanese\",\n \"javanese\",\n \"kannada\",\n \"khmer\",\n \"korean\",\n \"lao\",\n \"latin\",\n \"latin-ext\",\n \"malayalam\",\n \"oriya\",\n \"osmanya\",\n \"sinhala\",\n \"tamil\",\n \"telugu\",\n \"thai\",\n \"tibetan\",\n \"vietnamese\",\n \"devanagari\"\n])\n\n# DC This should check the NAME table for correct values of the license\n# and licenseurl keys\n\n\ndef inferLicense(familydir):\n from bakery_cli.utils import UpstreamDirectory\n directory = UpstreamDirectory(familydir)\n\n if not directory.LICENSE:\n return \"\"\n\n with open(os.path.join(familydir, directory.LICENSE[0])) as fp:\n content = fp.read()\n if 'Apache License' in content:\n return 'Apache2'\n if 'SIL Open Font License, Version 1.1' in content:\n return 'OFL'\n if 'UBUNTU FONT LICENCE Version 1.0' in content:\n return 'UFL'\n return \"\"\n\n# DC This should check the italicangle matches the other ways italic can\n# be seen - filename, full name, psname, macstyle, others?\n\n\ndef inferStyle(ftfont):\n if ftfont['post'].italicAngle == 0.0:\n return \"normal\"\n return \"italic\"\n\n# DC This should check both names match, and match across the family\n\n\ndef inferFamilyName(familydir):\n files = listdir(familydir)\n familyName = \"\"\n styleName = \"\"\n for f in files:\n if check_regular(f):\n ftfont = fontToolsOpenFont(f)\n for record in ftfont['name'].names:\n if record.nameID == NAMEID_FONT_FAMILY_NAME:\n familyName = record.toUnicode()\n\n # Some authors creates TTF with wrong family name including styles\n if record.nameID == NAMEID_FONT_SUBFAMILY_NAME:\n styleName = record.toUnicode()\n\n familyName = familyName.replace(styleName, '').strip()\n\n if familyName == \"\":\n string = \"FATAL: No *-Regular.ttf found to set family name!\"\n color = \"red\"\n ansiprint(string, color)\n return \"UNKNOWN\"\n else:\n return familyName\n\n\ndef fontToolsOpenFont(filepath):\n f = io.open(filepath, 'rb')\n try:\n return ttLib.TTFont(f)\n except:\n print(filepath)\n raise\n\n\n# DC This should check both copyright strings match\ndef fontToolsGetCopyright(ftfont):\n copyright = \"\"\n for record in ftfont['name'].names:\n if record.nameID == NAMEID_COPYRIGHT_NOTICE:\n copyright = record.toUnicode()\n if len(copyright) > 0:\n return copyright\n print(\"ER: no copyright string found.\")\n \n #When no Copyright info is found, we must assume the\n # implicit 'All Rights Reserved' status\n return \"All rights reserved\"\n\n# DC This should check both names match, and stems match across the family\n\n\ndef fontToolsGetPSName(ftfont):\n psName = \"\"\n for record in ftfont['name'].names:\n if record.nameID == NAMEID_POSTSCRIPT_NAME:\n psName = record.toUnicode()\n if len(psName) > 0:\n return psName\n # DC What happens if there is no PSName set?\n\n print(\"ER: no PSName string found!\")\n return \"\"\n\n# DC This should check both names match, and stems match across the\n# family, and italic/bold match other metadata (weight, macstyle,\n# italicangle)\n\n\ndef fontToolsGetFullName(ftfont):\n fullName = \"\"\n for record in ftfont['name'].names:\n if record.nameID == NAMEID_FULL_FONT_NAME:\n fullName = record.toUnicode()\n if len(fullName) > 0:\n return fullName\n \n print(\"ER: no fullname string found!\")\n return \"\"\n\n# DC This should check both names match, and is found in designers.json\n\n\ndef fontToolsGetDesignerName(ftfont):\n designerName = \"\"\n for record in 
ftfont['name'].names:\n if record.nameID == NAMEID_DESIGNER:\n designerName = record.toUnicode()\n if len(designerName) > 0:\n return designerName\n\n print(\"ER: no DesignerName string found!\")\n return \"\"\n\n# DC This should check both names match\n\n\ndef fontToolsGetDesc(ftfont):\n fontDesc = \"\"\n for record in ftfont['name'].names:\n if record.nameID == NAMEID_DESCRIPTION:\n fontDesc = record.toUnicode()\n if len(fontDesc) > 0:\n return fontDesc\n\n print(\"ER: no fontDesc string found!\")\n return \"\"\n\n# DC NameIDs are as follows:\n# required marked *\n# 0 Copyright notice.\n# * 1 Family name\n# * 2 Font Subfamily name (should matcht the OS/2.fsSelection bit - eg, fsSelection bit 6 set = Regular)\n# * 4 Full name\n# 5 Version string (Should be 'Version .' Caps with a space between “Version” and the number; one or more digits (0-9) of value less than 65535 followed by period followed by one or more digits of value less than 65535; Any character other than a digit will terminate the minor number and act as comment string “;” is sometimes used)\n# * 6 Postscript name (Must have Platform: 1 [Macintosh]; Platform-specific encoding: 0 [Roman]; Language: 0 [English] and Platform: 3 [Windows]; Platform-specific encoding: 1 [Unicode]; Language: 0x409 [English (American)] and any nameID=6s other than those are out of spec; both must be identical; no longer than 63 characters; and restricted to the printable ASCII subset, codes 33 through 126; identical to the font name as stored in the CFF's Name INDEX;\n# 7 Trademark\n# 8 Manufacturer Name.\n# 9 Designer Name\n# 10 Description\n# 11 URL Vendor (should have http://)\n# 12 URL Designer (should have http://)\n# 13 License Description\n# 14 License URL\n# 16 Preferred Family; must be different to ID 1 but make sense\n# 17 Preferred Subfamily; must be different to ID 2, and unique in the Prefered Family\n# 18 Compatible Full (Macintosh only); matches the Full Name\n# 19 Sample text (best sample to display the font in)\n\n\ndef createFonts(familydir, metadata):\n familyname = metadata.name\n fonts = []\n files = listdir(familydir)\n for f in files:\n fontmetadata = FontProto()\n ftfont = fontToolsOpenFont(f)\n fontmetadata.name = u(familyname)\n fontmetadata.post_script_name = u(fontToolsGetPSName(ftfont))\n fontmetadata.full_name = u(fontToolsGetFullName(ftfont))\n fontmetadata.style = u(inferStyle(ftfont))\n fontmetadata.weight = ftfont['OS/2'].usWeightClass\n fontmetadata.filename = os.path.basename(unicode(f).lstrip('./'))\n fontmetadata.copyright = u(fontToolsGetCopyright(ftfont))\n fonts.append(fontmetadata)\n\n metadata.fonts.extend(sorted(fonts, key = lambda f: f.weight))\n\n\n# DC This should also print the subset filesizes and check they are\n# smaller than the original ttf\n\ndef inferSubsets(familydir, metadata):\n subsets = set()\n files = listdir(familydir)\n for f in files:\n index = unicode(f).rfind(\".\")\n if index != -1:\n extension = unicode(f)[index + 1:]\n if extension in SUPPORTED_SUBSETS:\n subsets.add(extension)\n if len(subsets) == 0:\n subsets = [\"latin\"]\n\n metadata.subsets.extend(sorted(subsets))\n\n\ndef getDesigner(familydir):\n files = listdir(familydir)\n for f in files:\n if check_regular(f): # DC should ansiprint red if no Reg exemplar\n ftfont = fontToolsOpenFont(f)\n desName = fontToolsGetDesignerName(ftfont)\n if isinstance(desName, str):\n string = u\"Designer's name from font is: \" + u(desName)\n color = \"green\"\n ansiprint(string, color)\n return u(desName)\n else:\n desName = \"Multiple 
Designers\"\n ansiprint(\n \"No Designer Name known, using Multiple Designers for now...\", \"red\")\n return desName\n\n\ndef check_monospace(familydir):\n files = listdir(familydir)\n glyphwidths = []\n for f in files:\n if not unicode(f).endswith('.ttf'):\n continue\n font = fontToolsOpenFont(unicode(f))\n for table in font['cmap'].tables:\n if not (table.platformID == 3 and table.platEncID in [1, 10]):\n continue\n\n for glyphname in table.cmap:\n try:\n glyphwidths.append(font['hmtx'][glyphname][0])\n except (IndexError, KeyError):\n # can't read hmtx for glyphname, append value of zero\n glyphwidths.append(0)\n # if all glyphs has the same widths then it is easy to check\n # by casting list to python sets.\n return len(set(glyphwidths)) == 1\n\ndef genmetadata(familydir):\n metadata = FamilyProto()\n metadata.name = inferFamilyName(familydir)\n metadata.designer = getDesigner(familydir) # DC Should check it against profiles.json\n metadata.license = inferLicense(familydir)\n\n # DC Should get this from the font or prompt?\n if check_monospace(familydir):\n metadata.category = 'monospace'\n else:\n metadata.category = ''\n\n createFonts(familydir, metadata)\n inferSubsets(familydir, metadata)\n\n # DC This is used for the Date Added sort in the GWF\n # Directory - DC to check all existing values in hg repo are correct\n metadata.date_added = getToday()\n\n return metadata\n\n\ndef getToday():\n return str(date.today().strftime(\"%Y-%m-%d\"))\n\n\ndef hasMetadata(familydir):\n fn = os.path.join(familydir, METADATA_PB)\n return os.path.exists(fn) and (os.path.getsize(fn) > 0)\n\ndef striplines(jsontext):\n lines = jsontext.split(\"\\n\")\n newlines = []\n for line in lines:\n newlines.append(u\"%s\\n\" % (line.rstrip()))\n return u\"\".join(newlines)\n\n\ndef writeFile(familydir, metadata):\n filename = METADATA_PB\n if hasMetadata(familydir):\n filename = METADATA_PB_NEW\n\n msg_str = text_format.MessageToString(metadata, as_utf8=True)\n open(os.path.join(familydir, filename), 'w').write(msg_str)\n\n\ndef ansiprint(string, color):\n if sys.stdout.isatty():\n attr = []\n if color == \"green\":\n attr.append('32') # green\n attr.append('1') # bold\n else:\n attr.append('31') # red\n attr.append('1') # bold\n print('\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string))\n else:\n print(string)\n\n\ndef writeDescHtml(familydir):\n filename = \"DESCRIPTION.en_us.html\"\n file_path = os.path.join(familydir, filename)\n if os.path.exists(file_path):\n ansiprint('File \"{}\" exists'.format(file_path), \"green\")\n return\n\n foundRegular = False\n files = listdir(familydir)\n for f in files:\n if check_regular(f):\n foundRegular = True\n filepath = os.path.join(familydir, f)\n ftfont = fontToolsOpenFont(filepath)\n fontDesc = fontToolsGetDesc(ftfont)\n break\n\n if not foundRegular:\n string = \"No Regular found! REMEMBER! Create a \" + filename\n ansiprint(string, \"red\")\n fontDesc = \"TODO\"\n\n descHtml = u\"

    \" + u(fontDesc) + u\"

    \"\n with io.open(os.path.join(familydir, filename), 'w', encoding=\"utf-8\") as f:\n f.write(descHtml)\n\n\ndef run(familydir):\n writeDescHtml(familydir)\n writeFile(familydir, genmetadata(familydir))\n","repo_name":"amir17688/google_dataset","sub_path":"genmetadata.py","file_name":"genmetadata.py","file_ext":"py","file_size_in_byte":14534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72844731076","text":"#!/usr/bin/python3\n\"\"\"A module that contais the class square\n\"\"\"\n\n\nclass Square:\n \"\"\"A class with information about a square\n \"\"\"\n\n def __init__(self, size=0):\n \"\"\"initialization of function\n Args:\n size (int, optional): Size of the square.\n \"\"\"\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n\n @property\n def size(self):\n \"\"\"Property that returns size\n \"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\"Setter method for size\n \"\"\"\n if type(value) != int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n def area(self):\n \"\"\"Method that returns the area of the square\n \"\"\"\n return (self.__size * self.__size)\n\n def my_print(self):\n \"\"\"Method that prints a square of #'s the size of the square\n \"\"\"\n if self.__size == 0:\n print(\"\")\n return\n for i in range(self.__size):\n for j in range(self.__size):\n print(\"#\", end='')\n print(\"\")\n\nif __name__ == \"__main__\":\n my_square = Square(3)\n my_square.my_print()\n\n print(\"--\")\n\n my_square.size = 10\n my_square.my_print()\n\n print(\"--\")\n\n my_square.size = 0\n my_square.my_print()\n\n print(\"--\")\n","repo_name":"BenDosch/holbertonschool-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73762779076","text":"from collections import Counter, defaultdict\r\nimport copy\r\nfrom typing import List\r\nfrom leetcode_py.test297 import Codec\r\n# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\n\r\n\r\nfrom functools import reduce\r\nfrom collections import Counter\r\n\r\nclass Solution:\r\n def findUnsortedSubarray(self, nums: List[int]) -> int:\r\n if not nums or len(nums) <= 1:\r\n return 0\r\n s = []\r\n idxl, idxr = len(nums)-1, 0\r\n maxleft = -1e10\r\n if nums[1] < nums[0]:\r\n idxl, idxr = 0, 1\r\n for i in range(1,len(nums)):\r\n if nums[i-1] > nums[i]:\r\n idxl = min(idxl, i-1)\r\n idxr = max(idxr, i)\r\n maxleft = max(maxleft, nums[i-1])\r\n elif nums[i-1] == nums[i] and nums[i] < maxleft:\r\n idxr += 1\r\n if idxr > idxl:\r\n return idxr - idxl + 1\r\n return 0\r\n # idxl, needfirst = 0, True\r\n # idxr = 0\r\n # for i in range(0, len(nums)):\r\n # while s and nums[i] <= s[-1][0]:\r\n # if needfirst:\r\n # idxl = s[-1][1]\r\n # needfirst = False\r\n # idxr = i\r\n # s.pop(-1)\r\n # s.append((nums[i], i))\r\n # if idxr == 0:\r\n # return 0\r\n # return idxr - idxl + 1\r\n\r\n\r\nfrom collections import defaultdict\r\nif __name__ == '__main__':\r\n s = Solution()\r\n c = Codec()\r\n root, t = [10,5,-3,3,2,None,11,3,-2,None,1], 8\r\n # root, t = 
[5,4,8,11,None,13,4,7,2,None,None,5,1], 22\r\n    # root, t = [0,1,1], 0\r\n    l = [2,3,3,2,4]\r\n    counts = Counter(l)  # element frequencies of l; c still holds the Codec instance\r\n    root = c.deserialize2(root)\r\n    # l = [0,1,0,2,1,0,1,3,2,1,2,1]\r\n    r = s.findUnsortedSubarray(l)\r\n    print(r)\r\n","repo_name":"lkjie/leetcode","sub_path":"leetcode_py/test581.py","file_name":"test581.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"2987373056","text":"import os\n\nfrom django.http import UnreadablePostError\n\ndef info_only(record):\n    if record.levelname=="INFO":\n        return True\n    return False\n\nADMINS = (\n    ('grace', 'tianweigrace@qq.com'),\n)\n\nMANAGERS = ADMINS\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'verbose': {\n            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',\n            'style': '{',\n        },\n        'simple': {\n            'format': '{levelname} {asctime} {module} {message}',\n            'style': '{',\n        },\n    },\n    'filters': {\n        'require_debug_true': {\n            '()': 'django.utils.log.RequireDebugTrue',\n        },\n        'server_infos_only': {\n            '()': 'django.utils.log.CallbackFilter',\n            'callback': info_only,\n        }\n    },\n    'handlers': {\n        'console': {\n            'level': 'INFO',\n            'class': 'logging.StreamHandler',\n            'formatter': 'simple'\n        },\n        'mail_admins': {\n            'level': 'ERROR',\n            'class': 'django.utils.log.AdminEmailHandler',\n            'formatter': 'simple'\n        },\n        'file_errors': {\n            'level': 'ERROR',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/errors.log',\n            'formatter': 'simple'\n            \n        },\n        \n        'file_infos': {\n            'level': 'INFO',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/infos.log',\n            \n        },\n        'server_infos': {\n            'level': 'INFO',\n            'class': 'logging.FileHandler',\n            'filters': ['server_infos_only'],\n            'filename': 'logs/server_infos.log',\n            'formatter': 'simple'\n        },\n        'article_infos': {\n            'level': 'INFO',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/article_infos.log',\n            'formatter': 'simple'\n        },\n        'mailapi_infos': {\n            'level': 'INFO',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/mailapi_infos.log',\n            'formatter': 'simple'\n        },\n        'image_infos': {\n            'level': 'INFO',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/image_infos.log',\n            'formatter': 'simple'\n        },\n        'request_warnings': {\n            'level': 'WARNING',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/request_warnings.log',\n            'formatter': 'simple'\n        },\n        'request_errors': {\n            'level': 'ERROR',\n            'class': 'logging.FileHandler',\n            'filename': 'logs/request_errors.log',\n            \n        },\n    },\n    'loggers': {\n        'django': {\n            'handlers': ['file_infos'],\n            'propagate': True,\n        },\n        'django.request': {\n            'handlers': ['request_warnings'],\n            'level': 'WARNING',\n            'propagate': False,  # info recorded here, and will not be propagated to the django logger\n        },\n        'django.server': {\n            'handlers': ['server_infos'],\n            'level': 'INFO',\n            'propagate': False,  # info recorded here, and will not be propagated to the django logger\n        },\n        'mysite.error': {\n            'handlers': ['file_errors', ],\n            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),\n            'propagate': False\n        },\n        'mysite.article.info': {\n            'handlers': ['article_infos', ],\n            'level': 'INFO', \n            'propagate': False\n        },\n        'mysite.mailapi.info': {\n            'handlers': ['mailapi_infos', ],\n            'level': 'INFO', \n            'propagate': False\n        },\n        'mysite.image.info': {\n            'handlers': ['image_infos', ],\n            'level': 'INFO', \n            'propagate': False\n        },\n        'myproject.custom': {\n            'handlers': ['console', 'mail_admins'],\n            'level': 'INFO',\n        }\n
    }\n}\n","repo_name":"tianwei1992/mysite","sub_path":"mysite/logging_settings.py","file_name":"logging_settings.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"ar","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"}
{"seq_id":"36410843709","text":"import requests\nimport jmespath\n\n# FILE: check_api.py\ndef main():\n    """\n    Call the local service and validate its JSON output.\n    """\n    try:\n        r = requests.get("http://127.0.0.1:5000?output=json")\n    except requests.exceptions.RequestException as e:\n        print(e)\n        exit(1)\n    if r.status_code != 200:\n        print("FAILED: the call did not succeed: " + r.text)\n        exit(2)\n    print(r.text)\n    print(r.json())\n    checkOutput(r.json())\n\n\ndef checkOutput(rj):\n    actual = jmespath.search('imie', rj)\n    expected = "AgnieszkaR"\n    if actual != expected:\n        print("FAILED: We expected: " + expected + " but was " + actual)\n        exit(1)\n\n\nif __name__ == "__main__":\n    """\n    """\n    main()\n","repo_name":"akola2017/se_hello_printer_app","sub_path":"test-api/check_api.py","file_name":"check_api.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"62"}
{"seq_id":"14932094786","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.patches import ConnectionPatch\r\nfrom collections import Counter\r\nfrom scipy.optimize import linear_sum_assignment\r\nimport math\r\n\r\n\r\nimg1 = 'S3_016_02_05.jpg'\r\nimg2 = 'S3_016_02_06.jpg'\r\n\r\nimages = []\r\n\r\ncount = []\r\nmode = 0\r\n\r\ndef detect_pill(img):\r\n    img = cv2.imread(img,0)\r\n    img = img[115:760, 50:980].copy()\r\n    img = cv2.medianBlur(img,5)\r\n\r\n    dst = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\r\n            cv2.THRESH_BINARY,31,2)\r\n\r\n    dst = cv2.medianBlur(dst,3)\r\n\r\n    circles = cv2.HoughCircles(dst, cv2.HOUGH_GRADIENT,1,11,\r\n                               param1=5,param2=13,minRadius=7,maxRadius=20)\r\n    circles = np.uint16(np.around(circles))\r\n\r\n    points = []\r\n    for i in circles[0,:]:\r\n        points.append([i[0],i[1]])\r\n    \r\n    count.append(len(points))\r\n\r\n    images.append(dst)\r\n    return points\r\n\r\ndef get_dist(points1, points2):\r\n    distance = []\r\n\r\n    for i in range(len(points1)):\r\n        x1 = points1[i][0]\r\n        y1 = points1[i][1]\r\n        p1 = points1[i]\r\n\r\n        dst = []\r\n        for j in range(len(points2)):\r\n            x2 = points2[j][0]\r\n            y2 = points2[j][1]\r\n            p2 = points2[j]\r\n\r\n            # print(x1, y1, x2, y2)\r\n            \r\n            dst.append(int(math.dist(p1, p2)))\r\n        distance.append(dst)\r\n        #distance[i].append(math.sqrt((x2 - x1)**2 + (y2 - y1)**2))\r\n\r\n    return distance\r\n\r\n    \r\n\r\nif __name__ == '__main__':\r\n    # import_pill_group(64, 1)\r\n    p1 = detect_pill(img1)\r\n    p2 = detect_pill(img2)\r\n    print(p1)\r\n    print(p2)\r\n    print("=============================")\r\n    dst = get_dist(p1, p2)\r\n    print(dst)\r\n    row_ind, col_ind = linear_sum_assignment(dst)\r\n    print("=============================")\r\n\r\n    #plt.subplot(1,2,j+1), plt.imshow(images[j], 'gray')\r\n    fig = plt.figure(figsize=(10,5))\r\n    ax1 = fig.add_subplot(121)\r\n    plt.imshow(images[0], 'gray')\r\n    ax2 = fig.add_subplot(122)\r\n    plt.imshow(images[1], 'gray')\r\n    \r\n    for i in range(16):\r\n        xyA = (p1[i][0], p1[i][1])\r\n        xyB = (p2[col_ind[i]][0], p2[col_ind[i]][1])\r\n        con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA="data", coordsB="data",\r\n                      axesA=ax2, axesB=ax1, color=((i*15)/255,0,0))\r\n        ax2.add_artist(con)\r\n
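    # show both thresholded frames side by side with the matched-pill connection lines\r\n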
    plt.show()\r\n","repo_name":"icarusicarus/PillCounter","sub_path":"plt_show.py","file_name":"plt_show.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"}
{"seq_id":"30040958604","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def __init__(self):\n        self.nums = []\n\n    def helper(self, node):\n        if node == None:\n            return\n        self.helper(node.left)\n        self.nums.append(node.val)\n        self.helper(node.right)\n\n    def isValidBST(self, root):\n        """\n        :type root: TreeNode\n        :rtype: bool\n        """\n        self.helper(root)\n        l = len(self.nums)\n        for i in range(1, l):\n            if self.nums[i - 1] >= self.nums[i]:\n                return False\n        return True\n","repo_name":"tianlu1677/leetcode","sub_path":"ValidateBinarySearchTree/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"17987095578","text":"# Fill in a piece of code at the ......\r\n\r\ndef reverse_dict(dic):\r\n    nd = {}\r\n    for i in dic.items():\r\n        nd[i[1]] = i[0]\r\n    a = list(nd)\r\n    a = sorted(a, reverse=True)\r\n    res = {}\r\n    for k in a:\r\n        res[k] = nd[k]\r\n        print(k, nd[k])\r\n    return res\r\n# Please input a dictionary\r\n# dic = eval(input(""))\r\ndic = eval('{"alice":1001,"john":1003,"kate":1002}')\r\nreverse_dict(dic)","repo_name":"jellyqwq/PictureBed","sub_path":"2022/12/NCRE2/JP10/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"8136907381","text":"import pickle\nfrom textwrap import wrap\n\nfrom matplotlib import pyplot as plt\n\n\ndef plot3Data(*lists, title):\n    for (l, label) in list(lists)[0]:\n        plt.plot(l, label=label)\n\n\n    plt.ylim(bottom=0, top=1)\n    plt.xlim(left=1)\n    plt.title("\\n".join(wrap(title,60)))\n    plt.ylabel("map")\n    plt.xlabel("k")\n    plt.legend(loc='upper right')\n    plt.savefig(title + ".jpg")\n\n    plt.show()\n\n\nif __name__ == "__main__":\n\n\n    VGG16Paths = [\n        "exactSearchmAPCombinedVGG16block3_pool.txt",\n        "exactSearchmAPCombinedVGG16block4_pool.txt",\n        "exactSearchmAPCombinedVGG16block5_pool.txt"\n    ]\n    VGG19Paths = [\n        "exactSearchmAPCombinedVGG19block3_pool.txt",\n        "exactSearchmAPCombinedVGG19block4_pool.txt",\n        "exactSearchmAPCombinedVGG19block5_pool.txt"\n    ]\n\n    datasVGG16 = []\n\n    for path in VGG16Paths:\n        data = None\n        with open(path, "rb") as f:\n            data = pickle.load(f)\n        datasVGG16.append(data)\n\n    datasVGG19 = []\n\n    for path in VGG19Paths:\n        data = None\n        with open(path, "rb") as f:\n            data = pickle.load(f)\n        datasVGG19.append(data)\n\n    listsVGG16 = [\n        (datasVGG16[0], "Exact Search from block 3"),\n        (datasVGG16[1], "Exact Search from block 4"),\n        (datasVGG16[2], "Exact Search from block 5")\n    ]\n    listsVGG19 = [\n        (datasVGG19[0], "Exact Search from block 3"),\n        (datasVGG19[1], "Exact Search from block 4"),\n        (datasVGG19[2], "Exact Search from block 5")\n    ]\n    plot3Data(listsVGG16,\n              title="BruteForce VGG16 250 Queries ")\n    plot3Data(listsVGG19,\n              title="BruteForce VGG19 250 Queries 
\")\n","repo_name":"GabMartino/Retrieve","sub_path":"RetrievalNoFineTuning/ExactSearchWithDistractor/PlotExactSearchMAP.py","file_name":"PlotExactSearchMAP.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"35592922149","text":"import os\nimport sys\nimport tempfile\nfrom unittest.mock import call, patch\n\nimport pytest\n\nimport package_statistic\n\n\ndef test_correct_arch():\n assert package_statistic.parse_arch([\"test\", \"arm64\"]) == \"arm64\"\n\n\ndef test_incorect_arch():\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n package_statistic.parse_arch([\"test\", \"dfsd\"])\n assert pytest_wrapped_e.value.code == 1\n\n\ndef test_process_content_file():\n expected_result = {\"mate-applets-common\\n\": 2, \"mate-user-guide\\n\": 1}\n with open(\"content_file_test.txt\") as content_file:\n result = package_statistic.process_content_file(content_file)\n assert expected_result == result\n\n\n@patch(\"builtins.print\")\ndef test_std_output(mocked_print):\n with open(\"content_file_test.txt\") as content_file:\n result = package_statistic.process_content_file(content_file)\n package_statistic.print_top_10_packages(result)\n assert mocked_print.mock_calls == [\n call(\"1. mate-applets-common\\n\\t2\"),\n call(\"2. mate-user-guide\\n\\t1\"),\n ]\n\n\ndef test_download_file():\n with tempfile.TemporaryDirectory() as tempdir:\n arch = \"arm64\"\n try:\n file_path = package_statistic.download_file(tempdir, arch)\n except Exception:\n pytest.fail(\"Could not download the file\")\n\n assert os.path.isfile(file_path)\n\n\n@patch(\"builtins.print\")\ndef test_end_to_end(mocked_print):\n sys.argv = [\"test\", \"arm64\"]\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n package_statistic.main()\n assert pytest_wrapped_e.value.code == 0\n assert len(mocked_print.mock_calls) == 10\n","repo_name":"pievalentin/debian-package-statistic","sub_path":"package_statistic_test.py","file_name":"package_statistic_test.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"44040084354","text":"\"\"\"\n Working with docx files\n\"\"\"\n\nimport logging\n\nimport docx\n\n\ndef main() -> None:\n \"\"\"\n Work with docx formatted files\n \"\"\"\n # Examining the document meta data. 
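The package_statistic test module above relies on two pytest idioms: pytest.raises(SystemExit) to assert on exit codes, and patching builtins.print to capture console output. The exit-code half in isolation (cli_main is a hypothetical stand-in, not part of the record):

    import sys
    import pytest

    def cli_main(argv):
        # Hypothetical entry point: exits 1 on bad input, 0 otherwise.
        sys.exit(0 if argv else 1)

    def test_exit_code_on_bad_input():
        # pytest.raises captures the SystemExit so the test process survives
        # and exposes the exit status on the exception's .value.code.
        with pytest.raises(SystemExit) as excinfo:
            cli_main([])
        assert excinfo.value.code == 1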
Not all docx generating programs\n # produce metadata about the files\n doc = docx.Document(\"document-1.docx\")\n logging.info(f\"title: {doc.core_properties.title}\")\n logging.info(f\"Key words: {doc.core_properties.keywords}\")\n logging.info(f\"Last modified {doc.core_properties.modified}\")\n\n # Data of the document are stored within paragraphs, and not pages\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n main()\n","repo_name":"vmarcella/auto-python","sub_path":"files/docs.py","file_name":"docs.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"28833987800","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport xml.etree.ElementTree as ET\nimport subprocess\n\nVERSION = '0.0.1'\n\ndef perror(*args):\n sys.stderr.write(*args)\n sys.stderr.write('\\n')\n\ndef output2xml(strings):\n # 行単位にパースして辞書形式にする\n dicts = [read_line(line) for line in strings.split('\\n') if line != '']\n\n # 同ファイル名毎にリスト化\n files = {}\n for dic in dicts:\n if dic['file'] not in files:\n files[dic['file']] = []\n files[dic['file']].append(dic)\n\n # XMLのツリーに変換\n root = ET.Element('checkstyle')\n for file in files.keys():\n file_element = ET.SubElement(root, 'file', attrib={'name':file})\n for dic in files[file]:\n file_element.append(line2element(dic))\n\n return root\n\ndef read_line(line):\n result = dict(zip(['severity', 'type', 'source', 'file', 'line', 'column', 'message_type', 'message'], line.split('|')))\n result['severity'] = result['severity'].lower()\n return result\n\ndef line2element(dic):\n \"\"\"\n \n \"\"\"\n attributes = {k: v for k, v in dic.items() if k in ['line', 'column', 'severity', 'message', 'source']}\n element = ET.Element('error', attrib=attributes)\n return element\n\n# Help\ndef show_help():\n perror('Usage: dartcop [options...] ')\n perror(' dartcop --version')\n perror('')\n perror('dartcop')\n perror('Homepage: https://github.com/kuronekomichael/dartcop')\n perror('Simple `dartanalyzer` wrapper convert to checkstyle format')\n exit(255)\n\ndef main(argv):\n if len(argv) == 0:\n return False\n\n if argv[0] == '-V' or argv[0] == '--version':\n print('dartcop v' + VERSION)\n exit(0)\n\n if argv[0] == 'help':\n show_help()\n exit(0)\n\n try:\n subprocess.check_output(['which', 'dartanalyzer'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as cpe:\n perror('ERROR!!')\n perror('dartanalyzer not found. 
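dartcop's output2xml/line2element pair builds a checkstyle document with ElementTree: one file element per path, one nested error element per finding. A reduced sketch of that construction, with hypothetical findings:

    import xml.etree.ElementTree as ET

    findings = [
        {"file": "lib/a.dart", "line": "3", "severity": "warning", "message": "unused import"},
        {"file": "lib/a.dart", "line": "9", "severity": "error", "message": "undefined name"},
    ]

    root = ET.Element("checkstyle")
    by_file = {}
    for f in findings:
        # One <file> node per path, created lazily on first use.
        if f["file"] not in by_file:
            by_file[f["file"]] = ET.SubElement(root, "file", attrib={"name": f["file"]})
        ET.SubElement(by_file[f["file"]], "error",
                      attrib={k: v for k, v in f.items() if k != "file"})

    print(ET.tostring(root, encoding="unicode"))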
Install Dart SDK and add it to PATH.')\n exit(1)\n\n if any([v == '--format=human' for v in argv]):\n perror('ERROR!!')\n perror('Cannot set --format=human.')\n exit(1)\n\n try:\n ret = subprocess.check_output(['dartanalyzer', '--format=machine'] + argv, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as cpe:\n ret = cpe.output\n\n xml = output2xml(ret.decode('utf-8'))\n print(ET.tostring(xml, encoding='unicode'))\n exit(0)\n\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"kuronekomichael/dartcop","sub_path":"src/dartcop/dartcop.py","file_name":"dartcop.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"73957501958","text":"import pandas as pd\n\nclass CafeCrawler:\n \"\"\"\n Crawler for Naver Cafe post information.\n\n If the path of an HTML source file is supplied when the object is\n created, a DataFrame built from that file is stored in self.df.\n \"\"\"\n\n def __init__(self,path):\n f= open(path, 'r', encoding=\"UTF-8\")\n self.source=f.read()\n self.df = pd.DataFrame()\n\n def generate_dataframe(self):\n self._split_number()\n self._split_title()\n self._split_writer()\n self._split_date()\n self._split_view()\n self._split_like()\n\n self.df.set_index('게시글번호', inplace=True)\n\n return self.df\n\nfrom crawler import CafeCrawler\n\ncrawler = CafeCrawler(path='html_source.txt')\nprint(crawler.generate_dataframe())","repo_name":"cocofafa/python_AI_study","sub_path":"20160624/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4196062762","text":"import tensorflow as tf\nimport numpy as np\n\nfrom realsafe.attack.base import Attack\nfrom realsafe.attack.utils import get_xs_ph, get_ys_ph\nfrom realsafe.model import ClassifierDifferentiable\n\n\nclass CW(Attack):\n '''\n l_2, l_inf\n optimized\n '''\n\n def __init__(self, model, batch_size, goal, distance_metric, learning_rate,\n confidence):\n assert isinstance(model, ClassifierDifferentiable)\n Attack.__init__(self, model, batch_size)\n\n self.confidence = confidence\n\n def scale(vec, dst_lo, dst_hi, src_lo, src_hi):\n k = (dst_hi - dst_lo) / (src_hi - src_lo)\n b = dst_lo - k * src_lo\n return k * vec + b\n\n def scale_to_model(vec):\n return scale(vec, self.model.x_min, self.model.x_max, -1.0, 1.0)\n\n def scale_to_tanh(vec):\n return scale(vec, 1e-6 - 1, 1 - 1e-6,\n self.model.x_min, self.model.x_max)\n\n model_xs_shape = (self.batch_size, *self.model.x_shape)\n\n xs_shape = (self.batch_size, np.prod(self.model.x_shape))\n\n xs_zeros = tf.zeros(xs_shape, dtype=self.model.x_dtype)\n\n self.xs_ph = get_xs_ph(self.model, self.batch_size)\n self.ys_ph = get_ys_ph(self.model, self.batch_size)\n self.cs_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))\n\n xs_var = tf.Variable(xs_zeros)\n ys_var = tf.Variable(tf.zeros_like(self.ys_ph))\n cs_var = tf.Variable(tf.zeros_like(self.cs_ph))\n\n d_ws = tf.Variable(xs_zeros)\n ws = tf.atanh(scale_to_tanh(xs_var)) + d_ws\n\n self.xs_adv = scale_to_model(tf.tanh(ws))\n self.xs_adv_output = tf.reshape(self.xs_adv, model_xs_shape)\n\n logits, _ = self.model.logits_and_labels(self.xs_adv_output)\n\n ys_one_hot = tf.one_hot(ys_var, self.model.n_class)\n\n logit_target = tf.reduce_sum(ys_one_hot * logits, 1)\n logit_other = (1 - ys_one_hot) * logits\n logit_other = logit_other - 0.5 * self.model.x_dtype.max * ys_one_hot\n logit_other = tf.reduce_max(logit_other, 1)\n\n self.setup_xs = 
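The CW constructor above removes the pixel box constraint by optimizing w and decoding with x_adv = scale(tanh(w)): tanh keeps every coordinate inside (-1, 1), so any rescaled iterate is automatically a valid image. A tiny numpy illustration of that reparameterization (the values are hypothetical):

    import numpy as np

    x_min, x_max = 0.0, 1.0

    def scale(vec, dst_lo, dst_hi, src_lo, src_hi):
        # Same affine map as in the record: source range -> destination range.
        k = (dst_hi - dst_lo) / (src_hi - src_lo)
        return k * vec + (dst_lo - k * src_lo)

    # Unconstrained optimizer variable: any real values are legal.
    w = np.array([-5.0, -0.3, 0.0, 2.1, 9.0])

    # tanh squashes into (-1, 1); scaling maps that interval onto the box.
    x_adv = scale(np.tanh(w), x_min, x_max, -1.0, 1.0)
    assert ((x_adv > x_min) & (x_adv < x_max)).all()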
xs_var.assign(tf.reshape(self.xs_ph, xs_shape))\n self.setup_ys = ys_var.assign(self.ys_ph)\n self.setup_cs = cs_var.assign(self.cs_ph)\n self.setup_d_ws = d_ws.assign(tf.zeros_like(d_ws))\n\n if distance_metric == 'l_2':\n dists = tf.reduce_sum(tf.square(self.xs_adv - xs_var), axis=1)\n elif distance_metric == 'l_inf':\n dists = tf.reduce_max(tf.abs(self.xs_adv - xs_var), axis=1)\n else:\n raise NotImplementedError\n\n if goal == 't' or goal == 'tm':\n score = tf.maximum(0.0, logit_other - logit_target + confidence)\n elif goal == 'ut':\n score = tf.maximum(0.0, logit_target - logit_other + confidence)\n else:\n raise NotImplementedError\n self.goal = goal\n\n loss = dists + cs_var * score\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.optimizer_step = optimizer.minimize(loss, var_list=[d_ws])\n self.setup_optimizer = tf.variables_initializer(optimizer.variables())\n\n self.score = score\n self.logits = logits\n self.dists = dists\n\n def config(self, **kwargs):\n cs = np.array(kwargs['cs'], dtype=self.model.x_dtype.as_numpy_dtype)\n self.cs = np.repeat(cs, self.batch_size) if cs.shape == () else cs\n self.iteration = kwargs['iteration']\n self.search_steps = kwargs['search_steps']\n self.binsearch_steps = kwargs['binsearch_steps']\n\n def batch_attack(self, xs, ys, ys_target, session):\n ys_flatten_max = self.batch_size * self.model.n_class\n ys_flatten = np.arange(0, ys_flatten_max, self.model.n_class) + ys\n\n cs = self.cs.copy()\n ys_input = ys_target if self.goal == 't' or self.goal == 'tm' else ys\n session.run((self.setup_xs, self.setup_ys, self.setup_d_ws),\n feed_dict={self.xs_ph: xs, self.ys_ph: ys_input})\n\n xs_adv = np.copy(xs)\n\n # find c to begin with\n found = np.repeat(False, self.batch_size)\n min_dists = np.repeat(self.model.x_dtype.max, self.batch_size)\n for _ in range(self.search_steps):\n session.run(self.setup_optimizer)\n session.run(self.setup_cs, feed_dict={self.cs_ph: cs})\n for _ in range(self.iteration):\n session.run(self.optimizer_step)\n score_, logits_, xs_adv_, dists_ = session.run([\n self.score, self.logits, self.xs_adv_output, self.dists])\n if self.goal == 'ut' or self.goal == 'tm':\n diff = logits_.max(axis=1) - logits_.take(ys_flatten)\n succ_ = diff > self.confidence\n else:\n succ_ = score_ < 1e-12\n\n better_dists = dists_ < min_dists\n to_update = np.logical_and(succ_, better_dists)\n xs_adv[to_update] = xs_adv_[to_update]\n found[to_update] = True\n if np.all(found):\n break\n else:\n cs[np.logical_not(found)] *= 10.0\n\n cs_hi = cs\n cs_lo = np.zeros_like(cs)\n cs = (cs_hi + cs_lo) / 2\n\n # binsearch\n for _ in range(self.binsearch_steps):\n session.run(self.setup_optimizer)\n session.run(self.setup_cs, feed_dict={self.cs_ph: cs})\n\n succ = np.repeat(False, self.batch_size)\n for _ in range(self.iteration):\n session.run(self.optimizer_step)\n score_, logits_, xs_adv_, dists_ = session.run((\n self.score, self.logits, self.xs_adv_output, self.dists))\n if self.goal == 'ut' or self.goal == 'tm':\n succ_ = logits_.max(axis=1) - logits_.take(ys_flatten) \\\n > self.confidence\n else:\n succ_ = score_ < 1e-12\n better_dists = dists_ < min_dists\n to_update = np.logical_and(succ_, better_dists)\n xs_adv[to_update] = xs_adv_[to_update]\n succ[to_update] = True\n\n cs_hi[succ] = cs[succ]\n not_succ = np.logical_not(succ)\n cs_lo[not_succ] = cs[not_succ]\n cs = (cs_hi + cs_lo) / 2.0\n\n return 
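batch_attack above searches the penalty constant in two phases: multiply c by 10 until the attack first succeeds, then bisect toward the smallest c that still succeeds (a smaller c weights the distance term more, giving less distortion). The search skeleton in isolation, with a hypothetical succeeds(c) oracle:

    def find_smallest_c(succeeds, c=1.0, search_steps=5, binsearch_steps=8):
        # Phase 1: escalate by x10 until the first success.
        for _ in range(search_steps):
            if succeeds(c):
                break
            c *= 10.0
        else:
            return None  # never succeeded at any probed c
        # Phase 2: bisect between 0 and the successful c.
        lo, hi = 0.0, c
        for _ in range(binsearch_steps):
            mid = (lo + hi) / 2.0
            if succeeds(mid):
                hi = mid
            else:
                lo = mid
        return hi

    # Pretend any c >= 7.3 yields an adversarial example: the result
    # converges toward 7.3 from above.
    print(find_smallest_c(lambda c: c >= 7.3))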
xs_adv\n","repo_name":"adversarial-robustness-benchmark/adversarial-robustness-benchmark","sub_path":"realsafe/attack/cw.py","file_name":"cw.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"62"} +{"seq_id":"11226668114","text":"\nimport pyupbit\nimport time\nimport pandas as pd\nimport talib\nimport numpy as np\n\ncnt = 15000#받아올 데이터 수\ncoinlist = pyupbit.get_tickers(fiat=\"KRW\")\n#[\"KRW-AXS\",\"KRW-FLOW\",\"KRW-SAND\",\"KRW-XRP\",\"KRW-DOGE\",\"KRW-ETC\",\"KRW-ETH\",\"KRW-BTC\", \"KRW-XLM\",\"KRW-SNT\",\"KRW-MLK\",\"KRW-WAVES\"]\nsetTime = \"minute5\"\n\n# 0 1 2 3 4 5 6 7\n\n# def market_unit(price):\n# if price >= 2000000:\n# tick_size = 1000\n# elif price >= 1000000:\n# tick_size = 500\n# elif price >= 500000:\n# tick_size = 100\n# elif price >= 100000:\n# tick_size = 50\n# elif price >= 10000:\n# tick_size = 10\n# elif price >= 1000:\n# tick_size = 5\n# elif price >= 100:\n# tick_size = 1\n# elif price >= 10:\n# tick_size = 0.1\n# else:\n# tick_size = 0.01\n# return tick_size\n#\n\nfor j in range(len(coinlist)):\n date = None\n dfs = []\n\n for i in range(cnt // 200 + 1):\n\n if i < cnt // 200:\n\n df = pyupbit.get_ohlcv(coinlist[j], to=date, interval=setTime)\n date = df.index[0]\n elif cnt % 200 != 0:\n df = pyupbit.get_ohlcv(coinlist[j], to=date, interval=setTime, count=cnt % 200)\n else:\n break\n\n dfs.append(df)\n time.sleep(0.1)\n\n\n # df가 DataFrame형식으로 저장되어있음\n df = pd.concat(dfs).sort_index()\n\n # RSI값 저장\n rsi14 = talib.RSI(df['close'], 14)\n df['rsi'] = rsi14\n\n #볼린저밴드 저장\n #upper, middle, lower = talib.BBANDS(df['close'], timeperiod=10, nbdevup=0.5, nbdevdn=0.5)\n upper, middle, lower = talib.BBANDS(df['close'], 20, 2)\n df['upper'] = upper\n df['middle'] = middle\n df['lower'] = lower\n\n\n buyprice = 0.0\n myasset = 10000\n buy = False\n\n # 총 손익\n\n per = 1\n\n count = 0 # 거래횟수\n wincount = 0 # 이득인거래수\n\n target_buyRSI = 30\n target_sellRSI = 70\n\n for i, row in df.iterrows():\n\n if row['rsi'] != None and row['low'] <= row['lower'] and buy == False:\n buy = True\n count += 1\n buyprice =(row['low'])\n myasset = 0.9995 * myasset\n # print(\"나는 이가격에 샀다 : \",buyprice)\n\n\n elif row['rsi'] != None and buy == True and (row['high'] >buyprice*1.02 or\n row['low']row['upper'] ):\n\n\n\n if row['high'] >= buyprice * 1.02 or row['high'] >= row['upper']:\n\n\n\n sellprice = (row['high'])\n\n else:\n sellprice = (row['low'])\n\n\n\n\n myasset = myasset * (1 + (sellprice - buyprice) / buyprice)\n myasset -= myasset * 0.0005\n buy = False\n per = per * (1 + (sellprice - buyprice) / buyprice - 0.001)\n if sellprice - buyprice > 0:\n wincount += 1\n # print(\"나는 이가격에 팔았다 : \", row['open'])\n\n print(\"----------------------------------------------┐\")\n print(\"거래 코인 : \", coinlist[j], \" 거래분봉 : \", setTime)\n print(\"퍼센트 : \", per)\n print(\"내 자산 : \", myasset)\n print(\"거래 수 :\", count)\n print(\"이득 :\", wincount, \" 손해:\", count - wincount)\n print(\"----------------------------------------------┘\")\n\n\n #df.to_excel(coinlist[j]+\".xlsx\")\n\n\n\n","repo_name":"rong5026/UpbitTrading","sub_path":"BackTesting.py","file_name":"BackTesting.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"5670397794","text":"from socket import *\nimport re\nimport os\nimport pickle\nfrom time import *\nimport csv\nfrom design import * \nd=dict()\ndef p_welcome():\n print(\"Let's Start!!!\\n\")\n 
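The Upbit backtest above works around the 200-candles-per-call cap by passing each batch's oldest timestamp back as the `to` bound of the next request. The paging loop on its own, as a reusable sketch (fetch_candles is a hypothetical wrapper around the same pyupbit call the record uses):

    import time
    import pandas as pd
    import pyupbit

    def fetch_candles(ticker, interval, total):
        # Walk backwards in time, at most 200 rows per API call.
        frames, to = [], None
        while total > 0:
            batch = pyupbit.get_ohlcv(ticker, interval=interval, to=to,
                                      count=min(total, 200))
            if batch is None or batch.empty:
                break
            frames.append(batch)
            to = batch.index[0]      # oldest row bounds the next request
            total -= len(batch)
            time.sleep(0.1)          # stay under the API rate limit
        return pd.concat(frames).sort_index() if frames else pd.DataFrame()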
sleep(2)\n print(\"PRESS ENTER TO CONTINUE\")\n val=input()\n if(val==\"\"):\n os.system('clear')\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\tAll The Best\",end=\"\")\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\U0001f600\",\"\\U0001f600\",\"\\U0001f600\") \n sleep(2)\n os.system('clear')\ndef quiz_contest(filename):\n with open(filename, mode ='r')as file: \n p_welcome()\n csvFile = csv.reader(file)\n for lines in csvFile:\n print ('\\033[1m \\033[91m \\033[4m' + lines[1] + '\\033[0m'+\"\\n\")\n #print(lines[1],\"\\t\\n\")\n sleep(0.5)\n print(\"1.\",lines[2],\"\\n\")\n sleep(0.5)\n print(\"2.\",lines[3],\"\\n\")\n sleep(0.5)\n print(\"3.\",lines[4],\"\\n\")\n sleep(0.5)\n print(\"4.\",lines[5],\"\\n\")\n sleep(0.5)\n ans=str(input(\"\\U0001f600 enter your answer number:\\n\"))\n sleep(2)\n os.system('clear')\n \n if(ans==\"1\"):\n d[lines[0]]=lines[2]\n elif(ans==\"2\"):\n d[lines[0]]=lines[3]\n elif(ans==\"3\"):\n d[lines[0]]=lines[4]\n elif(ans==\"4\"):\n d[lines[0]]=lines[5]\n else:\n d[lines[0]]=\"wrong\" \n print(\"good going!!!:-)\") \n sleep(1) \n os.system('clear') ","repo_name":"99002453/Python-micro-project","sub_path":"source/client_funcs.py","file_name":"client_funcs.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"27514185027","text":"\n# Github : https://github.com/adarsh2104\n# HR-Profile: https://www.hackerrank.com/adarsh_2104\n# Challenge : https://www.hackerrank.com/challenges/s10-interquartile-range\n# Max Score : 30\n\n\nnumbers = int(input())\ninput_array = [int(x) for x in input().split()]\ninput_freq = [int(x) for x in input().split()]\n\n\ndef find_medium_sum(array):\n if len(array) % 2 == 1:\n return array[len(array) // 2]\n else:\n return (array[len(array) // 2] + array[len(array) // 2 - 1]) / 2\n \n \nfinal_list = []\nfor index in range(numbers):\n final_list += ([input_array[index]] * input_freq[index])\n\nfinal_list.sort()\n\ninter_qrange = float(find_medium_sum(final_list[len(final_list) // 2 + len(final_list) % 2:]) - find_medium_sum(final_list[:len(final_list)//2]))\nprint(inter_qrange)\n","repo_name":"adarsh2104/Hacker-Rank-Days-of-Statistics","sub_path":"Challenges/ Interquartile Range.py","file_name":" Interquartile Range.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"18662226326","text":"from typing import List\n\n\nclass Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n \"\"\"O(n) loop over the list twice, can mark existing numbers by\n placing them outside of the 1 <= n <= 10^5 bounds. 
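The interquartile-range record above expands the (value, frequency) pairs, sorts, and subtracts the medians of the two halves, excluding the middle element when the length is odd. Worked example: values [6, 12, 8] with frequencies [1, 2, 3] expand to [6, 8, 8, 8, 12, 12]; the lower half [6, 8, 8] has median 8, the upper half [8, 12, 12] has median 12, so the IQR is 4.0. The same computation cross-checked with the statistics module:

    from statistics import median

    values, freqs = [6, 12, 8], [1, 2, 3]
    expanded = sorted(v for v, f in zip(values, freqs) for _ in range(f))

    half = len(expanded) // 2
    # The upper slice skips the middle element for odd lengths, matching
    # the record's slicing logic.
    q1 = median(expanded[:half])
    q3 = median(expanded[half + len(expanded) % 2:])
    print(float(q3 - q1))  # 4.0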
Here I use negative numbers\n \"\"\"\n\n for num in nums:\n if nums[abs(num) - 1] > 0:\n nums[abs(num) - 1] *= -1\n\n result = []\n for i in range(len(nums)):\n if nums[i] > 0:\n result.append(i + 1)\n\n return result\n","repo_name":"dvdblk/leetcode","sub_path":"problems/0448-find-all-numbers-disappeared-in-an-array/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41859376841","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------\n# This is a sample controller\n# this file is released under public domain and you can use without limitations\n# -------------------------------------------------------------------------\nimport os\nimport subprocess\nimport re\nimport ast\nimport sys\nfrom urllib.parse import unquote\n\ndef index():\n# definir un formulaire pour remplir les champs d'une BD\n# form = SQLFORM(db.quizz_banque).process()\n# rows = db(db.quizz_banque).select()\n# il faut penser à modifier le layout\n# return form\n return locals()\n\ndef execute_script_verif(rep_etud,qid,Langage,script_verif):\n Langage = \"Python\"\n #todo pour une prochaine version, integrer verification en Sage\n pid = os.getpid()\n interprete_python = \"/usr/bin/python3\"\n interprete_sage = \"/opt/SageMath/local/bin/python3\"\n if (Langage == \"Python\"):\n run_time_limit = 5 # secondes\n interprete = interprete_python\n else:\n interprete = interprete_sage\n run_time_limit = 7 # secondes \n if interprete == interprete_sage:\n script = \"/tmp/verif_question_\"+str(qid)+\"_\"+str(pid)+\".sage.py\"\n else:\n script = \"/tmp/verif_question_\"+str(qid)+\"_\"+str(pid)+\".py\" \n try:\n fic_q = open(script,\"wt\",encoding=\"utf-8\")\n except Exception as erreur:\n message = \"Erreur lors de la sauvegarde de votre script : \"+str(erreur)\n return False,message\n else:\n #if interprete == interprete_sage:\n # fic_q.write(\"from sage.all_cmdline import *\\n\")\n #rep_tempo = rep_etud.replace('\"',r'\\\"')\n rep_tempo = rep_etud\n fic_q.write(\"try:\\n \"+rep_tempo+\"\\nexcept:\\n print('Error')\\n\")\n fic_q.close()\n env = os.environ.copy()\n env['HOME'] = \"/tmp\"\n check_script_= subprocess.run([interprete,script],timeout=run_time_limit,universal_newlines=True,text=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, encoding=\"utf-8\",env=env)\n if check_script_.returncode!=0:\n message = \"\"\"Erreur de syntaxe dans votre réponse
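execute_script_verif above runs the student's answer out-of-process so a hang or crash cannot take the web server down: the code is written to a throwaway file and executed via subprocess with a hard timeout and stderr merged into stdout. The core pattern in isolation (a sketch; the interpreter path and limits are placeholders):

    import os
    import subprocess
    import tempfile

    code = "print(sum(range(10)))"  # stand-in for the submitted answer

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(code)
        path = f.name

    try:
        # The timeout turns an infinite loop into TimeoutExpired instead of
        # a hung worker; a non-zero exit is reported via returncode.
        proc = subprocess.run(["python3", path], capture_output=True,
                              text=True, timeout=5)
        print("exit", proc.returncode, "output:", proc.stdout.strip())
    except subprocess.TimeoutExpired:
        print("rejected: script exceeded the time limit")
    finally:
        os.remove(path)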
    \n\"\"\"+check_script_.stdout.lstrip()\n return False,message\n fic_q = open(script,\"wt\",encoding=\"utf-8\")\n script_verif = script_verif.replace('reponse_etudiant',rep_etud) \n fic_q.write(script_verif)\n #fic_q.write(script_verif.replace('\"',r'\\\"')+\"\\n\")\n #if interprete != interprete_python:\n # fic_q.write(\"sage_eval('None',cmds=__toexec,locals=__dicX_)\\n\")\n #if interprete != interprete_python:\n # fic_q.write(' __retdicX_[__locvar_] = str(__dicX_[__locvar_])\\n')\n fic_q.close()\n try:\n # on lance l'execution du script cree\n # les sorties standard et erreur seront recuperees dans check_script_.stdout\n env = os.environ.copy()\n env['HOME'] = \"/tmp\"\n check_script_= subprocess.run([interprete,script],timeout=run_time_limit,universal_newlines=True,text=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, encoding=\"utf-8\",env=env)\n #check_script_= subprocess.check_output([interprete,script],timeout=run_time_limit,universal_newlines=True,stderr=subprocess.STDOUT, encoding=\"utf-8\",env=env)\n except Exception as erreur:\n message = str(erreur).lstrip().split(' ')\n #message = str(erreur).lstrip()+'\\n'+str(erreur.output)\n mess_err = \"\"\"Erreur lors de l'exécution\nde votre script de vérification:
    \n\"\"\"+script+\"\"\"\n\"\"\"+\" \".join(message[3:])\n message = mess_err\n #fic_q=open(\"/tmp/err.txt\",\"w\")\n #fic_q.write(\"ooo : \")\n #fic_q.close()\n return False,message\n else:\n if check_script_.returncode!=0:\n message = \"\"\"Erreur code retour lors de l'exécution\nde votre script de vérification:
    \n\"\"\"+check_script_.stdout.lstrip()\n return False,message\n else:\n return check_script_.stdout.strip(),\"\"\n# return check_script_.strip(),\"\"\n \n\n\ndef verif_reponse():\n# fonction utiliser uniquement dans le mode preview\n# TODO faire un merge avec ec qui est fait en mode quizzmode\n ok = False\n mess = \"\"\n if request.args[1] == 'multiple':\n lareponse = ast.literal_eval(request.vars.lareponse)\n labonnereponse = db((db.gen_question.QuestionId == request.args[2]) & (db.gen_question.user_id == auth.user.id)).select(db.gen_question.Reponse_calcule)\n labonnereponse = ast.literal_eval(labonnereponse[0].Reponse_calcule)\n labonnereponse.sort()\n ok = (lareponse == labonnereponse)\n if request.args[0] == 'get':\n return labonnereponse\n else:\n # on est sur une question simple\n champs = db((db.gen_question.QuestionId == request.args[2]) & (db.gen_question.user_id == auth.user.id)).select(db.gen_question.QuestionId,db.gen_question.Langage,db.gen_question.Verification,db.gen_question.Reponse_calcule)\n if request.args[0] == 'get':\n return champs[0].Reponse_calcule\n if champs[0].Verification is None:\n #labonnereponse = db(db.gen_question.QuestionId == request.args[2]).select(db.gen_question.Reponse_calcule)\n labonnereponse = champs[0].Reponse_calcule\n lareponse = unquote(request.vars.lareponse)\n ok = (lareponse == labonnereponse)\n else:\n if request.args[0] == 'get':\n return \"A vous de le faire !\"\n ok2,mess = execute_script_verif(unquote(request.vars.lareponse),champs[0].QuestionId,champs[0].Langage,champs[0].Verification)\n if mess == \"\":\n # le script s'est execute\n # soit il a renvoye True car resultat correct\n # soit il a renvoye False car resultat incorrect\n # soit il a renvoye False + message car syntaxe de la reponse incorrect\n ok = (ok2 == 'True')\n if not(ok) and str(ok2[5:])!=\"\": \n mess = '
    '+str(ok2[5:])+'
    ' \n else:\n ok = False\n mess = '
    '+mess+'
    '\n #return(\"\"+ok2+\" \"+mess+\"\") \n if ok: \n# return (\"
    Bonne réponse ! \"+str(lareponse)+\" \"+str(labonnereponse))\n return (\" Bonne réponse !\")\n else:\n# return(\"
    Mauvaise réponse ! \"+str(lareponse)+\" \"+str(labonnereponse))\n return(\" Mauvaise réponse !
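verif_reponse above accepts a multiple-choice submission when it equals the stored list regardless of order, which is why both sides go through ast.literal_eval and the stored answer is sorted before comparison. The check reduced to its essentials:

    import ast

    stored = "[3, 1, 4]"      # Reponse_calcule as persisted in the database
    submitted = "[4, 3, 1]"   # what the client sent back

    # literal_eval only accepts Python literals, so a submission cannot
    # smuggle in executable expressions; sorting both sides makes the
    # comparison order-insensitive.
    try:
        ok = sorted(ast.literal_eval(submitted)) == sorted(ast.literal_eval(stored))
    except (ValueError, SyntaxError):
        ok = False
    print(ok)  # True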
    \"+mess)\n \ndef view_enonce():\n enonce = db(db.quizz_banque.id == request.args(0)).select(db.quizz_banque.Titre, db.quizz_banque.Enonce)\n titre, enonce = enonce[0].Titre, enonce[0].Enonce\n return locals()\n\ndef preload_question():\n if request.args[0] == 'quizzmode':\n # on met a jour dans la bd du quizz actif, l'id de la question courante\n db(db.running_quizz.idrunning==1).update(idquestion = int(request.args[1]))\n return locals() \n\ndef view_question():\n # lors du clic pour voir une question, on met a jour la bd gen_question \n # pour consigner la bonne reponse apres interpretation du script python si necessaire\n # ceci permet au script verif_reponse de verifier l'exactitude de la bonne reponse\n # en fonction des données aleatoires générées lors de la visualisation de la question\n # request.args = 0:'quizzmode' ou 'preview', 1:id_question, 2:id_quizz, 3:position_question dans le quizz, 4: nbetudiants\n def update_genquestion(id,Reponse,Script_verif,Lang):\n existe = db((db.gen_question.QuestionId == id) & (db.gen_question.user_id == auth.user.id)).select()\n if len(existe) == 0:\n db.gen_question.insert(Reponse_calcule = Reponse, QuestionId = id, Langage = Lang, Verification = Script_verif)\n else:\n db((db.gen_question.QuestionId == id) & (db.gen_question.user_id == auth.user.id)).update(Reponse_calcule=Reponse, Langage = Lang, Verification = Script_verif)\n \n q_ = db.quizz_banque(request.args(1))\n if request.args[0] == 'quizzmode':\n id_quizz = int(request.args(2))\n position_question = int(request.args(3))\n# liste_questions = db(db.quizz_quizz.id == id_quizz).select(db.quizz_quizz.Questions)\n liste_questions = db(db.running_quizz.idrunning==1).select(db.running_quizz.liste_questions)\n# liste_questions = liste_questions[0].Questions\n liste_questions = liste_questions[0].liste_questions\n b_prev = -1\n b_next = -1\n if position_question > 0:\n position_prev_question = position_question - 1\n id_prev_question = liste_questions[position_prev_question]\n b_prev = XML(P(A(SPAN(_class=\"icon fa fa-arrow-circle-left fa-lg\"),_href=URL('preload_question',args=['quizzmode',id_prev_question,id_quizz,position_prev_question,request.args(4)])),_class='navbar-brand'))\n if position_question < len(liste_questions)-1:\n position_next_question = position_question + 1\n id_next_question = liste_questions[position_next_question]\n b_next = XML(P(A(SPAN(_class=\"icon fa fa-arrow-circle-right fa-lg\"),_href=URL('preload_question',args=['quizzmode',id_next_question,id_quizz,position_next_question,request.args(4)])),_class='navbar-brand')) \n num_question = request.args[1]\n if (q_.Langage !='Aucun') and (q_.Script is not None):\n # on va verifier que le temps d'execution du code ne depasse pas\n # le timeout prévu\n pid = os.getpid()\n # tout d'abord on cree un fichier python contenant le source\n # python de la question en rajoutant pour chaque ligne de la forme\n # __val = expression\n # une instruction de la forme print('val_=',val_) \n # de façon a generer un script qui renvoie sur la sortie standard\n # le nom des variables et leurs valeurs\n interprete_python = \"/usr/bin/python3\"\n interprete_sage = \"/opt/SageMath/local/bin/python3\"\n if (q_.Langage == \"Python\"):\n interprete = interprete_python\n run_time_limit = 5 # secondes\n else:\n interprete = interprete_sage\n run_time_limit = 7 # secondes\n if interprete == interprete_sage:\n script = \"/tmp/question_\"+str(num_question)+\"_\"+str(pid)+\".sage.py\"\n else:\n script = 
\"/tmp/question_\"+str(num_question)+\"_\"+str(pid)+\".py\" \n try:\n fic_q = open(script,\"wt\",encoding=\"utf-8\")\n except Exception as erreur:\n response.flash = \"Erreur lors de la sauvegarde de votre script : \"+str(erreur)\n return(locals())\n else:\n if interprete == interprete_sage:\n fic_q.write(\"from sage.all_cmdline import *\\n\")\n fic_q.write('__toexec=\"\"\"\\n')\n fic_q.write(q_.Script.replace('\"',r'\\\"'))\n fic_q.write('\\n\"\"\"\\n')\n fic_q.write(\"import re\\n__dicX_={}\\n\")\n if interprete == interprete_python:\n fic_q.write(\"exec(__toexec,{},__dicX_)\")\n else:\n fic_q.write(\"sage_eval('None',cmds=__toexec,locals=__dicX_)\\n\")\n fic_q.write(\"\"\"\n__p = re.compile('^\\s*(__[^ :]+)$')\n__retdicX_ = {}\nfor __locvar_ in __dicX_:\n __mXx_ = __p.match(__locvar_)\n if __mXx_ is not None:\n\"\"\")\n if interprete == interprete_python:\n fic_q.write(' __retdicX_[__locvar_] = __dicX_[__locvar_]\\n')\n else:\n fic_q.write(' __retdicX_[__locvar_] = str(__dicX_[__locvar_])\\n')\n fic_q.write('print(__retdicX_)\\n')\n fic_q.close()\n try:\n # on lance l'execution du script cree\n # les sorties standard et erreur seront recuperees dans check_script_.stdout\n# check_script_=subprocess.run([\"/usr/bin/python3\",script_python],timeout=run_time_limit,universal_newlines=True,text=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, encoding=\"utf-8\")\n env = os.environ.copy()\n env['HOME'] = \"/tmp\"\n# env['PATH'] = \"/home/pascalveron/.local/lib/python3.8/site-packages\"\n# env['PYTHONPATH'] = \"/home/pascalveron/.local/lib/python3.8/site-packages\" \n \n# check_script_= subprocess.run([interprete,script],timeout=run_time_limit,universal_newlines=True,text=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, encoding=\"utf-8\",env=env)\n# check_script_= subprocess.run([interprete,script],timeout=run_time_limit,universal_newlines=True,text=True,stdout=subprocess.PIPE, encoding=\"utf-8\",env=env)\n check_script_= subprocess.check_output([interprete,script],timeout=run_time_limit,universal_newlines=True,stderr=subprocess.STDOUT, encoding=\"utf-8\",env=env)\n except Exception as erreur:\n #fic_q = open(\"/tmp/toto.txt\",\"wt\",encoding=\"utf-8\")\n# erreur = sys.exc_info()[0]\n message = str(erreur).lstrip()+'
    '+str(erreur.output)\n message = message.replace('\\n','
    ')\n #fic_q.write(message)\n #fic_q.close()\n erreur_message = \"\"\"___Erreur lors de l'exécution\nde votre script : \n
    \"\"\"+script+\"\"\"\n\"\"\"+\" \"+message\n erreur_message=XML(erreur_message[3:])\n return locals()\n else:\n # if check_script_.returncode!=0:\n # response.flash = \"\"\"___Erreur code retour lors de l'exécution\n#de votre script :\n#\"\"\"+check_script_.lstrip()\n# return(locals())\n # si tout s'est bien passe\n # on evalue la sortie standard de façon à ce que\n # les variables déclarées dans le formulaire\n # soient créées, elles vont être placées dans dico_loc_\n # qui est l'équivalent de locals()\n # le deuxième paramètre de exec est utilisé pour passer\n # le dico globals(), ici c'est inutile\n # on doit placer check_script_ dans dico_loc pour que exec puisse fonctionner\n #dico_loc_= {'q_':q_}\n sortie__ = ast.literal_eval(check_script_)\n #vars_attribute = re.compile(r'\\{\\{=__[^ _}]*(_inline-fig|_inline-tex|_center-fig|_center-tex)\\}\\}')\n vars_attribute = re.compile(r'\\{\\{=__[^ _}]*(_inline-fig|_center-fig|_inline-python)\\}\\}')\n for var_ in sortie__:\n enonce = q_.Enonce\n has_attribute = vars_attribute.search(enonce)\n if has_attribute is not None:\n# q_.Enonce = enonce.replace('{{='+var_+has_attribute.group(1)+'}}', '``'+str(sortie__[var_])+'``:'+has_attribute.group(1)[1:])\n q_.Enonce = enonce.replace('{{='+var_+has_attribute.group(1)+'}}', '
    '+str(sortie__[var_])+'
    ')\n else:\n q_.Enonce = q_.Enonce.replace('{{='+var_+'}}', str(sortie__[var_]))\n has_attribute = vars_attribute.search(q_.Reponse)\n if has_attribute is not None:\n q_.Reponse = q_.Reponse.replace('{{='+var_+has_attribute.group(1)+'}}', '
    '+str(sortie__[var_])+'
    ')\n else:\n q_.Reponse = q_.Reponse.replace('{{='+var_+'}}', str(sortie__[var_]))\n if (q_.Verification) is not None:\n q_.Verification = q_.Verification.replace('{{='+var_+'}}', str(sortie__[var_])) \n if q_.Nature == 'Multiple':\n for j in range(1,9):\n qchoix = q_['Choix'+str(j)]\n if qchoix is not None:\n has_attribute = vars_attribute.search(qchoix)\n if has_attribute is not None:\n q_['Choix'+str(j)] = qchoix.replace('{{='+var_+has_attribute.group(1)+'}}', '
    '+str(sortie__[var_])+'
    ')\n else:\n q_['Choix'+str(j)] = qchoix.replace('{{='+var_+'}}',str(sortie__[var_]))\n if q_.Verification is None:\n update_genquestion(q_.id,q_.Reponse,None,q_.Langage)\n else:\n update_genquestion(q_.id,q_.Reponse,q_.Verification,q_.Langage) \n if request.args[0] == 'quizzmode':\n return({'q_': q_, 'ok_': num_question, 'b_prev': b_prev, 'b_next': b_next, 'num_question': position_question+1})\n else:\n return({'q_': q_, 'ok_': num_question})\n update_genquestion(q_.id,q_.Reponse,q_.Verification,None)\n if request.args[0] == 'quizzmode':\n return({'q_': q_, 'ok_': num_question, 'b_prev': b_prev, 'b_next': b_next, 'num_question': position_question+1})\n else:\n return({'q_': q_, 'ok_': num_question})\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef show_selectcat(base1,base2,id_cat,nb_space,id_option_selected,id_cat_select,oncontinue):\n# permet de generer le contenant l'arborescence des categories\n# en excluant les branches filles de id_cat_select et aussi id_cat_select (si cette valeur vaut -1 tout est affiche)\n# la fonction est recursive et appelee avec id_cat = id de Racine\n# nb_space permet de gerer les espaces a mettre dans \\n\"\n prochain = show_selectcat(base1,base2,row.id,nb_space+2,id_option_selected,id_cat_select,oncontinue)\n if prochain != -1:\n chaine += prochain\n return(chaine)\n else:\n return -1\n \n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef manage():\n global categorie_courante\n \n def move_item(liste_id):\n # ici le dernier element de la liste est l'id de la categorie destination\n # les autres elements sont les id des questions\n cat_to_go = liste_id[-1]\n for id in liste_id[:-1]:\n db(banque.id == id).update(Categorie=cat_to_go)\n if banque_name == 'quizz_quizz':\n session.flash = str(len(liste_id)-1)+XML(\" quizz(es) déplacé(s)\")\n else:\n session.flash = str(len(liste_id)-1)+XML(\" question(s) déplacée(s)\")\n\n def delete_item(liste_id):\n for id in liste_id:\n db(banque.id == id).delete()\n if banque_name == 'quizz_quizz':\n session.flash = str(len(liste_id))+XML(\" quizz(es) effacée(s)\")\n else:\n session.flash = str(len(liste_id))+XML(\" question(s) effacée(s)\")\n\n def save_categorie(form):\n # ici on recupere la derniere categorie selectionnee en cas d'erreur dans le formulaire\n global categorie_courante\n\n categorie_courante = form.vars.Categorie\n\n def save_or_update(form):\n if banque_name == 'quizz_quizz':\n # on recupere la liste des id pour l'ordre d'affichage des questions\n liste_id_quizz = request.vars.liste_id_quizz.split(':')\n # on met à jour le champ du formulaire contenant cette liste\n # ainsi ceci mettra automatiquement à jour la BD lors de la validation\n # du formulaire\n # si on est sur la page d ecreation d'un quizz, ce champ contiendra par defaut la liste ['-1']\n form.vars.Questions = liste_id_quizz\n # ici on regarde si c'est la premiere sauvegarde d'une nouvelle question\n # auquel cas on cree l'entree dans la BD (form.process()) puis on passe en mode edition\n # ou on quitte le formulaire selon ce que l'utilisateur a choisi\n if (request.vars.justsave == '1' or request.vars.justsave == '3'):\n form.vars.id = banque.insert(**dict(form.vars))\n session.flash = XML(\"Sauvegarde effectuée\")\n if request.vars.justsave == '1':\n redirect(URL('manage', args = [request.args[0],'edit',banque_name, form.vars.id] , user_signature=True))\n else:\n if 'keywords' in request.vars:\n redirect(URL('manage', args 
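view_question above injects generated values into the statement by replacing {{=__name}} placeholders, where an optional suffix such as _inline-fig selects an HTML wrapper. A compact sketch of that template pass, with hypothetical placeholder names:

    import re

    values = {"__n": 42, "__graph": "fig-0042.png"}
    enonce = "Compute f({{=__n}}) for the curve {{=__graph_inline-fig}}."

    # A suffix after the variable name, when present, picks a wrapper.
    pat = re.compile(r"\{\{=(__[A-Za-z0-9]+)(_inline-fig)?\}\}")

    def render(match):
        val = str(values[match.group(1)])
        if match.group(2):  # wrapped form, e.g. an inline figure
            return '<span class="inline-fig">%s</span>' % val
        return val

    print(pat.sub(render, enonce))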
= [request.args[0]], vars=dict(keywords=request.vars.keywords) , user_signature=True, hash_vars=False))\n else:\n redirect(URL('manage', args = [request.args[0]], user_signature=True))\n # sinon si c'est une sauvegarde alors que l'on etait en mode edition on fait une mise a jour de la db\n # et on active une erreur invisible, ce qui evite la soumission du formulaire\n elif (request.vars.justsave =='2' or request.vars.justsave =='4'):\n form.record.update_record(**dict(form.vars))\n form.errors.justsave = \"Sauvegarde OK\"\n response.flash = XML(\"Sauvegarde effectuée\")\n # si justsave vaut4 on veut quitter la page\n if (request.vars.justsave == '4'):\n session.flash = XML(\"Sauvegarde effectuée\")\n # on regarde si une categorie n'a pas ete selectionnee\n # auquel cas il faut revenir avec cette selection active\n if 'keywords' in request.vars:\n redirect(URL('manage', args = [request.args[0]], vars=dict(keywords=request.vars.keywords) , user_signature=True, hash_vars=False))\n else:\n redirect(URL('manage', args = [request.args[0]], user_signature=True))\n \n def check_reponse(form):\n erreur = False\n message_erreur = XML(\"Votre réponse doit être une liste non vide d'au plus 8 entiers distincts compris entre 1 et 8
    ou une expression du type {{=__variable}}\")\n if (form.vars.Nature == \"Multiple\"):\n reponse = form.vars.Reponse\n reponse = reponse.strip()\n if ((reponse[0:5] != \"{{=__\") or reponse[-2:] != \"}}\"):\n if ((reponse[0] != '[') or (reponse[-1] !=']')):\n form.errors.Reponse = message_erreur\n response.flash = XML(\"Erreur(s) dans le champ Réponse\")\n erreur = True\n else:\n try:\n reponse_l = ast.literal_eval(reponse)\n except:\n form.errors.Reponse = message_erreur\n response.flash = XML(\"Erreur(s) dans le champ Réponse\")\n erreur = True\n else:\n reponse_s = set(reponse_l)\n taille = len(reponse_l)\n if (taille != len(reponse_s)):\n form.errors.Reponse = message_erreur\n response.flash = XML(\"Erreur(s) dans le champ Réponse\")\n erreur = True\n else:\n ok = ((taille <= 8) and isinstance(reponse_l[0],int) and (reponse_l[0] > 0) and (reponse_l[0] < 9))\n i = 1\n while ((ok) and ( i < taille)):\n ok = (isinstance(reponse_l[i],int) and (reponse_l[i] > 0) and (reponse_l[i] < 9))\n i = i + 1\n if not(ok):\n form.errors.Reponse = message_erreur\n response.flash = XML(\"Erreur(s) dans le champ Réponse\")\n erreur = True\n else:\n reponse_l.sort()\n form.vars.Reponse = str(reponse_l)\n else:\n #ici on verifie que si la reponse donnee est de la forme {{=_reponse}} que _reponse fait bien partie du script\n reponse = reponse[3:] # on supprime {{= \n reponse = reponse[:-2] # on supprime }}\n import re\n p = re.compile(reponse+' *=')\n if p.search(form.vars.Script) is None:\n form.errors.Reponse = XML(\"Identifiant \"+reponse+\" inconnu dans le script\")\n response.flash = XML(\"Erreur(s) dans le champ Réponse\")\n erreur = True\n # il faudrait alors verifier que la variable correspond bien à une liste mais ceci ne peut se faire qu'au moment\n # de l'evaluation de la question. 
On reporte cette verification dans view_question.html\n\n if not(erreur):\n save_or_update(form)\n\n if len(request.args)==0:\n session.flash = XML(request.args)\n redirect(URL('index',user_signature=True))\n tablename = request.args(0)\n banque = db[request.args(0)]\n banque_name = request.args(0)\n banque_categorie_name = request.args(0)+'_Categorie'\n if request.args[0] == 'quizz_banque':\n banque_categorie = db.categorie_banque\n response.view = 'default/manage_questions.html'\n elif request.args[0] == 'quizz_quizz':\n banque_categorie = db.categorie_quizz\n response.view = 'default/manage_quizz.html'\n else:\n redirect(URL('index',user_signature=True))\n # variable utilisee pour afficher correctement en mode edition ou creation la derniere categorie\n # selectionnee en cas de sauvegarde d'un formulaire avec erreur\n racine = db(banque_categorie.Nom == 'Racine').select()\n categorie_courante = racine[0].id\n if 'keywords' in request.vars:\n categorie_courante = request.vars.keywords.split('=')\n if len(categorie_courante)>=2:\n categorie_courante = categorie_courante[1]\n categorie_courante = re.findall(r'\\d+', categorie_courante)\n if len(categorie_courante) > 0:\n categorie_courante = int(categorie_courante[0])\n else:\n categorie_courante = racine[0].id\n elif len(request.args)>2:\n if request.args[1] == 'edit':\n # on recupere l'id categorie de l'objet edite\n record = banque(request.args(3)) or redirect(URL('manage',user_signature=True))\n categorie_courante = record.Categorie\n nb_questions_in_racine = db(banque.Categorie == racine[0].id).count()\n banque.modified_on.readable = True\n if tablename == 'quizz_banque':\n mygrid = SQLFORM.grid(db.quizz_banque,args=[request.args(0)],fields=[db.quizz_banque.Titre, db.quizz_banque.Nature, db.quizz_banque.Enonce, db.quizz_banque.Langage, db.quizz_banque.modified_on], headers = {'quizz_banque.Enonce':'Enoncé','quizz_banque.Nature':'Type'}, deletable=True, duplicatable=True, editable=True, showbuttontext=False,selectable = [('Déplacer vers >>',lambda ids : move_item(ids)),('Supprimer',lambda ids : delete_item(ids))],exportclasses=dict(xml=False,html=False,tsv=False,tsv_with_hidden_cols=False,csv=False,csv_with_hidden_cols=False),orderby=db.quizz_banque.modified_on,user_signature=True,onvalidation=check_reponse,onfailure=save_categorie,maxtextlength=65, client_side_delete=True)\n else:\n mygrid = SQLFORM.grid(db.quizz_quizz,args=[request.args(0)], fields=[db.quizz_quizz.Titre], deletable=True, duplicatable=True, editable=True, searchable=True, showbuttontext=False,selectable = [('Déplacer vers >>',lambda ids : move_item(ids)),('Supprimer',lambda ids : delete_item(ids))],exportclasses=dict(xml=False,html=False,tsv=False,tsv_with_hidden_cols=False,csv=False,csv_with_hidden_cols=False),orderby=db.quizz_quizz.modified_on,onvalidation=save_or_update,onfailure=save_categorie,user_signature=True,maxtextlength=40,client_side_delete=True)\n # dans le cas ou on affiche le quizz il faut rajouter un formulaire permettant de selectionner\n # les questions que l'on souhaite enlever du formulaire\n # on passera la liste des id dans un champ cache intitul'e liste_q_to_del\n form_delete = FORM(INPUT(_type='submit', _id=\"remove_from_quizz\", _class=\"button btn btn-default btn-secondary\", _value=XML(\"Supprimer du quizz la sélection\")), hidden=dict(liste_q_to_del='-1'),_name='form_delete')\n if form_delete.process(formname='form_delete').accepted:\n # on recupere dans la balise de type hidden la liste des id des questions a supprimer\n liste_q_to_del = 
request.vars.liste_q_to_del.split(':')\n liste_q_quizz = db(db.quizz_quizz.id == request.args[3]).select(db.quizz_quizz.Questions)\n liste_q_quizz = liste_q_quizz[0].Questions\n #liste_q_before = copy.copy(liste_q_quizz)\n for id_q in liste_q_to_del:\n liste_q_quizz.remove(int(id_q))\n # si la liste est vide on la reinitialise avec -1\n if len(liste_q_quizz) == 0:\n liste_q_quizz = [-1]\n db(db.quizz_quizz.id == request.args[3]).update(Questions = liste_q_quizz) \n response.flash = str(len(liste_q_to_del))+ XML(' question(s) supprimée(s) du quizz') \n \n # ajout d'une case a cocher cachee pour gerer les categories a deplacer\n my_extra_element = INPUT(_type='checkbox', _name='records', _id='cat_to_move', _value=racine[0].id)\n mygrid[1][0].append(my_extra_element)\n # ajout d'un input cache pour gerer l'icone \"Sauvegarder\" en mode creation et edition de question\n my_extra_element = INPUT(_type='hidden', _name='justsave', _value='0')\n mygrid[1][0].append(my_extra_element)\n # ajout d'un input cache pour gerer l'ordre d'affichage des questions du quizz\n my_extra_element = INPUT(_type='hidden', _name='liste_id_quizz', _value='-1')\n mygrid[1][0].append(my_extra_element)\n if len(request.args)>1:\n if request.args[1] == 'edit':\n to_select = categorie_courante\n elif request.args[1] == 'new':\n# on recupere la categorie courante\n to_select = categorie_courante\n# response.flash=XML(\"Yo \"+str(to_select))\n elif request.args[1] == 'view' and request.args[2] == 'quizz_banque':\n # ici on est dans la banque des questions et on a clique pour visualiser une question\n redirect(URL('preload_question',args=['preview',request.args[3]], user_signature=True))\n elif len(request.args) > 4:\n # ici on est sur la liste des quizz, on a clique pour ajouter une question et on a clique pour visualiser le contenu d'une question \n redirect(URL('preload_question',args=['preview',request.args[5]], user_signature=True)) \n elif request.args[1] == 'duplicate':\n new_record = db(banque.id == request.args[3]).select()\n new_record[0].Titre = '(Copie) ' + new_record[0].Titre\n banque.insert(**banque._filter_fields(dict(new_record[0])))\n if banque_name == 'quizz_quizz':\n session.flash = XML(\"Quizz dupliqué\")\n else:\n session.flash = XML(\"Question dupliquée\")\n redirect(URL('manage',args=[request.args[0]],vars=request.vars,user_signature=True))\n elif request.args[1] == 'update':\n to_select = racine[0].id\n # ici on active l'option selectable pour avoir simplement automatiquement les cases a cocher\n # mais la gestion se fait dans la view\n mygrid = SQLFORM.grid(db.quizz_banque,args=[request.args(0),request.args(1),request.args(2)],fields=[db.quizz_banque.Titre, db.quizz_banque.Nature, db.quizz_banque.Enonce, db.quizz_banque.Langage, db.quizz_banque.modified_on], headers = {'quizz_banque.Enonce':'Enoncé','quizz_banque.Nature':'Type'}, deletable=False, duplicatable=False, editable=False, showbuttontext=False,orderby=db.quizz_banque.modified_on,exportclasses=dict(xml=False,html=False,json=False,tsv=False,tsv_with_hidden_cols=False,csv=False,csv_with_hidden_cols=False),selectable = [('Ajouter',lambda ids : none_func(ids))], user_signature=True,maxtextlength=65)\n my_extra_element = INPUT(_type='hidden', _name='liste_id_to_add', _value='-1')\n mygrid[1][0].append(my_extra_element) \n form_add = FORM(INPUT(_type='submit', _id=\"add_to_quizz\", _class=\"button btn btn-default btn-secondary\", _value=XML(\"Ajouter\")), hidden=dict(liste_q_to_add='-1'),_name='form_add')\n # variable permettant de savoir si on 
doit rafraichir la page presentant le contenu du quizz\n refresh_page = 0\n if form_add.process(formname='form_add').accepted:\n # on recupere dans la balise de type hidden la liste des id des questions a ajouter\n # ainsi que l'id du quizz qui est le premier element de la liste\n liste_q_to_add = request.vars.liste_q_to_add.split(':')\n liste_q_to_add = list(map(int,liste_q_to_add))\n id_quizz = liste_q_to_add[0]\n # on recupere la liste actuelle des questions\n liste_q_quizz = db(db.quizz_quizz.id == id_quizz).select(db.quizz_quizz.Questions)\n liste_q_quizz = liste_q_quizz[0].Questions\n if liste_q_quizz[0] == -1:\n liste_q_quizz = []\n nbq_in_oldquizz = len(liste_q_quizz)\n # on fait l'union des 2 listes\n new_liste_q_quizz = list(dict.fromkeys(liste_q_quizz + liste_q_to_add[1:]))\n if liste_q_quizz != new_liste_q_quizz:\n db(db.quizz_quizz.id == id_quizz).update(Questions = new_liste_q_quizz) \n session.flash = str(len(new_liste_q_quizz)-nbq_in_oldquizz)+ XML(' question(s) ajoutées au quizz')\n else:\n session.flash = str(XML('Question(s) déjà présente(s) dans le quizz')) \n # on indique a la view qu'elle doit rafraichir l'affichage du quizz\n refresh_page = 1\n response.view = 'default/update_quizz.html'\n __select = show_selectcat(banque_categorie,banque,racine[0].id,1,to_select,-1,True).replace('\\n','')\n select_code = XML('')\n if request.args[1] == 'update':\n racine = db(db.categorie_banque.Nom == 'Racine').select()\n __select = show_selectcat(db.categorie_banque,db.quizz_banque,racine[0].id,1,to_select,-1,True).replace('\\n','')\n nb_questions_tot = db(db.quizz_banque).count()\n nb_questions_in_racine = db(db.quizz_banque.Categorie == racine[0].id).count()\n select_code = XML('')\n else:\n # on est sur la page d'entree de manage/xxxx\n # on recupere le code HTML de l'arborescence des categories\n to_select = racine[0].id\n __select = show_selectcat(banque_categorie,banque,racine[0].id,1,to_select,-1,True).replace('\\n','')\n nb_questions_tot = db(banque).count()\n select_code = XML('')\n select_move = XML('')\n # on cree le bouton ajouter qui permet d'aller sur le formulaire de creation en indiquant que c'est son 1er affichage\n# bouton_ajout = A(SPAN(_class=\"icon plus icon-plus glyphicon glyphicon-plus\"),SPAN('Ajouter',_class=\"buttontext button\",_title=\"Add record to database\"),_class=\"button btn btn-default btn-secondary\", _href=URL('manage_questions',args=['new','quizz_banque'], user_signature=True, vars=dict(_init='1'), hash_vars=False), _title=\"Add record to database\")\n return locals()\n \n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef manage_cat():\n mygrid = SQLFORM.grid(db.categorie_banque, deletable=True, editable=True, showbuttontext=False,user_signature=True)\n return locals()\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef manage_categories():\n def delete_space_and_update(form,id_cat_parent):\n form.vars.Nom = form.vars.Nom.strip()\n# existe = db((db.categorie_banque.Parent_Id == id_cat_parent) & (db.categorie_banque.Nom == form.vars.Nom)).select()\n existe = db((labasecat.Parent_Id == id_cat_parent) & (labasecat.Nom == form.vars.Nom)).select()\n if len(existe)>0:\n cat_parent = labasecat(id_cat_parent)\n form.errors.Nom = XML(form.vars.Nom+' existe déjà dans la catégorie '+cat_parent.Nom);\n # on met a jour l'id du parent dont le nom a ete selectionne\n # via la balise select\n else:\n form.vars.Parent_Id = id_cat_parent\n\n \n 
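check_reponse earlier in this controller accepts a multiple-choice answer only when it parses to a non-empty list of at most 8 distinct integers between 1 and 8 (or a {{=__variable}} placeholder). The literal-list rule as a standalone predicate:

    import ast

    def is_valid_answer(text):
        # Must parse as a plain Python literal: no expressions, no code.
        try:
            parsed = ast.literal_eval(text.strip())
        except (ValueError, SyntaxError):
            return False
        if not isinstance(parsed, list) or not parsed:
            return False
        # At most 8 entries, all distinct integers in 1..8.
        if len(parsed) > 8 or len(set(parsed)) != len(parsed):
            return False
        return all(isinstance(v, int) and 1 <= v <= 8 for v in parsed)

    print(is_valid_answer("[1, 3, 5]"))  # True
    print(is_valid_answer("[1, 1]"))     # False (duplicate)
    print(is_valid_answer("[0, 9]"))     # False (out of range)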
def show_listcat(id_cat):\n# rows = db(db.categorie_banque.Parent_Id == id_cat).select(orderby=db.categorie_banque.Nom)\n rows = db(labasecat.Parent_Id == id_cat).select(orderby=labasecat.Nom)\n chaine = \"\"\n for row in rows:\n nb_questions_in_cat = db(labase2.Categorie == row.id).count()\n auxchaine = row.Nom+' ('+str(nb_questions_in_cat)+')'\n auxchaine += '  '+str(A(SPAN(_class=\"icon trash icon-trash glyphicon glyphicon-trash\"), data = {'action': 'delete'} ,_href=URL(\"manage_categories\",args=[request.args(0),\"delete\",row.id],user_signature=True)))\n auxchaine += '  '+str(A(SPAN(_class=\"icon pen icon-pencil glyphicon glyphicon-pencil\"),_href=URL(\"manage_categories\",args=[request.args(0),\"edit\",row.id],user_signature=True)))\n chaine += str(LI(XML(auxchaine),_class='select_ok'))+'\\n'\n souschaine = show_listcat(row.id)\n if souschaine is not None:\n chaine += souschaine\n if len(chaine) > 0:\n return('
      \\n'+XML(chaine)+'\\n
    \\n')\n else:\n return(\"\")\n \n if len(request.args) == 0:\n response.flash = \"Base inexistante\"\n return locals()\n if request.args[0] == \"question\":\n labasecat = db.categorie_banque\n labase2 = db.quizz_banque\n elif request.args[0] == \"quizz\":\n labasecat = db.categorie_quizz\n labase2 = db.quizz_quizz\n else:\n response.flash = \"Base inexistante\"\n return locals()\n# racine = db(db.categorie_banque.Nom == 'Racine').select()\n ok_ = 1\n racine = db(labasecat.Nom == 'Racine').select()\n# nb_questions_racine = db(db.quizz_banque.Categorie == racine[0].id).count()\n nb_objets_racine = db(labase2.Categorie == racine[0].id).count()\n if len(request.args)>1:\n if request.args[1] == 'edit':\n # on recupere le nom de la categorie qui est editee\n record = labasecat(request.args(2)) or redirect(URL('manage_categories',args=[request.args[0]],user_signature=True))\n # on recupere les infos sur la categorie parente\n# record_parent = db(db.categorie_banque.id == record.Parent_Id).select(db.categorie_banque.id)\n record_parent = db(labasecat.id == record.Parent_Id).select(labasecat.id)\n nomcategorie = record.Nom\n id_categorie_parent = record.Parent_Id\n id_categorie = record.id\n message = XML('Modification effectuée')\n form = SQLFORM(labasecat, record)\n elif request.args[1] == 'new':\n nomcategorie = \"\"\n id_categorie_parent = racine[0].id\n id_categorie = -1\n message = XML('Ajout effectué')\n form = SQLFORM(labasecat)\n elif request.args[1] == 'delete':\n #on verifie si la categorie possede des sous-categories\n vide = db(labasecat.Parent_Id == request.args[2]).select(labasecat.id)\n if len(vide) > 0:\n session.flash = XML(\"La catégorie contient des sous-catégories, suppression impossible\");\n else:\n db(labasecat.id == request.args[2]).delete()\n session.flash = XML(\"Catégorie supprimée\"); \n redirect(URL('manage_categories',args=[request.args(0)],user_signature=True))\n if request.args[1] in ['edit','new']: \n zeform = FORM(DIV(LABEL(labasecat.fields[1],_class='form-control-label col-sm-3'),DIV(INPUT(_name = \"Nom\", _value = nomcategorie, _class = \"form-control string\"),_class=\"col-sm-9\"),_class='form-group row'),\n DIV(LABEL(XML('Catégorie Parente'),_class='form-control-label col-sm-3'),DIV(XML(''),_class=\"col-sm-9\"),_class='form-group row'), \n INPUT(_type='hidden', _name='_formname', _value='editcat'),\n INPUT(_type='hidden', _name='Parent_Id', _value=id_categorie_parent),\n INPUT(_type='hidden', _name='id', _value=id_categorie),\n DIV(DIV(INPUT(_value=\"Soumettre\",_class=\"btn btn-primary\",_type='submit'),_class=\"col-sm-9 col-sm-offset-3\"),_class=\"form-group row\", _id=\"submit_record__row\"),\n _action='#', _enctype = 'multipart/form-data',_method='post', _class='web2py_form')\n def dsau(form,newval = request.vars.select_id):\n delete_space_and_update(form,newval)\n ok = form.process(session=None,formname='editcat',onvalidation=dsau, message_onsuccess=message,message_on_failure= 'Erreurs dans le formulaire', next=URL('manage_categories',args=[request.args(0)],user_signature='True')).accepted\n #record.update_record(Parent_Id = request.vars.select_id)\n #session.flash = XML('Modification effectuée')\n # redirect(URL('manage_categories',user_signature='True'))\n if form.errors:\n if form.errors.Nom:\n response.flash = form.errors.Nom\n else:\n response.flash = 'Erreur(s) dans le formulaire'\n return dict(liste_cat=zeform)\n else:\n menu_ajout = DIV(A(SPAN(_class=\"icon plus icon-plus glyphicon glyphicon-plus\"),SPAN(\"Ajouter\",_class=\"buttontext 
button\", _title=\"Add record to database\"),_class=\"button btn btn-default btn-secondary\",_href=URL(\"manage_categories\",args=[request.args(0),\"new\"],vars=dict(_init='1'),user_signature=True)),_class=\"web2py_console\")\n return dict(liste_cat=XML('\\n'+XML(menu_ajout)+'\\n
    \\n
      \\n
    • '+racine[0].Nom+' ('+str(nb_objets_racine)+')
    • \\n'+show_listcat(racine[0].id)+'\\n

    \\n'))\n\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef makeqrcode():\n import socket, qrcode\n \n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\",80))\n ip = s.getsockname()[0]\n qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_L,box_size=10,border=4)\n qr.add_data('https://'+ip+'/quizzpi/runquizz')\n qr.make(fit=True)\n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\n img.save(\"/home/www-data/web2py/applications/quizzpi/static/images/qrcode.png\")\n return locals()\n \n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef prelaunchquizz():\n # TODO : on pourrait faire un merge de prelaunchquizz et launchquizz\n idquizz = int(request.args[0])\n first_question = db(db.quizz_quizz.id == idquizz).select(db.quizz_quizz.Questions,db.quizz_quizz.Melange,db.quizz_quizz.Titre)\n # on recupere la liste des questions\n liste_q = first_question[0].Questions\n titre = first_question[0].Titre\n first_question = liste_q[0]\n vide=False\n if first_question != -1 :\n form_start = FORM(INPUT(_id='envoi',_type='submit',_class ='button btn btn-default btn-primary',_value='Commencer'),\n hidden=dict(nbtotaletu='-1'))\n if form_start.process().accepted:\n redirect(URL('launchquizz',args=request.args+[request.vars.nbtotaletu]))\n else:\n # on reinitialise la table des connexions\n db(db.etudiants.id >= 0).update(logged=False)\n # on initialise la table des reponses\n db.reponses_etudiants.truncate() \n # on initialise la table des questions generees\n db.gen_question.truncate() \n db.running_quizz.update_or_insert(db.running_quizz.idrunning==1,idrunning=1,idquizz = idquizz)\n else:\n vide = True\n return locals()\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef launchquizz():\n idquizz = int(request.args[0])\n first_question = db(db.quizz_quizz.id == idquizz).select(db.quizz_quizz.Questions,db.quizz_quizz.Melange)\n # on recupere la liste des questions\n liste_q = first_question[0].Questions\n melange = first_question[0].Melange\n first_question = liste_q[0]\n if melange == 'Oui':\n import random\n random.shuffle(liste_q) \n first_question = liste_q[0]\n db.running_quizz.update_or_insert(db.running_quizz.idrunning==1,idrunning=1,idquizz = idquizz,idquestion = first_question, liste_choix=[], liste_questions=liste_q)\n redirect(URL('preload_question',args=['quizzmode',first_question,idquizz,0,request.args(1)]))\n \n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers')) \ndef students():\n def delete_item(liste_id):\n for id in liste_id:\n db(db.etudiants.id == id).delete()\n session.flash = str(len(liste_id))+XML(\" étudiants(s) supprimé(s)\")\n \n #TODO modifier la base et utiliser le mécanisme de login de web2py\n student_grid = SQLFORM.grid(db.etudiants, fields=[db.etudiants.Nom, db.etudiants.Prenom, db.etudiants.Courriel,db.etudiants.Filiere,db.etudiants.logged], deletable=True, editable=True, duplicatable=False, showbuttontext=False,selectable = [('Supprimer',lambda ids : delete_item(ids))], exportclasses=dict(xml=False,html=False,json=False,tsv=False,tsv_with_hidden_cols=False,csv=False,csv_with_hidden_cols=False), client_side_delete=True, user_signature=True)\n #my_extra_element = INPUT(_type='hidden', _name='liste_st_to_del', _value='-1')\n #student[1][0].append(my_extra_element) \n formcsv = FORM(XML('Cliquer 
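makeqrcode above finds the server's LAN address with a UDP connect (no packet is sent; the OS merely selects the outbound interface) and renders the quiz URL as a PNG. The same two steps without the web2py-specific save path:

    import socket
    import qrcode

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))   # route selection only, nothing transmitted
    ip = s.getsockname()[0]
    s.close()

    qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_L,
                       box_size=10, border=4)
    qr.add_data("https://%s/quizzpi/runquizz" % ip)
    qr.make(fit=True)
    qr.make_image(fill_color="black", back_color="white").save("qrcode.png")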
sur Parcourir pour importer un fichier CSV '),BR(),BR(),\n INPUT(_type='file', _name='csvfile'),\n INPUT(_type='hidden', _value='db.etudiants', _name='table'),BR(),BR(),\n INPUT(_type='submit', _value=T('import'), _class='btn btn-primary'))\n if formcsv.process().accepted:\n try:\n db.etudiants.import_from_csv_file(request.vars.csvfile.file)\n session.flash = T('Liste mise à jour')\n except Exception as e:\n response.flash = DIV(T('Erreur dans le fichier CSV'), PRE(str(e)))\n else:\n redirect(URL('students',user_signature=True))\n return locals()\n \ndef preload_resultat():\n # juste utiliser pour lancer la vue correspondante\n return locals()\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers'))\ndef show_resultat():\n # code tres similaire a verif_reponse\n # code utilisé pour générer le camembert de la répartition des réponses\n # le & n'est utile que si plusieurs enseignants utilisent quizzpi sur la meme\n # machine. Cette fonctionnalité n'est pas encoe entièrement développé. TODO.\n q_good_answer = db((db.gen_question.QuestionId == request.args[2]) & (db.gen_question.user_id == auth.user.id)).select(db.gen_question.QuestionId,db.gen_question.Langage,db.gen_question.Verification,db.gen_question.Reponse_calcule)\n type_q = 'texte'\n is_multiple = db(db.running_quizz.idrunning == 1).select(db.running_quizz.liste_choix)\n is_multiple = is_multiple[0].liste_choix\n # on regarde si on est sur une question de type choix multiple\n nb_choix = len(is_multiple)\n # compteur pour les réponses incomplètes\n cpt_incomplet = 0\n # compteur pour les bonnes réponses\n cptok = 0\n # compteur du nombre total d'étudiants ayant répondu ou non\n cpt = 0\n # compteur du nombre de réponses reçues\n # NON UTILISE por l'instant\n cpt_rep = 0\n if nb_choix > 0:\n # on est sur une question à choix multiple mais c'est peut être une question\n # de type VRAI ou FAUX auquel cas on ne devra pas aficher le compteur des réponses incomplètes\n type_q = 'multiple'\n good_answer = ast.literal_eval(q_good_answer[0].Reponse_calcule)\n good_answer.sort()\n nb_good_answer = len(good_answer)\n if nb_good_answer > 1:\n # plusieurs choix sont possibles, on positionne la variable type_q\n # de façon à ce que dans la vue correspondante, le compteur\n # des questions incomplètes ne soit pas affiché\n type_q = 'multiple_answer'\n for reponse in db(db.reponses_etudiants.idquestion == request.args(2)).select(db.reponses_etudiants.reponse,db.reponses_etudiants.idetudiant):\n # on verifie que l'etudiant a repondu\n if reponse.reponse != '__-1__':\n cpt_rep += 1\n # la reponse de l'etudiant est une chaine de car.\n # avec : pour séparateur\n liste_reponse = reponse.reponse.split(\":\")\n # on la transforme en liste d'entiers\n liste_reponse = list(map(int,liste_reponse))\n liste_reponse.sort()\n if liste_reponse == good_answer:\n cptok += 1\n db((db.reponses_etudiants.idquestion == request.args(2)) & (db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(correct=True)\n else:\n # on vérifie si c'est un sous-ensemble des réponses possibles\n oncontinue = (liste_reponse[0] in good_answer)\n j = 1\n while oncontinue and (j < len(liste_reponse)):\n oncontinue = (liste_reponse[j] in good_answer)\n j += 1\n if oncontinue:\n cpt_incomplet += 1\n db((db.reponses_etudiants.idquestion == request.args(2)) & (db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(incomplet=True)\n else:\n db((db.reponses_etudiants.idquestion == request.args(2)) & 
(db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(nonrepondu=True)\n\n # on compte quand même, même si l'étudiant n'a pas répondu \n cpt += 1\n else:\n # on est sur une simple question ou il faut saisir la reponse\n for reponse in db(db.reponses_etudiants.idquestion == request.args(2)).select(db.reponses_etudiants.reponse,db.reponses_etudiants.idetudiant):\n if reponse.reponse != '__-1__':\n cpt_rep += 1\n if q_good_answer[0].Verification is None:\n good_answer = q_good_answer[0].Reponse_calcule\n lareponse = unquote(reponse.reponse)\n if lareponse == good_answer:\n cptok += 1\n db((db.reponses_etudiants.idquestion == request.args(2)) & (db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(correct=True)\n else:\n ok2,mess = execute_script_verif(unquote(reponse.reponse),q_good_answer[0].QuestionId,q_good_answer[0].Langage,q_good_answer[0].Verification)\n if mess == \"\":\n # le script s'est execute\n # soit il a renvoye True car resultat correct\n # soit il a renvoye False car resultat incorrect\n # soit il a renvoye False + message car syntaxe de la reponse incorrect\n ok = (ok2 == 'True')\n if ok:\n cptok += 1\n db((db.reponses_etudiants.idquestion == request.args(2)) & (db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(correct=True)\n else:\n # l'etudiant n'a pas repondu\n db((db.reponses_etudiants.idquestion == request.args(2)) & (db.reponses_etudiants.idetudiant == reponse.idetudiant)).update(nonrepondu=True) \n cpt+=1\n if int(request.args(1)) == -1:\n # on est arrive a la fin du quizz \n db.running_quizz.truncate()\n db.running_quizz.insert(idrunning=1,idquizz = -1,idquestion=-1, liste_choix=[])\n # on \"deconnecte\" les etudiants\n db(db.etudiants.id >= 0).update(logged=False)\n return locals()\n\n@auth.requires(auth.has_membership(group_id='superuser') or auth.has_membership(group_id='managers'))\ndef checkstudent():\n # fonction qui renvoie le nombre d'etudiants connectes\n # ainsi que le nombre d'etudiants ayant repondu\n # request.args(0) = flag, si présent et égal à 0 on veur avoir le nombre d'etudiants qui ont répondu\n # request.args(1) = id question courante\n nbetu = db(db.etudiants.logged == True).count()\n if request.args(0) == '0':\n nbreponse = db(db.reponses_etudiants.idquestion == request.args(1)).count()\n return str(nbreponse)+'/'+str(nbetu)\n else:\n return str(nbetu)\n\n# ---- Action for login/register/etc (required for auth) -----\ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n http://..../[app]/default/user/bulk_register\n use @auth.requires_login()\n @auth.requires_membership('group name')\n @auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users\n \"\"\"\n # on initialise le quizz actif à -1\n db.running_quizz.truncate()\n db.running_quizz.insert(idrunning=1,idquizz = -1,idquestion=-1, liste_choix=[])\n # on initialise la table des reponses\n db.reponses_etudiants.truncate()\n # on initialise la table des questions generees\n db.gen_question.truncate()\n db(db.etudiants.id >= 0).update(logged=False)\n return dict(form=auth())\n\n# ---- API (example) -----\n@auth.requires_login()\ndef api_get_user_email():\n if not 
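The while-loop above that decides whether a multiple-choice answer is an incomplete subset of the expected one can be written as a set comparison; a minimal sketch of the same correct / incomplet / wrong classification, assuming both arguments are lists of ints parsed from the "1:3:4" format used above:

def classify(liste_reponse, good_answer):
    if sorted(liste_reponse) == sorted(good_answer):
        return "correct"
    if set(liste_reponse) < set(good_answer):   # strict subset of the expected answers
        return "incomplet"
    return "incorrect"

classify([1, 3], [1, 3, 4])   # -> 'incomplet'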
request.env.request_method == 'GET': raise HTTP(403)\n return response.json({'status':'success', 'email':auth.user.email})\n\n# ---- Smart Grid (example) -----\n@auth.requires(auth.has_membership(group_id='superuser')) \ndef grid():\n response.view = 'generic.html' # use a generic view\n tablename = request.args(0)\n if not tablename in db.tables: raise HTTP(403)\n grid = SQLFORM.grid(db[tablename], args=[tablename], deletable=False, editable=True, user_signature=False)\n return locals()\n\n\n\n# ---- action to server uploaded static content (required) ---\n@cache.action()\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request, db)\n","repo_name":"p-veron/QuizzPi","sub_path":"quizzpi/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":57893,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"30645092254","text":"import os # noqa\n\nfrom src.task3.constants import LOG, RESULTS_PATH # noqa\nfrom src.task3.io import read_session_data, write_to_parquet # noqa\nfrom src.task3.transformations import (\n create_popular_songs,\n create_users_and_distinct_songs_count,\n longest_sessions_with_tracklist,\n)\nfrom src.task3.utils import spark_session_setup\n\n\ndef main():\n spark = spark_session_setup()\n df = read_session_data(spark)\n songs_per_user = create_users_and_distinct_songs_count(df)\n # write_to_parquet(songs_per_user, os.path.join(RESULTS_PATH, \"distinct_songs.parquet\"))\n LOG.info(\n f\"Sample list of distinct Songs per user: \\n {songs_per_user.collect()[:10]}\"\n )\n popular_songs = create_popular_songs(df)\n # write_to_parquet(popular_songs, os.path.join(RESULTS_PATH, \"popular_songs.parquet\"))\n LOG.info(f\"List of top 100 popular songs:\\n {popular_songs.collect()}\")\n df_sessions = longest_sessions_with_tracklist(df)\n # write_to_parquet(df_sessions, os.path.join(RESULTS_PATH, \"longest_sessions.parquet\"))\n LOG.info(\"Longest sessions with track list:\\n\")\n df_sessions.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ryankarlos/bigdataeng","sub_path":"src/task3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"14379133080","text":"class Node:\n\n def __init__(self,initdata):\n self.data = initdata\n self.next = None\n\n def getData(self):\n return self.data\n\n def getNext(self):\n return self.next\n\n def setData(self,newdata):\n self.data = newdata\n\n def setNext(self,newnext):\n self.next = newnext\n\nclass Orderedlist():\n\n def __init__(self):\n self.head = None\n\n def isEmpty(self):\n return self.head == None\n\n def add(self, item):\n temp = Node(item)\n previous = None\n current = self.head\n\n while current:\n if current.getData() > item:\n break\n else:\n previous = current\n current = current.getNext()\n if not previous: # add to head\n self.head = temp\n temp.setNext(current)\n else:\n previous.setNext(temp)\n temp.setNext(current)\n\n def size(self):\n\n count = 0\n current = self.head\n while current:\n count += 1\n current = current.getNext()\n return count\n\n def search(self, item):\n\n found = False\n current = self.head\n while not found and current:\n if current.getData() > item:\n return False\n elif current.getData() == item:\n found = True\n else:\n current = current.getNext()\n return found\n\n def 
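In the src/task3 record above, create_users_and_distinct_songs_count is imported but its body is not shown. A hedged sketch of what such a PySpark transformation typically looks like; the column names user_id and track_name are assumptions, not the project's real schema:

from pyspark.sql import functions as F

def create_users_and_distinct_songs_count(df):
    # hypothetical columns: user_id, track_name
    return (df.groupBy("user_id")
              .agg(F.countDistinct("track_name").alias("distinct_songs")))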
remove(self, item):\n\n current = self.head\n previous = None\n found = False\n while not found and current:\n if current.getData() == item:\n found = True\n elif current.getData() > item:\n raise ValueError(f\"{item} is not in list\")\n else:\n previous = current\n current = current.getNext()\n\n if not found:\n raise ValueError(f\"{item} is not in list\")\n\n if previous == None:\n self.head = current.getNext()\n else:\n previous.setNext(current.getNext())\n\n def append(self, item):\n\n previous = None\n current = self.head\n last = Node(item)\n while current:\n previous = current\n current = current.getNext()\n if not previous:\n self.head = last\n elif previous.getData() > item:\n raise ValueError(f\"{item} is not valid\")\n else:\n previous.setNext(last)\n last.setNext(None)\n\n def index(self, item):\n\n found = False\n current = self.head\n index = 0\n while not found and current:\n if current.getData() == item:\n found = True\n elif current.getData() > item:\n raise ValueError(f\"{item} is not in list\")\n else:\n current = current.getNext()\n index += 1\n if found:\n return index\n else:\n raise ValueError(f\"{item} is not in list\")\n\n def insert(self, pos, item):\n\n node = Node(item)\n ps = 0\n previous = None\n current = self.head\n\n while ps != pos:\n previous = current\n current = current.getNext()\n ps +=1\n\n if not previous:\n self.head = node\n else:\n if previous.getData() <= item <= current.getData():\n previous.setNext(node)\n else:\n raise ValueError(f\"{item} is not valid\")\n node.setNext(current)\n\n def pop(self, pos=None):\n\n if pos == None:\n previous = None\n current = self.head\n\n while current.getNext():\n previous = current\n current = current.getNext()\n if not previous:\n self.head = None\n else:\n previous.setNext(None)\n else:\n ps = 0\n previous = None\n current = self.head\n while ps != pos:\n previous = current\n current = current.getNext()\n ps += 1\n\n if not previous:\n self.head = current.getNext()\n else:\n previous.setNext(current.getNext())\n\n\n return current.getData()\n\n\n\n\nif __name__ == '__main__':\n orderedlist = Orderedlist()\n print(orderedlist.isEmpty())\n orderedlist.add(6)\n orderedlist.add(7)\n orderedlist.add(8)\n # myLinkedlist.remove('b')\n # print(orderedlist.isEmpty())\n # print(orderedlist.size())\n print(orderedlist.search(3))\n print(orderedlist.search(7))\n # orderedlist.remove(8)\n # print(orderedlist.search(8))\n print(orderedlist.size())\n # print(orderedlist.search(6))\n # orderedlist.remove(6)\n # print(orderedlist.search(6))\n # print(orderedlist.size())\n # orderedlist.append(5)\n orderedlist.append(9)\n print(orderedlist.size())\n # print(orderedlist.search(3))\n print(orderedlist.index(9))\n # print(myLinkedlist.remove(8))\n print(orderedlist.pop(0))\n print(orderedlist.pop())\n # orderedlist.append('d')\n print(orderedlist.pop())\n # print(orderedlist.pop())\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Super-Louis/Algorithm","sub_path":"data_structure/orderedlist_python.py","file_name":"orderedlist_python.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6519386175","text":"\"\"\"\nModule regroupant les fonctions utilisée pour parser les informations des sources externes\n\"\"\"\n################### WIKIPEDIA ###################\n# Fonctions utilisées dans le notebook wikipédia #\n##################################################\n\n\nfrom Identification_couples_livres.extract_books_from_DB import *\n\ndef 
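For contrast with the linked Orderedlist above, the same sorted-membership operations on a plain Python list via the standard bisect module (binary search for lookup, O(n) element shifting on insert):

import bisect

data = []
for item in (6, 7, 8, 9):
    bisect.insort(data, item)               # keeps the list sorted

i = bisect.bisect_left(data, 7)
found = i < len(data) and data[i] == 7      # True, located in O(log n) comparisons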
format_list(page):\n \"\"\"\n Prend une page wikipédia et supprime les indications de colonnes pour permettre au parseur de trouver les listes\n :param str page: page wikipédia\n :return: même page sans {{colonnes|taille=|nombre=2| ... }} tout en conservant le contenu\n \"\"\"\n page = re.sub(r'\\*\\*', '*', page)\n page_serach_re = re.search(r'\\{\\{colonnes\\|.*?\\|\\n', page, re.DOTALL)\n while page_serach_re:\n nb_bracket = 0\n start_index = page_serach_re.span()[0]\n for index, caractere in enumerate(page[start_index:]):\n if caractere == '{':\n nb_bracket += 1\n elif caractere == '}':\n nb_bracket -= 1\n if nb_bracket == 0:\n page = page[:start_index + index - 1] + page[start_index + index + 2:]\n page = re.sub(r'\\{\\{colonnes\\|.*?\\|\\n', '', page)\n break\n page_serach_re = re.search(r'\\{\\{colonnes\\|.*?\\|\\n', page, re.DOTALL)\n return page\n\ndef split_info_box(page):\n \"\"\"\n Sépare un page wikipédia en son infobox et le reste de la page. Si il n'y a pas d'Infobox, retourne un champs vide\n pour L'infobox, et la page complete pour le reste de la page\n :param str page: page wikipédia\n :return (str, str): Infobox et contenu de la page sans l'infobox\n \"\"\"\n nb_bracket = 0\n start_index = page.find(\"{{Infobox\")\n for index, caractere in enumerate(page[start_index:]):\n if caractere == '{':\n nb_bracket += 1\n elif caractere == '}':\n nb_bracket -= 1\n if nb_bracket == 0:\n return page[:start_index + index + 1], page[start_index + index + 2:]\n return \"\", page\n\ndef get_info_from_infobox(raw_infos):\n \"\"\"\n retourne les champs de l'infobox sous forme de dictionnaire à partir de sa chaine de caractère, ou si\n :param str raw_infos: chaine de caractère de l'infobox\n :return dict[str, str]: champs de l'infobox\n \"\"\"\n infos = {}\n for info in re.split(r\"\\n\\||\\n\\}\", raw_infos, flags=re.DOTALL)[1:-1]:\n if len(info.split('=')) > 1:\n key, value = info.split('=')[0], info.split('=')[1]\n infos[key] = value\n return infos\n\n\ndef parse_section(section, title_ls, level):\n \"\"\"\n Trie les informations que l'on peut récupérer à part d'une section tirée du parseur,\n et fait de même pour ses sous-sections si elles existent\n :param wikitextparser.Section section: Section de la page wikipedia\n :param set title_ls: liste des intitulés des sections\n :param int level: Niveau de la section (section principale, secondaire, tertiaire, etc...)\n :return dict[str: UNION[dict, list, int]]: informations pertinantes sur la section et ses sous-sections\n \"\"\"\n\n section_dict = {'content': section.contents,\n 'list': [re.sub(r\"\\'\\'\", \"\", item) for list in section.get_lists() for item in list.items],\n 'sub_section': {},\n 'level':level\n }\n\n # les deux premières sections sont toujours: \"\\n\", section_actuelle\n if len(section.sections) > 2:\n sub_sections = section.sections[2:]\n for sub_sect in sub_sections:\n # On verifie bien qu'ils sagit de sous-sections\n if sub_sect.level == level + 1:\n title_ls.add(sub_sect.title)\n # Si il y a des sous-sections, on applique la fonction sur celles-ci\n section_dict['sub_section'][sub_sect.title] = parse_section(sub_sect, title_ls, sub_sect.level)\n\n return section_dict\n\ndef extract_title_from_list(list):\n \"\"\"\n Extrait le titre des livres depuis les listes d'items générée par le parseur\n 2 étapes:\n - Si les données sont structurées : {{Ouvrage|titre=... 
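get_info_from_infobox above keeps only the first two '='-separated pieces, so a value that itself contains '=' is silently truncated. A sketch of the same field extraction using a bounded split:

import re

def parse_infobox_fields(raw_infos):
    infos = {}
    for info in re.split(r"\n\||\n\}", raw_infos, flags=re.DOTALL)[1:-1]:
        if "=" in info:
            key, value = info.split("=", 1)   # maxsplit=1 keeps '=' inside values
            infos[key.strip()] = value.strip()
    return infos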
}}, on récupère directement le titre\n        - Si les donnée ne sont pas structurée: ''L'Effet des rayons gamma sur les vieux garçons'' (de [[Paul Zindel]]), Leméac ([[1970]])\n            On doit eliminer les infornations en trop et ne garder que le titre.\n    :param list[str] list: liste du parseur d'où extraire les titres\n    :return list[str]: liste comportant uniquement les informations des titres\n    \"\"\"\n    titles = []\n    for item in [item.replace('\\n', '') for item in list]:\n        added = False\n\n        # Si les informations sur les livres sont structurées\n        meta_data_re = re.search(r\"\\{\\{(Ouvrage|Écrit).*\\}\\}\", item)\n        if meta_data_re:\n            meta_data = meta_data_re.group()\n            # On selectionne le titre et le sous titre\n            title_re = re.search(r'(?<=titre=).*?(?=\\|)', meta_data)\n            subtitle_re = re.search(r'(?<=sous-titre=).*?(?=\\|)', meta_data)\n            # Quand un sous-titre est présent, C'est souvent un tome d'une série. On concatène le titre et le sous-titre\n            if title_re or subtitle_re:\n                added = True\n                if title_re and subtitle_re:\n                    titles.append(normalize(title_re.group() + ' ' + subtitle_re.group()))\n                elif title_re:\n                    titles.append(normalize(title_re.group()))\n                elif subtitle_re:\n                    titles.append(normalize(subtitle_re.group()))\n\n        # Si les informations ne sont pas structurées, ou que l'extraction a échouée\n        if not added:\n            # On retire les \"{{ ... }}\", ce sont des méta-data\n            n = 1\n            while n:\n                item, n = re.subn(r\"\\{\\{.*\\}\\}\", '', item)\n            n = 1\n            # On retire les balises html, ce sont des liens vers d'autres pages\n            while n:\n                item, n = re.subn(r\"<.+?>.*?\", '', item)\n            # On seletionne les élements entre [[...]]\n            brackets_re = re.search(r'(?<=\\[\\[).*?(?=\\]\\])', item)\n            while brackets_re:\n                # Si on rencontre cette configuration: [[info1|info2|info3]], il s'agit de la même information\n                # sous plusieures formes différentes. On choisi arbitrairement la dernière et on supprime les [[ et ]]\n                item = re.sub(r\"\\[\\[.*?\\]\\]\", brackets_re.group().split('|')[-1], item, count=1)\n                brackets_re = re.search(r'(?<=\\[\\[).*?(?=\\]\\])', item)\n            # On retire dans l'ordre le texte entre parenthèse, les reférences, les années en début de chaine,\n            # les espaces, les tirets et les \":\" en début de chaine.\n            item = remove_text_between_parentheses(item)\n            item = re.sub(r'<ref.*?</ref>', '', item)\n            item = re.sub(r'^\\s*?\\d{4}', '', item)\n            item = re.sub(r'^-\\s', '', item)\n            item = re.sub(r'^:', '', item)\n            if normalize(item.split(',')[0]):\n                titles.append(normalize(item.split(',')[0]))\n\n    return titles","repo_name":"mchlggnn/projet-mccq-secteur-livre","sub_path":"ExtractionWikipedia/external_sources_module.py","file_name":"external_sources_module.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}\n{"seq_id":"31629126534","text":"#! 
/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nRun a YOLO_v3 style detection model on test images.\r\n\"\"\"\r\n\r\nimport colorsys\r\nimport csv\r\nimport os\r\nimport random\r\nfrom timeit import time\r\nfrom timeit import default_timer as timer ### to calculate FPS\r\nimport json\r\nimport numpy as np\r\nfrom keras import backend as K\r\nfrom keras.models import load_model\r\nfrom PIL import Image, ImageFont, ImageDraw\r\nimport cv2\r\nfrom yolo3.model import yolo_eval\r\nfrom yolo3.utils import letterbox_image\r\n\r\nclass YOLO(object):\r\n def __init__(self):\r\n self.model_path = 'model_data/logoD.h5'\r\n self.anchors_path = 'model_data/logoD_anchors.txt'\r\n self.classes_path = 'model_data/logoD_classes.txt'\r\n self.output_path = 'output/'\r\n self.score = 0.3\r\n self.iou = 0.5\r\n self.class_names = self._get_class()\r\n self.anchors = self._get_anchors()\r\n self.sess = K.get_session()\r\n self.model_image_size = (608, 608) # fixed size or (None, None)\r\n self.is_fixed_size = self.model_image_size != (None, None)\r\n self.boxes, self.scores, self.classes = self.generate()\r\n\r\n def _get_class(self):\r\n classes_path = os.path.expanduser(self.classes_path)\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\n def _get_anchors(self):\r\n anchors_path = os.path.expanduser(self.anchors_path)\r\n with open(anchors_path) as f:\r\n anchors = f.readline()\r\n anchors = [float(x) for x in anchors.split(',')]\r\n anchors = np.array(anchors).reshape(-1, 2)\r\n return anchors\r\n\r\n def savebbox_txt(self, boxes, classes, scores, savebbox_path):\r\n with open(savebbox_path, 'w') as f:\r\n f.write('%d\\n' % len(boxes))\r\n for i, c in reversed(list(enumerate(classes))):\r\n predicted_class = self.class_names[c]\r\n box = ' '.join(map(str, boxes[i].astype('int32')))\r\n score = scores[i]\r\n f.write(' '.join((box, predicted_class, '{:.4f}'.format(score))) + '\\n')\r\n print('Image %s saved' % (savebbox_path))\r\n\r\n def savebbox_js(self, boxes, classes, scores, savebbox_path):\r\n with open(savebbox_path, 'w') as f:\r\n a = {}\r\n for i, c in reversed(list(enumerate(classes))):\r\n if self.class_names[c] not in a:\r\n a[self.class_names[c]] = []\r\n b = {}\r\n b['score'] = str('{:.2f}'.format(scores[i]))\r\n b['top'], b['left'], b['bottom'], b['right'] = boxes[i].astype('int32').astype('str')\r\n a[self.class_names[c]].append(b)\r\n json.dump(a, f)\r\n\r\n def generate(self):\r\n model_path = os.path.expanduser(self.model_path)\r\n assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'\r\n\r\n self.yolo_model = load_model(model_path, compile=False)\r\n\r\n # from keras.models import Model\r\n # model1 = Model(inputs=self.yolo_model.input, outputs=self.yolo_model.get_layer('leaky_re_lu_58').output)\r\n print(self.yolo_model.summary())\r\n\r\n print('{} model, anchors, and classes loaded.'.format(model_path))\r\n\r\n # Generate colors for drawing bounding boxes.\r\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\r\n for x in range(len(self.class_names))]\r\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\r\n self.colors = list(\r\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\r\n self.colors))\r\n random.seed(10101) # Fixed seed for consistent colors across runs.\r\n random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\r\n random.seed(None) # Reset seed to default.\r\n\r\n # Generate output tensor 
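_get_anchors above flattens one comma-separated line into (width, height) pairs with a reshape; a tiny worked example of that step (the anchor values are made up):

import numpy as np

line = "10,13, 16,30, 33,23"   # hypothetical anchors-file content
anchors = np.array([float(x) for x in line.split(',')]).reshape(-1, 2)
# array([[10., 13.], [16., 30.], [33., 23.]]) -- one (w, h) pair per anchor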
targets for filtered bounding boxes.\r\n self.input_image_shape = K.placeholder(shape=(2, ))\r\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\r\n len(self.class_names), self.input_image_shape,\r\n score_threshold=self.score, iou_threshold=self.iou)\r\n return boxes, scores, classes\r\n\r\n def detect_image(self, image, output_json_prefix = None, show_bounding_box = True):\r\n start = time.time()\r\n\r\n if self.is_fixed_size:\r\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\r\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\r\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\r\n else:\r\n new_image_size = (image.width - (image.width % 32),\r\n image.height - (image.height % 32))\r\n boxed_image = letterbox_image(image, new_image_size)\r\n image_data = np.array(boxed_image, dtype='float32')\r\n\r\n print(image_data.shape)\r\n image_data /= 255.\r\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\r\n\r\n out_boxes, out_scores, out_classes = self.sess.run(\r\n [self.boxes, self.scores, self.classes],\r\n feed_dict={\r\n self.yolo_model.input: image_data,\r\n self.input_image_shape: [image.size[1], image.size[0]],\r\n # self.input_image_shape: [image.size[1], image.size[0]],\r\n K.learning_phase(): 0\r\n })\r\n\r\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\r\n\r\n\r\n if output_json_prefix is not None:\r\n head, tail = os.path.split(output_json_prefix)\r\n # savebbox_path = str('output/%s.txt' % (tail.split('.')[0]))\r\n savebbox_path = str(self.output_path + '%s.json' % (tail.split('.')[0]))\r\n self.savebbox_js(out_boxes, out_classes, out_scores, savebbox_path)\r\n\r\n if show_bounding_box:\r\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\r\n thickness = (image.size[0] + image.size[1]) // 300\r\n\r\n for i, c in reversed(list(enumerate(out_classes))):\r\n predicted_class = self.class_names[c]\r\n box = out_boxes[i]\r\n score = out_scores[i]\r\n\r\n label = '{} {:.2f}'.format(predicted_class, score)\r\n draw = ImageDraw.Draw(image)\r\n label_size = draw.textsize(label, font)\r\n\r\n top, left, bottom, right = box\r\n top = max(0, np.floor(top + 0.5).astype('int32'))\r\n left = max(0, np.floor(left + 0.5).astype('int32'))\r\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\r\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\r\n print(label, (left, top), (right, bottom))\r\n\r\n if top - label_size[1] >= 0:\r\n text_origin = np.array([left, top - label_size[1]])\r\n else:\r\n text_origin = np.array([left, top + 1])\r\n\r\n # image drawing\r\n for i in range(thickness):\r\n draw.rectangle(\r\n [left + i, top + i, right - i, bottom - i],\r\n outline=self.colors[c])\r\n draw.rectangle(\r\n [tuple(text_origin), tuple(text_origin + label_size)],\r\n fill=self.colors[c])\r\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\r\n del draw\r\n end = time.time()\r\n print('Take %d seconds to detect.' 
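letterbox_image is imported from yolo3.utils and not shown in this record. It is commonly implemented as a resize that preserves the aspect ratio and pads the remainder with gray; a sketch under that assumption:

from PIL import Image

def letterbox_image(image, size):
    # scale to fit inside `size`, then center the result on a gray canvas
    iw, ih = image.size
    w, h = size
    scale = min(w / iw, h / ih)
    nw, nh = int(iw * scale), int(ih * scale)
    resized = image.resize((nw, nh), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(resized, ((w - nw) // 2, (h - nh) // 2))
    return canvas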
% (end - start))\r\n        return image\r\n\r\n    def detect_video_frame(self, image, frame_id):\r\n        start = timer()\r\n        if self.model_image_size != (None, None):\r\n            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\r\n            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\r\n            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\r\n        else:\r\n            new_image_size = (image.width - (image.width % 32),\r\n                              image.height - (image.height % 32))\r\n            boxed_image = letterbox_image(image, new_image_size)\r\n        image_data = np.array(boxed_image, dtype='float32')\r\n\r\n        print(image_data.shape)\r\n        image_data /= 255.\r\n        image_data = np.expand_dims(image_data, 0) # Add batch dimension.\r\n\r\n        out_boxes, out_scores, out_classes = self.sess.run(\r\n            [self.boxes, self.scores, self.classes],\r\n            feed_dict={\r\n                self.yolo_model.input: image_data,\r\n                self.input_image_shape: [image.size[1], image.size[0]],\r\n                K.learning_phase(): 0\r\n            })\r\n\r\n        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\r\n\r\n        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\r\n                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\r\n        thickness = (image.size[0] + image.size[1]) // 300\r\n        frame_rows = []\r\n        for i, c in reversed(list(enumerate(out_classes))):\r\n            single_row = []\r\n            predicted_class = self.class_names[c]\r\n            box = out_boxes[i]\r\n            score = out_scores[i]\r\n\r\n            label = '{} {:.2f}'.format(predicted_class, score)\r\n            draw = ImageDraw.Draw(image)\r\n            label_size = draw.textsize(label, font)\r\n\r\n            top, left, bottom, right = box\r\n            top = max(0, np.floor(top + 0.5).astype('int32'))\r\n            left = max(0, np.floor(left + 0.5).astype('int32'))\r\n            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\r\n            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\r\n            print(label, (left, top), (right, bottom))\r\n\r\n            if top - label_size[1] >= 0:\r\n                text_origin = np.array([left, top - label_size[1]])\r\n            else:\r\n                text_origin = np.array([left, top + 1])\r\n\r\n            # drawing\r\n            for i in range(thickness):\r\n                draw.rectangle(\r\n                    [left + i, top + i, right - i, bottom - i],\r\n                    outline=self.colors[c])\r\n            draw.rectangle(\r\n                [tuple(text_origin), tuple(text_origin + label_size)],\r\n                fill=self.colors[c])\r\n            draw.text(text_origin, label, fill=(0, 0, 0), font=font)\r\n            del draw\r\n\r\n            brand = predicted_class.split('-')[0]\r\n            location = predicted_class.split('-')[1]\r\n            w = right - left\r\n            h = bottom - top\r\n            area = w*h\r\n            pecentage = area*100/(image.width*image.height)\r\n            central_x = (left + right)/2\r\n            central_y = (top + bottom)/2\r\n            # zone C is the image centre; A/B are the top-left/top-right quadrants,\r\n            # D/E the bottom-left/bottom-right ones\r\n            if image.width//4 < central_x <= image.width*3//4 and image.height//4 < central_y <= image.height*3//4:\r\n                position = 'C'\r\n            elif central_x<=image.width//2 and central_y<=image.height//2:\r\n                position = 'A'\r\n            elif central_x>image.width//2 and central_y<=image.height//2:\r\n                position = 'B'\r\n            elif central_x<=image.width//2 and central_y>image.height//2:\r\n                position = 'D'\r\n            elif central_x>image.width//2 and central_y>image.height//2:\r\n                position = 'E'\r\n            single_row.append(frame_id)\r\n            single_row.append(brand)\r\n            single_row.append(location)\r\n            single_row.append(area)\r\n            single_row.append(pecentage)\r\n            single_row.append(central_x)\r\n            single_row.append(central_y)\r\n            single_row.append(position)\r\n            single_row.append(score)\r\n            frame_rows.append(single_row)\r\n        end = timer()\r\n        print(end - start)\r\n        return image, frame_rows\r\n\r\n    def detect_video(self,\r\n                     video_path,\r\n                     ms_per_frame = 1000,\r\n                     save_video_with_boundingbox = True,\r\n                     save_bounding_box_info=True,\r\n                     show_result_live=True):\r\n\r\n        vid = cv2.VideoCapture(video_path)\r\n        if not vid.isOpened():\r\n            raise IOError(\"Couldn't open 
video\")\r\n _, tail = os.path.split(video_path)\r\n\r\n\r\n if save_video_with_boundingbox:\r\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\r\n video_fps = vid.get(cv2.CAP_PROP_FPS)\r\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\r\n # isOutput = True if output_path != \"\" else False\r\n # if isOutput:\r\n output_path = tail.split('.')[0]+ \"_with_BB.\" + tail.split('.')[1]\r\n out = cv2.VideoWriter(output_path, video_FourCC, 1/(ms_per_frame/1000), video_size)\r\n print('input video fps:{}'.format(video_fps))\r\n print('output video fps:{}'.format(1/(ms_per_frame/1000)))\r\n\r\n accum_time = 0\r\n curr_fps = 0\r\n fps = \"FPS: ??\"\r\n prev_time = timer()\r\n\r\n if save_bounding_box_info:\r\n fid = open('%s.csv' % (tail.split('.')[0]), 'w', newline='')\r\n csvfile = csv.writer(fid, delimiter=',')\r\n csvfile.writerow([col_name.strip() for col_name in\r\n 'frame_id, brand, location, size, pecentage, x, y, position, confidence'.split(',')])\r\n\r\n # with open('%s.csv' % (tail.split('.')[0]), 'w', newline='') as fid:\r\n # csvfile = csv.writer(fid, delimiter=',')\r\n # csvfile.writerow([col_name.strip() for col_name in 'frame_id, brand, location, size, pecentage, x, y, position, confidence'.split(',')])\r\n count = 0\r\n while True:\r\n vid.set(cv2.CAP_PROP_POS_MSEC, ((count * ms_per_frame)))\r\n return_value, frame = vid.read()\r\n if return_value:\r\n try:\r\n image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n except:\r\n image = Image.fromarray(frame)\r\n else:\r\n break\r\n image, frame_rows = self.detect_video_frame(image, count)\r\n\r\n result = np.asarray(image)\r\n result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)\r\n\r\n if show_result_live:\r\n curr_time = timer()\r\n exec_time = curr_time - prev_time\r\n prev_time = curr_time\r\n accum_time = accum_time + exec_time\r\n curr_fps = curr_fps + 1\r\n if accum_time > 1:\r\n accum_time = accum_time - 1\r\n fps = \"FPS: \" + str(curr_fps)\r\n curr_fps = 0\r\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\r\n fontScale=0.50, color=(255, 0, 0), thickness=2)\r\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"result\", result)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n if save_video_with_boundingbox:\r\n out.write(result)\r\n\r\n if save_bounding_box_info:\r\n csvfile.writerows(frame_rows)\r\n count = count + 1\r\n print('count:{}'.format(count))\r\n vid.release()\r\n out.release()\r\n self.close_session()\r\n\r\n def close_session(self):\r\n self.sess.close()\r\n\r\n# def detect_video(yolo, video_path, show_result_live = True):\r\n# import cv2\r\n# vid = cv2.VideoCapture(video_path)\r\n# if not vid.isOpened():\r\n# raise IOError(\"Couldn't open video\")\r\n# accum_time = 0\r\n# curr_fps = 0\r\n# fps = \"FPS: ??\"\r\n# prev_time = timer()\r\n# _, tail = os.path.split(video_path)\r\n# count = 0\r\n# while True:\r\n# return_value, frame = vid.read()\r\n# # print(cv2.CAP_PROP_POS_MSEC)\r\n# cv2.imshow(\"OpenCV\", frame)\r\n# image = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))\r\n# image = yolo.detect_image(image,\r\n# output_json_prefix = tail + 'frame'+ str(count),\r\n# show_bounding_box = show_result_live\r\n# )\r\n#\r\n# if show_result_live:\r\n# result = np.asarray(image)\r\n# result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)\r\n# curr_time = timer()\r\n# exec_time = curr_time - prev_time\r\n# prev_time = curr_time\r\n# accum_time = accum_time + exec_time\r\n# curr_fps = curr_fps + 1\r\n# 
if accum_time > 1:\r\n# accum_time = accum_time - 1\r\n# fps = \"FPS: \" + str(curr_fps)\r\n# curr_fps = 0\r\n# cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\r\n# fontScale=0.50, color=(255, 0, 0), thickness=2)\r\n# cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\r\n# cv2.imshow(\"result\", result)\r\n# if cv2.waitKey(1) & 0xFF == ord('q'):\r\n# break\r\n# count += 1\r\n# yolo.close_session()\r\n\r\ndef detect_img(yolo):\r\n while True:\r\n img = input('Input image filename:')\r\n try:\r\n image = Image.open(img)\r\n except:\r\n print('Open Error! Try again!')\r\n continue\r\n else:\r\n r_image = yolo.detect_image(image, img)\r\n r_image.show()\r\n yolo.close_session()\r\n\r\ndef detect_multiple_imgs(yolo, folder_path):\r\n for file in os.listdir(folder_path):\r\n image = Image.open(img)\r\n yolo.detect_image(image, file)\r\n yolo.close_session()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # detect_multiple_imgs(YOLO())\r\n # detect_video(YOLO(), './test1.mp4')\r\n # detect_img(YOLO())\r\n\r\n y = YOLO()\r\n y.detect_video('./test1.mp4')","repo_name":"ant1pink/logo-detection","sub_path":"yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":18059,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"72134689798","text":"from datetime import datetime\nimport os\nfrom shutil import rmtree\nfrom django.http import response\nfrom django.shortcuts import redirect, render, reverse\nfrom django.views.generic import CreateView, DeleteView, DetailView, FormView, ListView, TemplateView\nfrom clientes.models import Empleado\nfrom fachadas.models import *\nfrom fachadas.forms import *\nfrom fachadas.functions import *\nfrom fachadas.mixins import EmpleadoRequiredMixin\nfrom functions import create_folder, write_file\n# Create your views here.\n\nCARPETA_FACHADAS = \"/home/anorak/Test/Fachadas/\"\n\nclass ObraListView(EmpleadoRequiredMixin, ListView):\n template_name = \"fachadas/lista_fachadas.html\"\n\n def get_queryset(self):\n return Obra.objects.all()\n\n def get_context_data(self, **kwargs):\n context = super(ObraListView, self).get_context_data(**kwargs)\n obras = Obra.objects.all()\n urls = [reverse(\"fachadas:eliminar-obra\", args=[i.id]) for i in obras]\n obras = zip(obras, urls)\n fecha = date.today()\n context.update({\n \"año_mes\": fecha.strftime(\"%Y-%m\"),\n \"dia\":fecha.strftime(\"%d\"),\n \"obras\": obras\n })\n return context\n\nclass ObraCreateView(EmpleadoRequiredMixin, CreateView):\n template_name =\"fachadas/iniciar_obra.html\"\n form_class = ObraModelForm\n\n def get_success_url(self):\n return reverse(\"fachadas:lista-obra\")\n\n def get_context_data(self, **kwargs):\n context = super(ObraCreateView, self).get_context_data(**kwargs)\n context.update({\n \"previous\":reverse(\"fachadas:lista-obra\")\n\n })\n return context \n\n def form_valid(self, form):\n form.instance.empelado = Empleado.objects.get(user=self.request.user)\n create_folder(CARPETA_FACHADAS + form.instance.nombre_obra)\n return super(ObraCreateView, self).form_valid(form)\n\nclass ObraDetailView(EmpleadoRequiredMixin, DetailView):\n template_name = \"fachadas/detalles_obra.html\"\n pk_url_kwarg = \"obra_pk\"\n queryset = Obra.objects.all()\n\n def get_success_url(self):\n return reverse(\"fachadas:detalles-obra\", args=[self.kwargs[\"obra_pk\"]])\n\n def get(self, request, *args, **kwargs):\n if kwargs[\"dia\"] == \"32\":\n obra = self.get_object()\n dict_fecha = unir_fecha(kwargs) \n costos_dict = 
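detect_multiple_imgs above opens an undefined name img, so it raises NameError on the first file; a corrected sketch that joins each directory entry to the folder path:

import os
from PIL import Image

def detect_multiple_imgs(yolo, folder_path):
    for file in os.listdir(folder_path):
        image = Image.open(os.path.join(folder_path, file))  # was: Image.open(img)
        yolo.detect_image(image, file)
    yolo.close_session()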
costos_mes(dict_fecha, obra)\n dia_maximo = costos_dict[\"dias_mes\"][0]\n return redirect(\"fachadas:detalles-obra\", self.kwargs[\"obra_pk\"], self.kwargs[\"año_mes\"], dia_maximo)\n return super(ObraDetailView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(ObraDetailView, self).get_context_data(**kwargs)\n obra = self.get_object()\n trabajadores = Trabajador.objects.filter(obra = obra)\n pagos =[]\n try:\n pagos = Pago.objects.filter(obra=obra)\n pagos = [pagos.filter(trabajador=i).order_by(\"-fecha\").first() for i in trabajadores] \n except:\n pass\n trabajadores_pagos = zip(trabajadores, pagos)\n\n dict_fecha = unir_fecha(self.kwargs)\n costos_dict = costos_mes(dict_fecha, obra)\n costos_display = costos_dict[\"costos\"].filter(fecha=dict_fecha[\"fecha\"]) \n meses_unicos = [i[\"fecha\"].strftime(\"%Y-%m\") for i in costos_dict[\"costos\"].values(\"fecha\").annotate(n = models.Count(\"pk\"))]\n meses_unicos = valores_unicos(meses_unicos)\n\n try:\n meses_unicos.remove(dict_fecha[\"año_mes\"])\n except:\n pass\n\n meses_unicos.insert(0, dict_fecha[\"año_mes\"])\n #expenses to be disaplayed on the web page\n costos_display = asignar_acciones(costos_display, obra, dict_fecha[\"fecha\"])\n\n context.update({\n \"previous\":reverse(\"fachadas:lista-obra\"),\n \"obra\":obra,\n \"trabajadores\": trabajadores,\n \"costos\":costos_display,\n \"dias\":costos_dict[\"dias_mes\"],\n \"dict_fecha\":dict_fecha,\n \"meses_unicos\":meses_unicos,\n \"trabajadores_pagos\":trabajadores_pagos\n })\n return context\n\nclass ObraDeleteView(EmpleadoRequiredMixin, DeleteView):\n model = Obra\n http_method_names = ['delete']\n\n def dispatch(self, request, *args, **kwargs):\n instance = Obra.objects.get(id=self.kwargs[\"pk\"])\n rmtree(CARPETA_FACHADAS + instance.nombre_obra)\n handler = getattr(self, 'delete')\n return handler(request, *args, **kwargs)\n\n def get_success_url(self):\n success_url = str(reverse('fachadas:lista-obra'))\n return success_url\n\ndef pagar_nomina(request, pk, año_mes, dia):\n obra = Obra.objects.get(id=pk)\n trabajadores = Trabajador.objects.filter(obra=obra)\n if request.method == \"POST\":\n inicio_nomina = request.POST[\"inicio_nomina\"] \n final_nomina = request.POST[\"final_nomina\"]\n for trabajador in trabajadores:\n acciones = Accion.objects.filter(trabajador=trabajador, \n fecha__gte=inicio_nomina, \n fecha__lte=final_nomina)\n valor_nomina = sum([i.precio_unidad * i.cantidad for i in acciones])\n trabajador.acumulado -= valor_nomina\n if trabajador.acumulado < 0:\n trabajador.acumulado = 0\n obra.fecha_ultimo_pago = date.today() \n Pago.objects.create(fecha=date.today(), \n monto=valor_nomina, \n periodo_inicio=inicio_nomina, \n periodo_final=final_nomina, \n trabajador=trabajador, \n obra=obra)\n trabajador.save()\n obra.save()\n\n return redirect(reverse(\"fachadas:detalles-obra\", args=[pk, año_mes, dia]))\n\nclass CostoCreateView(EmpleadoRequiredMixin, FormView):\n template_name = \"fachadas/crear_costo.html\"\n form_class=CostoForm\n\n def get_success_url(self):\n dict = unir_fecha(self.kwargs)\n return reverse(\"fachadas:detalles-obra\", args=[self.kwargs[\"pk\"], dict[\"año_mes\"], dict[\"dia\"]])\n\n def get_form_kwargs(self):\n #pass kwarg with last visit\n kwargs = super().get_form_kwargs()\n kwargs[\"obra\"] = Obra.objects.get(id=self.kwargs[\"pk\"])\n return kwargs\n\n def get_context_data(self, **kwargs):\n dict = unir_fecha(self.kwargs)\n context = super(CostoCreateView, self).get_context_data(**kwargs)\n 
context.update({\n \"previous\":reverse(\"fachadas:detalles-obra\", args=[self.kwargs[\"pk\"], dict[\"año_mes\"], dict[\"dia\"]])\n })\n return context\n\n def form_valid(self, form):\n descripcion = form.cleaned_data[\"descripcion\"]\n cantidad = form.cleaned_data[\"cantidad\"]\n fecha = form.cleaned_data[\"fecha\"]\n precio_unidad = form.cleaned_data[\"precio_unidad\"]\n cobro_unidad = form.cleaned_data[\"cobro_unidad\"]\n obra = Obra.objects.get(id=self.kwargs[\"pk\"])\n trabajador = form.cleaned_data[\"trabajador\"]\n\n obra.costo_total += precio_unidad * cantidad\n obra.save()\n\n if form.cleaned_data[\"tipo\"] == \"COSTO\":\n Costo.objects.create(descripcion=descripcion, cantidad=cantidad, fecha=fecha, obra=obra, precio_unidad=precio_unidad, cobro_unidad=cobro_unidad)\n elif form.cleaned_data[\"tipo\"] == \"ACCION\":\n Accion.objects.create(descripcion=descripcion, cantidad=cantidad, fecha=fecha, obra=obra, precio_unidad=precio_unidad, cobro_unidad=cobro_unidad, trabajador=trabajador)\n trabajador.acumulado += precio_unidad * cantidad\n trabajador.save()\n\n return super().form_valid(form)\n\nclass TrabajadorCreateView(EmpleadoRequiredMixin, CreateView):\n template_name=\"fachadas/crear_trabajador.html\"\n form_class = TrabajadorModelForm\n\n def get_success_url(self):\n dict = unir_fecha(self.kwargs)\n return reverse(\"fachadas:detalles-obra\", args=[self.kwargs[\"pk\"], dict[\"año_mes\"], dict[\"dia\"]])\n\n def get_context_data(self, **kwargs):\n context = super(TrabajadorCreateView, self).get_context_data(**kwargs)\n dict = unir_fecha(self.kwargs)\n context.update({\n \"previous\":reverse(\"fachadas:detalles-obra\", args=[self.kwargs[\"pk\"], dict[\"año_mes\"], dict[\"dia\"]]) \n })\n return context\n\n def form_valid(self, form):\n obra = Obra.objects.get(id=self.kwargs[\"pk\"])\n form.instance.obra = obra\n return super().form_valid(form)\n\nclass PagoListView(EmpleadoRequiredMixin, ListView):\n template_name = 'fachadas/filtrar_pagos.html'\n queryset = Pago.objects.all()\n def get_context_data(self, **kwargs):\n context = super(PagoListView, self).get_context_data(**kwargs)\n inicio_pago = self.kwargs[\"inicio_pago\"]\n final_pago = self.kwargs[\"final_pago\"]\n obra = Obra.objects.get(id=self.kwargs[\"obra_pk\"])\n pagos = Pago.objects.filter(obra=obra, \n fecha__gte=inicio_pago, \n fecha__lte=final_pago)\n urls = [reverse(\"fachadas:eliminar-pago\", args=[self.kwargs[\"obra_pk\"], inicio_pago,final_pago, i.id]) for i in pagos]\n pagos = zip(pagos, urls)\n # do something with your data\n context.update({\"pagos\":pagos, \n \"obra\":obra,\n \"previous\":reverse(\"fachadas:detalles-obra\", kwargs={\"obra_pk\": self.kwargs[\"obra_pk\"], \n \"año_mes\": date.today().strftime(\"%Y-%m\"), \n \"dia\": date.today().strftime(\"%d\")}),\n \"inicio_pago\":inicio_pago,\n \"final_pago\":final_pago\n })\n # set your context\n return context\n\nclass PagoDeleteView(EmpleadoRequiredMixin, DeleteView):\n model = Pago\n http_method_names = ['delete']\n\n def dispatch(self, request, *args, **kwargs):\n handler = getattr(self, 'delete')\n return handler(request, *args, **kwargs)\n\n def get_success_url(self):\n success_url = str(reverse('fachadas:filtrar-pagos', args=[self.kwargs[\"obra_pk\"], self.kwargs[\"inicio_pago\"], self.kwargs[\"final_pago\"]]))\n return 
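pagar_nomina above fetches every Accion row and sums precio_unidad * cantidad in Python; the same total can be computed database-side. A sketch, assuming both fields are numeric model fields (inicio_nomina, final_nomina and trabajador as in the view above):

from django.db.models import DecimalField, F, Sum

valor_nomina = (
    Accion.objects
    .filter(trabajador=trabajador, fecha__gte=inicio_nomina, fecha__lte=final_nomina)
    .aggregate(total=Sum(F("precio_unidad") * F("cantidad"),
                         output_field=DecimalField()))["total"]
    or 0
)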
success_url\n\n\n","repo_name":"TheArchKnight/python_crm","sub_path":"fachadas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18737484367","text":"from . import better_round\n\nprefixes = {'K': 3,\n 'M': 6,\n 'G': 9,\n 'T': 12,\n 'm': -3,\n 'μ': -6,\n 'n': -9}\n\ndef from_prefix(string):\n if string[-1] in prefixes:\n return float(string[:-1])*10**prefixes[string[-1]]\n \n return float(string)\n\ndef to_prefix(number, signs = None):\n if number == 0:\n return '0'\n \n if signs != None:\n number = better_round.better_round(number, signs)\n \n m = np.log10(abs(number))\n max_ = ''\n max_mult = 0\n for p in prefixes:\n v = prefixes[p]\n # print(m, v, v*m > 0, m - v < 3, m - v > 0)\n if v*m > 0 and m - v < 3 and m - v >= 0:\n if abs(v) > abs(max_mult):\n max_mult = v\n max_ = p\n if signs == None:\n return str(round(number/10**max_mult, 9)) + max_\n else:\n return str(better_round.better_round(number/10**max_mult, signs)) + max_\n\n","repo_name":"sitandr/physlab","sub_path":"metric_prefixes.py","file_name":"metric_prefixes.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"74569860357","text":"# -*- coding: utf-8 -*-\nfrom ..solver import BC_HD, BC_PR, Solver\nfrom ...tensor_wrapper import Vector, Matrix\n\nclass SolverFS_3d(Solver):\n '''\n 3D Finite Sum (FS) solver for elliptic PDEs of the form\n -div(k grad u) = f. See parent class Solver for more details.\n '''\n\n def _gen_coefficients(self, PDE, GRD, d, n, h, dim, mode, tau, verb):\n self.iKx = PDE.Kx.build([GRD.xc, GRD.yr, GRD.zr], verb=verb, inv=True, to_diag=True)\n self.iKy = PDE.Ky.build([GRD.xr, GRD.yc, GRD.zr], verb=verb, inv=True, to_diag=True)\n self.iKz = PDE.Kz.build([GRD.xr, GRD.yr, GRD.zc], verb=verb, inv=True, to_diag=True)\n self.f = PDE.F.build([GRD.xr, GRD.yr, GRD.zr], verb=verb)\n\n def _gen_matrices(self, d, n, h, dim, mode, tau, verb):\n isperiodic = self.PDE.bc == BC_PR\n\n I = Matrix.eye(d, mode, tau)\n\n B = Matrix.volterra(d, mode, tau, h, isperiodic=isperiodic)\n self.Bx = I.kron(I).kron(B)\n self.By = I.kron(B).kron(I)\n self.Bz = B.kron(I).kron(I)\n\n self.iqx = self.iKx.diag().sum_out(dim, 0)\n self.iqy = self.iKy.diag().sum_out(dim, 1)\n self.iqz = self.iKz.diag().sum_out(dim, 2)\n\n self.qx = self.iqx.inv(v0=None, verb=verb, name = 'qx')\n self.qy = self.iqy.inv(v0=None, verb=verb, name = 'qy')\n self.qz = self.iqz.inv(v0=None, verb=verb, name = 'qz')\n\n E = Matrix.ones(d, mode, tau)\n self.Wx = self.qx.diag().kron(E)\n self.Wy = self.qy.kron2e()\n self.Wz = E.kron(self.qz.diag())\n\n I3 = Matrix.eye(d*dim, mode, tau)\n self.Rx = self.iKx.dot(I3-self.Wx.dot(self.iKx)).dot(self.Bx.T)\n self.Ry = self.iKy.dot(I3-self.Wy.dot(self.iKy)).dot(self.By.T)\n self.Rz = self.iKz.dot(I3-self.Wz.dot(self.iKz)).dot(self.Bz.T)\n\n self.Hx = self.Bx.dot(self.Rx)\n self.Hy = self.By.dot(self.Ry)\n self.Hz = self.Bz.dot(self.Rz)\n\n def _gen_system(self, d, n, h, dim, mode, tau, verb):\n self.A = Matrix.block([[self.Hz + self.Hx , self.Hz ],\n [self.Hz , self.Hz + self.Hy ]])\n self.rhs = self.Hz.dot(self.f)\n self.rhs = Vector.block([self.rhs, self.rhs])\n\n def _gen_solution(self, PDE, LSS, d, n, h, dim, mode, eps, tau, verb):\n sol = LSS.solve(self.A, self.rhs, eps, tau, PDE.sol0, verb)\n self.wx = sol.half(0)\n self.wy = sol.half(1)\n self.ux = self.Rx.dot(self.wx)\n self.uy = 
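metric_prefixes.py above calls np.log10 but never imports numpy, so to_prefix raises NameError at runtime. A self-contained sketch of the same prefix selection with the import in place and a worked value:

import numpy as np

prefixes = {'K': 3, 'M': 6, 'G': 9, 'T': 12, 'm': -3, 'n': -9}

def to_prefix(number):
    # pick the largest prefix whose exponent fits, as in the module above;
    # the original is missing `import numpy as np` for this log10 call
    if number == 0:
        return '0'
    m = np.log10(abs(number))
    best, best_mult = '', 0
    for p, v in prefixes.items():
        if v * m > 0 and 0 <= m - v < 3 and abs(v) > abs(best_mult):
            best_mult, best = v, p
    return str(round(number / 10**best_mult, 9)) + best

to_prefix(4700.0)   # '4.7K'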
self.Ry.dot(self.wy)\n self.uz = self.Rz.dot(self.f - self.wx - self.wy)\n self.u = self.Hx.dot(self.wx)\n","repo_name":"AndreiChertkov/qttpdesolver","sub_path":"qttpdesolver/solvers/solver_fs/solver_fs_3d.py","file_name":"solver_fs_3d.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"39923397222","text":"\"\"\"\nParse and upload sequence read sets.\n\nSequence read sets contain a sample and urls of an external datastore for a set\nof FASTQ files from a single sequencing run.\n\"\"\"\nimport click\nimport logging\nimport json\nimport re\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional\nfrom urllib.parse import urljoin\nfrom id3c.cli import cli\nfrom id3c.cli.command import with_database_session\nfrom id3c.db.session import DatabaseSession\nfrom id3c.db.datatypes import as_json\n\n\nLOG = logging.getLogger(__name__)\n\n\n@cli.group(\"sequence-read-set\", help=__doc__)\ndef sequence_read_set():\n pass\n\n@sequence_read_set.command(\"parse\")\n\n@click.argument(\"fastq_directory\",\n metavar = \"\",\n type = click.Path(exists=True, file_okay=False))\n\n@click.option(\"--filename-pattern\",\n help = \"Regex pattern to match sample in expected filename\",\n metavar = \"\",\n default = r'^(?P\\d+)_',\n show_default = True)\n\n@click.option(\"--url-prefix\",\n help = \"Base for fully-qualifying sequence read set URLs\",\n metavar = \"\",\n default = \"file://rhino.fhcrc.org\",\n show_default = True)\n\ndef parse(fastq_directory, filename_pattern, url_prefix):\n \"\"\"\n Find all *.fastq.gz files within a provided , which should be an\n absolute file path.\n\n The provided --filename-pattern regular expression is used to extract the\n sample ID from each FASTQ filename. The regex should contain a capture\n group named \"sample\". Each set of files with the same sample ID are\n grouped into a single sequence read set.\n\n All sequence read sets are output to stdout as newline-delimited JSON\n records. 
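The Bx/By/Bz construction above lifts a 1-D operator to 3-D through Kronecker products with identities. A dense NumPy analogue of the same pattern (the lower-triangular B is just a stand-in for the Volterra operator):

import numpy as np

n = 4
I = np.eye(n)
B = np.tril(np.ones((n, n)))        # stand-in for the 1-D Volterra matrix
Bx = np.kron(np.kron(I, I), B)      # acts along the innermost (x) axis
By = np.kron(np.kron(I, B), I)
Bz = np.kron(np.kron(B, I), I)      # acts along the outermost (z) axis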
You will likely want to redirect stdout to a file.\n \"\"\"\n sequence_read_sets: Dict[str, list] = defaultdict(list)\n filename_pattern = re.compile(filename_pattern)\n\n for filepath in list(Path(fastq_directory).glob(\"*.fastq.gz\")):\n filename = filepath.name\n # Check the filename matches provided filename pattern\n filename_match = filename_pattern.match(filename)\n assert filename_match, f\"Filename {filename} doesn't match provided --filename-pattern\"\n\n # Extract the sample from the filename_match\n try:\n sample = filename_match.group(\"sample\")\n except IndexError:\n LOG.error(f\"Filename {filename} matched provided --filename-pattern, but didn't extract a «sample» capture group\")\n raise\n\n sequence_read_sets[sample].append(urljoin(url_prefix, str(filepath)))\n\n for sample in sequence_read_sets:\n print(as_json({\"sample\": sample, \"urls\": sequence_read_sets[sample]}))\n\n\n@sequence_read_set.command(\"upload\")\n@click.argument(\"sequence-read-set-file\",\n metavar = \"\",\n type = click.File(\"r\"))\n@click.argument(\"unknown-sample-output\",\n metavar= \"\",\n type=click.File(\"w\"))\n@with_database_session\n\ndef upload(sequence_read_set_file, unknown_sample_output, db: DatabaseSession):\n \"\"\"\n Upload sequence read sets into the database warehouse.\n\n must be a newline delimited JSON file produced\n by this command's sibling command.\n\n Sequence read sets with NWGC sample IDs that cannot be found within the\n database warehouse are printed out as newline delimited JSON file\n .\n \"\"\"\n\n for sequence_read_set in sequence_read_set_file:\n sample_set = json.loads(sequence_read_set)\n nwgc_id = sample_set.get(\"sample\")\n urls = sample_set.get(\"urls\")\n with db.savepoint(f\"sequence read set {nwgc_id}\"):\n LOG.info(f\"Processing sequence read set for sample {nwgc_id}\")\n sample_id = find_sample(db, nwgc_id)\n if sample_id is None:\n LOG.warning(f\"Skipping sample with NWGC ID «{nwgc_id}» because it was not found within warehouse.\")\n unknown_sample_output.write(sequence_read_set)\n continue\n sequence_read_set = insert_sequence_read_set(db, sample_id, urls)\n LOG.info(f\"Finished uploading sequence read set for sample {nwgc_id}\")\n\n\ndef find_sample(db: DatabaseSession, nwgc_id: str) -> Optional[int]:\n \"\"\"\n Find sample within warehouse that has *nwgc_id* in the sample details.\n \"\"\"\n LOG.debug(f\"Looking up sample with NWGC ID: «{nwgc_id}»\")\n\n sample = db.fetch_row(\"\"\"\n select sample_id as id\n from warehouse.sample\n where details @> '{\"nwgc_id\": [%s]}'\n \"\"\", (int(nwgc_id),))\n\n if not sample:\n LOG.error(f\"No sample with NWGC ID «{nwgc_id}» found\")\n return None\n\n LOG.info(f\"Found sample {sample.id}\")\n return sample.id\n\n\ndef insert_sequence_read_set(db: DatabaseSession, sample_id: int, urls: list):\n \"\"\"\n Insert sequencing read set directly into warehouse.sequence_read_set,\n with the *sample_id* and *urls*.\n \"\"\"\n LOG.debug(f\"Inserting sequence read set for sample {sample_id}\")\n\n data = {\n \"sample_id\": sample_id,\n \"urls\": urls\n }\n\n sequence_read_set = db.fetch_row(\"\"\"\n insert into warehouse.sequence_read_set (sample_id, urls)\n values (%(sample_id)s, %(urls)s)\n returning sequence_read_set_id as id\n \"\"\", data)\n assert sequence_read_set.id, \"Insert failed\"\n return 
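The parse command above groups FASTQ files by a named capture group in the filename pattern; the grouping step in isolation (the filenames are made up):

import re
from collections import defaultdict

pattern = re.compile(r'^(?P<sample>\d+)_')
read_sets = defaultdict(list)
for name in ("318_R1.fastq.gz", "318_R2.fastq.gz", "422_R1.fastq.gz"):
    m = pattern.match(name)
    if m:
        read_sets[m.group("sample")].append(name)
# read_sets == {'318': [two files], '422': [one file]}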
sequence_read_set\n","repo_name":"seattleflu/id3c","sub_path":"lib/id3c/cli/command/sequence_read_set.py","file_name":"sequence_read_set.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"62"} +{"seq_id":"8397536104","text":"n=int(input())\nl=[]\nc=0\ndef is_prime(x):\n if x==0 or x==1:\n return 0\n for j in range(2,x//2+1):\n if x%j==0:\n return 0\n else:\n return 1\nfor i in range(1,n+1):\n if n%i==0:\n l.append(i)\nfor i in l:\n if is_prime(i)==0:\n c+=1\nprint(c)","repo_name":"Swathipatchigolla/codemind-python","sub_path":"Count_of_the_non-prime_divisors_of_a_given_number.py","file_name":"Count_of_the_non-prime_divisors_of_a_given_number.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33097861071","text":"import matplotlib.pyplot as plt\nimport scipy.io as sio\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom model import ConvBlock, DeConvBlock\nfrom tqdm import tqdm\nimport torch.nn.functional as F\n\n\ndef normalize_pixels(img):\n \"\"\"\n 归一化图片像素值\n :param img: 待归一化的图片\n :return: 归一化后的图片\n \"\"\"\n # 将像素值转换为 float 类型\n img = img.astype(np.float32)\n # 归一化像素值,将像素值范围缩放到 [0, 1]\n img = img / 255.0\n return img\n\n\norigin_data = sio.loadmat('Data/rgbd_mtv.mat')\nlabel = origin_data['gt'][:, 0]\nall_features = origin_data['X']\n\nview_shape = []\nviews = []\nfor v in all_features[0]:\n view_shape.append(v.shape[1])\n views.append(v)\n\n# 我们先从single_view 开始,以第一个视图为例\nsingle_view = views[0]\nnum_classes = np.unique(label).shape[0]\n\nreg1 = 1.0\nreg2 = 1.0\nalpha = max(0.4 - (num_classes - 1) / 10 * 0.1, 0.1)\nlr = 1e-3\n\nviews[0] = np.transpose(views[0], [0, 3, 1, 2])\nviews[1] = np.transpose(views[1], [0, 3, 1, 2])\n# views[0] = normalize_pixels(views[0])\ndel views[2]\ntensors = [torch.from_numpy(arr) for arr in (views)]\n\nfrom MvDSCN import MsDSCN\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n# init_model = MsDSCN(views_data=tensors, n_samples=label.shape[0], device=device, learning_rate=1e-3, epochs=200,\n# ft=False)\n# model = init_model.train()\n# torch.cuda.empty_cache()\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=input_dim, out_channels=64, kernel_size=3, stride=2, padding=1)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1)\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=output_dim, kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.conv3(x)\n x = F.relu(x)\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, input_dim, output_dim):\n super(Decoder, self).__init__()\n self.de_conv1 = nn.ConvTranspose2d(in_channels=input_dim, out_channels=64, kernel_size=3, stride=2,\n output_padding=1, padding=1)\n self.de_conv2 = nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=2,\n output_padding=1, padding=1)\n # self.de_conv3 = nn.ConvTranspose2d(in_channels=64, out_channels=output_dim, kernel_size=3, stride=2,\n # output_padding=1, padding=1)\n self.de_conv3 = nn.ConvTranspose2d(in_channels=64, out_channels=output_dim, kernel_size=3, stride=2,\n output_padding=1, padding=1)\n\n def forward(self, x):\n x = self.de_conv1(x)\n x = F.relu(x)\n x = 
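The divisor-counting record above trial-divides up to x//2 for every divisor; checking factors only up to sqrt(x) gives the same answer faster. A sketch with a worked value:

def is_prime(x):
    if x < 2:
        return False
    j = 2
    while j * j <= x:           # sqrt bound instead of x // 2
        if x % j == 0:
            return False
        j += 1
    return True

n = 12   # divisors 1,2,3,4,6,12 -> non-prime divisors 1,4,6,12
print(sum(1 for i in range(1, n + 1) if n % i == 0 and not is_prime(i)))  # 4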
self.de_conv2(x)\n x = F.relu(x)\n x = self.de_conv3(x)\n x = F.relu(x)\n return x\n\n\nclass AutoEncoderInit(nn.Module):\n def __init__(self, batch_size):\n super(AutoEncoderInit, self).__init__()\n # different view feature input\n self.batch_size = batch_size\n\n self.encoder1 = Encoder(input_dim=3, output_dim=64)\n self.encoder2 = Encoder(input_dim=1, output_dim=64)\n self.encoder1_single = Encoder(input_dim=3, output_dim=64)\n self.encoder2_single = Encoder(input_dim=1, output_dim=64)\n\n self.decoder1 = Decoder(input_dim=64, output_dim=3)\n self.decoder2 = Decoder(input_dim=64, output_dim=1)\n self.decoder1_single = Decoder(input_dim=64, output_dim=3)\n self.decoder2_single = Decoder(input_dim=64, output_dim=1)\n\n def forward(self, all_views_data):\n view_1_data = all_views_data[0]\n view_2_data = all_views_data[1]\n rec_x1 = self.decoder1(self.encoder1(view_1_data))\n rec_x2 = self.decoder2(self.encoder2(view_2_data))\n rec_x1_single = self.decoder1_single(self.encoder1_single(view_1_data))\n rec_x2_single = self.decoder2_single(self.encoder2_single(view_2_data))\n return rec_x1, rec_x2, rec_x1_single, rec_x2_single\n\n\nmodel = AutoEncoderInit(batch_size=500)\ntensors[0] = tensors[0].to(device)\ntensors[1] = tensors[1].to(device)\n\nmodel = model.to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0)\nloss_values = []\nepoch_iter = tqdm(range(300))\nfor epoch in epoch_iter:\n model.train()\n view1_out, view2_out, view1_out_single, view2_out_single = model(tensors)\n loss1 = 0.5 * torch.norm(view1_out - tensors[0], p=2) ** 2\n loss2 = 0.5 * torch.norm(view2_out - tensors[1], p=2) ** 2\n loss3 = 0.5 * torch.norm(view1_out_single - tensors[0], p=2) ** 2\n loss4 = 0.5 * torch.norm(view2_out_single - tensors[1], p=2) ** 2\n loss = loss1 + loss2 + loss3 + loss4\n epoch_iter.set_description(\n f\"# Epoch {epoch}, train_loss: {loss.item():.4f}, loss1: {loss1.item():.4f}, loss2: {loss2.item():.4f}, loss3: {loss3.item():.4f},loss4: {loss4.item():.4f}\")\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\nrec_1_data = view1_out.detach().cpu().numpy()\nrec_2_data = view2_out.detach().cpu().numpy()\nrec_3_data = view1_out_single.detach().cpu().numpy()\nrec_4_data = view2_out_single.detach().cpu().numpy()\nplt.figure(figsize=(8, 8))\nplt.imshow(np.transpose(np.clip(rec_1_data[1], 0, 1), [1, 2, 0]))\nplt.show()\nplt.imshow(np.transpose(np.clip(rec_2_data[1], 0, 1), [1, 2, 0]))\nplt.show()\nplt.imshow(np.transpose(np.clip(rec_3_data[1], 0, 1), [1, 2, 0]))\nplt.show()\nplt.imshow(np.transpose(np.clip(rec_4_data[1], 0, 1), [1, 2, 0]))\nplt.show()\nplt.imshow(np.transpose(np.clip(views[0][1], 0, 1), [1, 2, 0]))\nplt.show()\n","repo_name":"bbchond/Pytorch_MvDSCN","sub_path":"mvc_init_demo.py","file_name":"mvc_init_demo.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70386986758","text":"#!/usr/bin/env python3\n\n\nimport json\nimport boto3\nimport time\ncurrent_time=int(time.time())\n\nec2 = boto3.resource('ec2', region_name='us-west-2')\nclient = boto3.client('ec2')\n\n\ndef get_regions():\n \"\"\"either configure active regions here or derive regions from aws\"\"\"\n \"\"\"future code\n returns: l_regions : list of region names\n \"\"\"\n pass\n\ndef enumerate_vpc_subnets():\n \"\"\"WIP\"\"\"\n for vpc in ec2.vpcs.all():\n for subnet in vpc.subnets.all():\n print(vpc, \"all:\", subnet)\n\n for az in 
ec2.meta.client.describe_availability_zones()[\"AvailabilityZones\"]:\n        for subnet in vpc.subnets.filter(Filters=[{\"Name\": \"availabilityZone\", \"Values\": [az[\"ZoneName\"]]}]):\n            print(vpc, az[\"ZoneName\"], \"filter:\", subnet)\n\n\n\ndef get_subnets_only():\n    \"\"\"Collect subnets from VPC / region\"\"\"\n    my_cidrs=[]\n    my_cidrs_csv=[]\n    #print('[*] my_cidrs: ')\n\n    for i in client.describe_subnets()['Subnets']:\n        #print(i['CidrBlock'])\n        my_cidrs.append(i['CidrBlock'])\n\n\n    for i in my_cidrs:\n        #print('[*] : ', i)\n\n        x,y=i.split('/')\n\n        my_temp_string = x + ',' + y + ',' + '1' + ',' + str(current_time)\n        my_cidrs_csv.append(my_temp_string)\n        #print(my_temp_string)\n\n    return(my_cidrs_csv)\n\n    #my_cidrs_csv=[0,]\n\n# Add formatting for data discovery output. Select specific IDs useful for database population.\nfor i in client.describe_subnets()['Subnets']:\n    print('[*] SubnetID: {} CidrBlock: {}'.format(i['SubnetId'],i['CidrBlock']))\n\n\n\nif __name__ == '__main__':\n    enumerate_vpc_subnets()\n    get_subnets_only()\n\n    print('[*] function checking: ')\n    for i in get_subnets_only():\n        print(i)\n","repo_name":"mobycoder001/IPAVe","sub_path":"stubcode/get_cidrs.py","file_name":"get_cidrs.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"17627082047","text":" #importing the OpenCV module\r\nimport cv2\r\n\r\n#method to create video capturing object which will trigger the camera\r\ncap = cv2.VideoCapture(0)\r\n\r\n#setting the width and height of the frame\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)\r\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\r\n\r\n#using while loop for continuous display of frames until break condition\r\nwhile True:\r\n    #frame is a Numpy array representing first image that VideoCaptures\r\n    #_ is a bool data type that returns true if python reads VideoCapture object\r\n    _, frame = cap.read()\r\n    \r\n    #converting our frame from RGB model to HSV model\r\n    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n    \r\n    #frame.shape returns a tuple (r,c,ch)\r\n    #rows, columns and channels of frame assigned to height, width and _ resp.\r\n    height, width, _ = frame.shape\r\n\r\n    #getting the coordinates of the centre of frame\r\n    cx = int(width / 2)\r\n    cy = int(height / 2)\r\n    \r\n    # Picking pixel at the centre of the HSV frame\r\n    pixel_center = hsv_frame[cy, cx]\r\n    \r\n    #Picking the hue value out of HSV (Hue, Saturation and value)\r\n    hue_value = pixel_center[0]\r\n\r\n    #color detection for different hue values\r\n    if (hue_value < 7 or 175 < hue_value < 179):\r\n        color = \"RED\"\r\n    elif hue_value < 22:\r\n        color = \"ORANGE\"\r\n    elif hue_value < 33:\r\n        color = \"YELLOW\"\r\n    elif (33 < hue_value < 78) :\r\n        color = \"GREEN\"\r\n    elif hue_value < 131:\r\n        color = \"BLUE\"\r\n    elif hue_value < 170:\r\n        color = \"VIOLET\"\r\n    else:\r\n        color = \"RED\"\r\n    \r\n    # Picking pixel at the centre of the frame\r\n    pixel_center_bgr = frame[cy, cx]\r\n    \r\n    #storing the RGB values at centre of the frame in b,g,r\r\n    b, g, r = int(pixel_center_bgr[0]), int(pixel_center_bgr[1]), int(pixel_center_bgr[2])\r\n    \r\n    #placing a rectangular section , text varying color and circle detecting color on the frame\r\n    cv2.rectangle(frame, (cx - 220, 10), (cx + 200, 120), (255, 255, 255), -1)\r\n    cv2.putText(frame, color, (cx - 200, 100), 0, 3, (b, g, r), 5)\r\n    cv2.circle(frame, (cx, cy), 5, (25, 25, 25), 3)\r\n    \r\n    #method for displaying the frame\r\n    cv2.imshow(\"Frame\", frame)\r\n    \r\n    
#shows the output for 1 milliseconds but for infinite while loop it outputs\r\n #sequence of images\r\n key = cv2.waitKey(1)\r\n \r\n #window will be destroyed on pressing the key 27\r\n if key == 27:\r\n break\r\n \r\n#release the VideoCapture object\r\ncap.release()\r\n\r\n#close all the windows currently opened\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sajidjavid222/OpenCV_Projects","sub_path":"color_recognition_comments.py","file_name":"color_recognition_comments.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"71272638599","text":"import cv2\nimport numpy as np\nfrom scipy.interpolate import UnivariateSpline\n\nclass Cool(object):\n\t\"\"\"cool_filter ---\n\t\tThis class will apply cool filter to an image \n\t\tby giving a sky blue effect to the input image.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t# create look-up tables for increasing and decreasing red and blue resp.\n\t\tself.increaseChannel = self.LUT_8UC1([0, 64, 128, 192, 256],\n\t\t\t\t\t\t\t\t\t\t\t\t [0, 70, 140, 210, 256])\n\t\tself.decreaseChannel = self.LUT_8UC1([0, 64, 128, 192, 256],\n\t\t\t\t\t\t\t\t\t\t\t\t [0, 30, 80, 120, 192])\n\n\tdef resize(self,image,window_height = 500):\n\t\taspect_ratio = float(image.shape[1])/float(image.shape[0])\n\t\twindow_width = window_height/aspect_ratio\n\t\timage = cv2.resize(image, (int(window_height),int(window_width)))\n\t\treturn image\t\n\t\t\n\tdef render(self, img_rgb):\n\t\timg_rgb = cv2.imread(img_rgb)\n\t\timg_rgb = self.resize(img_rgb, 500)\n\t\t#cv2.imshow(\"Original\", img_rgb)\n\t\tr,g,b = cv2.split(img_rgb)\n\t\tr = cv2.LUT(r, self.increaseChannel).astype(np.uint8)\n\t\tb = cv2.LUT(b, self.decreaseChannel).astype(np.uint8)\n\t\timg_rgb = cv2.merge((r,g,b))\n\n\t\t# saturation decreased\n\t\th,s,v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))\n\t\ts = cv2.LUT(s, self.decreaseChannel).astype(np.uint8)\n\n\n\t\treturn cv2.cvtColor(cv2.merge((h,s,v)), cv2.COLOR_HSV2RGB)\n\n\tdef LUT_8UC1(self, x, y):\n\t\t#Create look-up table using scipy spline interpolation function\n\t\tspl = UnivariateSpline(x, y)\n\t\treturn spl(range(256))\n\n\tdef start(self, img_path):\n\t\ttmp_canvas = Cool() #make a temporary object\n\t\tfile_name = img_path #File_name will come here\n\t\tres = tmp_canvas.render(file_name)\n\t\tcv2.imwrite(\"Cool_version.jpg\", res)\n\t\tcv2.imshow(\"Cool version\", res)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()\n\t\tprint(\"Image saved as 'Cool_version.jpg'\")\n","repo_name":"ishita27/Image-Filters","sub_path":"cool_filter.py","file_name":"cool_filter.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"62"} +{"seq_id":"32387087908","text":"#!/usr/bin/env python3\r\n# _*_ coding:utf-8 _*_\r\n\r\nfrom PIL import Image,ImageDraw\r\nimport face_recognition\r\nimport os\r\n\r\n# Load the jpg file into a numpy array\r\ndir=\"images\"\r\nfiles=os.listdir(dir)\r\nfilepath=dir+\"/img.jpg\"\r\nimage = face_recognition.load_image_file(filepath)\r\n\r\n'''\r\ndir=\"images\"\r\nfiles=os.listdir(dir)\r\nfor file in files:\r\n filepath=dir+\"/\"+file\r\n image = face_recognition.load_image_file(filepath)\r\n'''\r\n\r\n# Find all the faces in the image using the default HOG-based model.\r\n# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.\r\n# See also: find_faces_in_picture_cnn.py\r\nface_locations = 
face_recognition.face_locations(image)\r\n\r\nprint(\"I found {} face(s) in this photograph.\".format(len(face_locations)))\r\n\r\nfor face_location in face_locations:\r\n\r\n    # Print the location of each face in this image\r\n    top, right, bottom, left = face_location\r\n    print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\r\n\r\n    imgsplit = os.path.split(filepath)[1]\r\n    imgtitle = os.path.splitext(imgsplit)[0]\r\n\r\n    # You can access the actual face itself like this:\r\n    # save face image\r\n    face_image = image[top:bottom, left:right]\r\n    pil_face_image = Image.fromarray(face_image)\r\n    pil_face_image.save('./faceimg/' + imgtitle + '_faceimg.jpg')\r\n\r\n    pil_image = Image.fromarray(image)\r\n    face = ImageDraw.Draw(pil_image, 'RGBA')\r\n    face.rectangle((right, top, left, bottom))\r\n    pil_image.save('./testimg/' + imgtitle + '_testimg.jpg')\r\n    pil_image.show()","repo_name":"0xFlag/Face-Recognition","sub_path":"face_recognition/find_face/find_face_PIL.py","file_name":"find_face_PIL.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"15087687152","text":"from moviepy.editor import *\nimport ffmpy\nfrom os import walk\nimport tkinter.messagebox as mb\nimport random\n\ndef convert(outputExt, outputSize, firstCut, secondCut):\n    print(\"output extension: \", outputExt)\n    print(\"output size: \", outputSize)\n    print(\"start cut: \", firstCut)\n    print(\"finish cut: \", secondCut)\n\n    def process(filename, fileExt):\n        clip = VideoFileClip(f\"input/{filename}{fileExt}\")\n        \n        if secondCutInSec > 0 and firstCutInSec < secondCutInSec:\n            clip = clip.subclip(firstCutInSec, secondCutInSec)\n        \n        if outputSize == '1280:720' or outputSize == '1920:1080':\n            clip = clip.fx(vfx.resize, width = int(outputSize.split(\":\")[0]))\n        \n        outputName = f'output-{random.randint(0, 99999999)}'\n\n        if outputExt == \"mp4\":\n            clip.write_videofile(f\"output/{outputName}.mp4\", codec=\"libx264\")\n        elif outputExt == \"avi\":\n            clip.write_videofile(f\"output/{outputName}.avi\", codec=\"rawvideo\")\n        elif outputExt == \"mkv\":\n            clip.write_videofile(f\"bin/temp/{outputName}.mp4\", codec=\"mpeg4\")\n            ff = ffmpy.FFmpeg(\n                executable='bin/ffmpeg/ffmpeg.exe',\n                inputs={f'bin/temp/{outputName}.mp4': None},\n                outputs={f'output/{outputName}.mkv': None}\n            )\n            ff.run() # and then convert to mkv\n\n        clip.close()\n    \n    firstCutInSec = (int(firstCut.split(\":\")[0])*3600) + (int(firstCut.split(\":\")[1])*60) + int(firstCut.split(\":\")[2])\n    secondCutInSec = (int(secondCut.split(\":\")[0])*3600) + (int(secondCut.split(\":\")[1])*60) + int(secondCut.split(\":\")[2])\n\n    filenames = next(walk(\"input/\"), (None, None, []))[2] # [] if no file\n    print(filenames)\n    extentionsToWorkOn = [\".mp4\", \".avi\", \".mkv\"]\n    for i in filenames:\n        filename = i.split(\".\")[0]\n        fileExt = f'.{i.split(\".\")[-1]}'\n        if fileExt in extentionsToWorkOn:\n            process(filename, fileExt)\n\n    print(\"################\\n\\033[32m process finished \\033[0m\\n################\")\n    msg = \"The conversion process is finished! The converted files will be saved to the output folder\"\n    mb.showinfo(\"Process finished\", msg)","repo_name":"mlt-melt/video-converter","sub_path":"source code/converterMain.py","file_name":"converterMain.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
{"seq_id":"2146247792","text":"import unittest\nfrom pymgrid.utils.DataGenerator import *\nfrom pandas import Series\nfrom pandas.testing import assert_frame_equal, assert_series_equal\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nfrom matplotlib import pyplot as plt\n\ndef create_pv_test_set():\n    test_set = np.zeros(48)\n    test_set[7:20] = np.arange(7,20)\n    test_set[7:20] = -20*(test_set[7:20]-13)**2 + 30*-1**(np.arange(7,20) % 2) + 5*(np.arange(6,19) %5)\n    test_set[7:20] += -1*np.min(test_set[7:20])\n\n    n = 24\n    test_set[7+n:20+n] = np.arange(7,20)\n    test_set[7+n:20+n] = -20*(test_set[7+n:20+n]-13)**2 + 20*-1**(np.arange(6,19) % 2) + 4*(np.arange(6,19) %3)\n    test_set[7+n:20+n] += -1*np.min(test_set[7+n:20+n])\n\n    return test_set/10\n\n\nclass TestNoisyPV(unittest.TestCase):\n    def setUp(self):\n\n        self.test_data = create_pv_test_set()\n        self.test_series = Series(data = self.test_data)\n\n\n    def test_init(self):\n        NPV = NoisyPVData(pv_data = self.test_series)\n        df = pd.DataFrame(self.test_data)\n        assert_frame_equal(NPV.unmunged_data, df)\n        assert_frame_equal(NPV.data, df)\n\n    def test_data_munge(self):\n        NPV = NoisyPVData(pv_data=self.test_series)\n        NPV.data_munge()\n\n        # Assertions:\n        assert_array_equal(NPV.data.values[:,0],self.test_data[:24])\n        assert_array_equal(NPV.data.values[:,1],self.test_data[24:])\n        assert_array_equal(NPV.daily_maxes['time_of_max'].values, np.array([13,13]))\n        assert_array_equal(NPV.daily_maxes['cumulative_hr'], np.array([13, 37]))\n        self.assertTrue(NPV.munged)\n\n    def test_add_feature_columns(self):\n        NPV = NoisyPVData(pv_data=self.test_series)\n        NPV.data_munge()\n\n        num_feature_functions = 1\n        period_scale = 0.8\n\n        NPV._add_feature_columns(num_feature_functions=num_feature_functions, period_scale=period_scale)\n\n        self.assertIn('ones', NPV.daily_maxes.columns.values)\n        self.assertIn('cos1x', NPV.daily_maxes.columns.values)\n        assert_array_equal(NPV.daily_maxes['ones'], np.array([1,1]))\n        cos1x = np.cos(\n            2 * np.pi / 8760. 
* period_scale * (NPV.daily_maxes['cumulative_hr'] - 173 * 24))\n assert_array_equal(NPV.daily_maxes['cos1x'], cos1x)\n\n self.assertListEqual(NPV.feature_names, ['ones', 'cos1x'])\n\n for name in NPV.feature_names:\n assert_array_equal(NPV.feature_functions[name](NPV.daily_maxes['cumulative_hr']).values, NPV.daily_maxes[name].values)\n\n\nclass TestNoisyGrid(unittest.TestCase):\n\n def setUp(self) -> None:\n always_on = np.ones(48)\n self.always_on = pd.Series(always_on)\n self.with_outages = self.always_on.copy()\n self.with_outages.iloc[3:6] = 0\n self.with_outages.iloc[40:47] = 0\n self.with_outages_data = dict(naive_probabilities = np.array([10/48, 38/48]),\n occurences=np.array([10,37]),\n transition_prob_matrix = np.array([\n [8 / 10, 2 / 10],\n [2/37, 35/37]\n ]))\n self.dist_types = ('naive', 'markov')\n\n def test_init(self):\n\n for dist_type in self.dist_types:\n for data in self.always_on, self.with_outages:\n NGD = NoisyGridData(data,dist_type=dist_type)\n assert_series_equal(data, NGD.data)\n assert_series_equal(data, NGD.unmunged_data)\n\n def test_bad_grid_data(self):\n grid_data = self.with_outages.copy()\n grid_data[5] = -3\n try:\n NoisyGridData(grid_data)\n except ValueError:\n pass\n except Exception:\n self.fail('unexpected exception raised')\n else:\n self.fail('ValueError not raised')\n grid_data[5] = 1.1\n try:\n NoisyGridData(grid_data)\n except ValueError:\n pass\n except Exception:\n self.fail('unexpected exception raised')\n else:\n self.fail('ValueError not raised')\n\n def test_learn_distribution_always_on_naive(self):\n NGD = NoisyGridData(self.always_on, dist_type='naive')\n self.assertFalse(NGD.has_distribution)\n NGD.learn_distribution()\n self.assertTrue(NGD.has_distribution)\n\n assert_array_equal(NGD.transition_prob_matrix, np.array([0, 1]))\n\n def test_learn_distribution_always_on_markov(self):\n NGD = NoisyGridData(self.always_on, dist_type='markov')\n self.assertFalse(NGD.has_distribution)\n NGD.learn_distribution()\n self.assertTrue(NGD.has_distribution)\n\n assert_array_equal(NGD.occurrences, np.array([0, 47]))\n assert_array_equal(NGD.transition_prob_matrix, np.array([[1,0],[0,1]]))\n\n def test_learn_distribution_with_outages_naive(self):\n NGD = NoisyGridData(self.with_outages, dist_type='naive')\n self.assertFalse(NGD.has_distribution)\n NGD.learn_distribution()\n self.assertTrue(NGD.has_distribution)\n\n assert_array_almost_equal(NGD.transition_prob_matrix, self.with_outages_data['naive_probabilities'])\n\n def test_learn_distribution_with_outages_markov(self):\n NGD = NoisyGridData(self.with_outages, dist_type='markov')\n self.assertFalse(NGD.has_distribution)\n NGD.learn_distribution()\n self.assertTrue(NGD.has_distribution)\n\n assert_array_almost_equal(NGD.occurrences, self.with_outages_data['occurences'])\n assert_array_almost_equal(NGD.transition_prob_matrix, self.with_outages_data['transition_prob_matrix'])\n\n def test_sample_always_on_naive(self):\n NGD = NoisyGridData(self.always_on, dist_type='naive')\n NGD.learn_distribution()\n sample = NGD.sample()\n assert_array_equal(sample, np.ones(48))\n\n def test_sample_always_on_markov(self):\n NGD = NoisyGridData(self.always_on, dist_type='markov')\n NGD.learn_distribution()\n sample = NGD.sample()\n assert_array_equal(sample, np.ones(48))\n\n def test_sample_with_outages_naive(self):\n # This is a ridiculous unit test All it does is check that the data generated from the probability distribution\n # matches the distribution. 
Thus, can fail randomly.\n np.random.seed(0)\n num_tests = 50\n\n NGD = NoisyGridData(self.with_outages, dist_type='naive')\n NGD.learn_distribution()\n\n probs_list = []\n for j in range(num_tests):\n sample = NGD.sample()\n new_NGD = NoisyGridData(sample, dist_type='naive')\n new_NGD.learn_distribution()\n probs_list.append(new_NGD.transition_prob_matrix)\n\n transition_prob_mean = np.mean(np.array(probs_list), axis=0)\n assert_array_almost_equal(self.with_outages_data['naive_probabilities'], transition_prob_mean, decimal=2)\n\n def test_sample_with_outages_markov(self):\n \"\"\"\n This is also a ridiculous unit test. All it does is check that the data generated from the probability distribution\n matches the distribution. Thus, can fail randomly.\n :return:\n \"\"\"\n\n np.random.seed(0)\n num_tests = 50\n\n NGD = NoisyGridData(self.with_outages, dist_type='markov')\n NGD.learn_distribution()\n\n probs_list = []\n for j in range(num_tests):\n sample = NGD.sample()\n new_NGD = NoisyGridData(sample, dist_type='markov')\n new_NGD.learn_distribution()\n probs_list.append(new_NGD.transition_prob_matrix)\n\n transition_prob_mean = np.mean(np.array(probs_list), axis=0)\n assert_array_almost_equal(self.with_outages_data['transition_prob_matrix'], transition_prob_mean, decimal=1)\n\n\nclass TestNoisyLoad(unittest.TestCase):\n def setUp(self) -> None:\n self.n_days = 12\n\n load_data = np.array([304, 205, 200, 200, 202, 306, 524, 611, 569, 466, 571, 579, 569, 470, 466, 465, 597, 625, 620, 525, 521, 524, 522, 531, 305, 200, 199, 200, 202, 306, 524, 611, 568, 466, 568, 579, 569, 467, 467, 466, 597, 626, 626, 525, 525, 524, 522, 533])\n load_data = np.concatenate([load_data + j % 5 for j in range(int(self.n_days/2))])\n\n self.load_data = pd.Series(data = load_data)\n\n def test_init(self):\n NLD = NoisyLoadData(load_data=self.load_data)\n assert_frame_equal(NLD.data, self.load_data.to_frame())\n assert_frame_equal(NLD.unmunged_data, self.load_data.to_frame())\n\n def test_data_munge(self):\n NLD = NoisyLoadData(load_data=self.load_data)\n\n self.assertFalse(NLD.munged)\n NLD.data_munge()\n self.assertTrue(NLD.munged)\n\n self.assertTupleEqual(NLD.load_mean.shape, (7,24))\n self.assertTupleEqual(NLD.load_std.shape, (7,24))\n\n self.assertEqual(NLD.data.shape[0], self.n_days)\n self.assertFalse(np.isnan(NLD.load_mean).any(axis=None))\n self.assertFalse(np.isnan(NLD.load_std).any(axis=None))\n\n for j in range(7):\n NLD_computed_avg = NLD.load_mean.iloc[j,:].values\n NLD_computed_std = NLD.load_std.iloc[j,:].values\n\n\n slice = self.load_data[24*j:24*(j+1)].values\n\n for k in range(1, (self.n_days-j) // 7 + 1):\n if (j+k*7) >= self.n_days:\n continue\n slice = np.stack((slice,self.load_data[24*(j+k*7):24*(j+k*7+1)]))\n\n if len(slice.shape) == 1:\n slice = slice.reshape((1, 24))\n assert_array_almost_equal(NLD_computed_avg, np.mean(slice, axis=0))\n else:\n assert_array_almost_equal(NLD_computed_avg, np.mean(slice, axis=0))\n assert_array_almost_equal(NLD_computed_std, np.std(slice, axis=0, ddof=1))\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"Total-RD/pymgrid","sub_path":"tests/control/data_generation/test_data_generator.py","file_name":"test_data_generator.py","file_ext":"py","file_size_in_byte":9924,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"62"} +{"seq_id":"1999392552","text":"from django.urls import path\nfrom . 
import views\n\n\"\"\" url handlers for the cart page\"\"\"\n\nurlpatterns = [\n path('order_cart/', views.view_order, name='view_order'),\n path('add//', views.add_to_cart, name='add_to_cart'),\n path('update//', views.update_cart, name='update_cart'),\n]\n","repo_name":"Dorcas-Amoo/ms4-project-arikefoods","sub_path":"order/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9151911467","text":"#!/usr/bin/env python3\n\nimport re\nimport os\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom sys import exit, stderr\nfrom time import process_time as time\n\nfrom generators import available_generators, create, reseed\nfrom randomness_test import nist_sp_800_22_tests\nfrom randomness_test import ent_tests as ent\nfrom utils.unit_tools import nicer_time\n\n\ndef main():\n try:\n generator_ids, n_bits, seed, rounds, tex_path = parse_arguments()\n except ValueError as e:\n print(e, file=stderr)\n exit(1)\n return # to remove inspection warnings\n\n print(\"RUN STAT TESTS\\nGenerators: %s\\nRounds: %s; Bits: %s; Seed: %s; TeX Path: %s\\n%s\"\n % (', '.join(generator_ids), \"{:,}\".format(rounds), \"{:,}\".format(n_bits), str(seed),\n tex_path, '=' * 80))\n\n run_all(generator_ids, n_bits, seed, rounds, tex_path)\n\n\ndef parse_arguments():\n parser = ArgumentParser(description='(Pseudo)Random Number Generators')\n parser.add_argument('-g', '--generator',\n help='The ID of generator to be tested instead of file.')\n parser.add_argument('-f', '--file',\n help='File with sequence to be tested instead of generator.')\n parser.add_argument('-n', '--bits', help='Total number of tested bits.', default='1M')\n parser.add_argument('-s', '--seed', help='Initial seed.')\n parser.add_argument('-r', '--rounds', help='Number of rounds to run each test.', default=1)\n parser.add_argument('-o', '--output', help='Output directory to write the results.')\n args = parser.parse_args()\n\n if args.seed:\n seed = int(args.seed)\n else:\n seed = None\n\n if (args.generator and args.file) or (not args.generator and not args.file):\n raise ValueError(\"Either generator or file should be specified\")\n\n if args.file:\n if not os.path.isfile(args.file):\n raise ValueError(\"Source file \" + args.file + \" does not exist\")\n generator_ids = ['file:' + args.file]\n seed = 0\n else:\n if args.generator == 'ALL':\n generator_ids = available_generators\n elif args.generator in available_generators:\n generator_ids = [args.generator]\n else:\n raise ValueError(\"Undefined generator. 
Available generators are:\\n\" +\n ', '.join(available_generators))\n\n if args.bits.lower().endswith(('k', 'm', 'g')):\n n_bits = int(args.bits[:-1]) * (10 ** {'k': 3, 'm': 6, 'g': 9}[args.bits.lower()[-1:]])\n else:\n n_bits = int(args.bits)\n\n rounds = int(args.rounds)\n\n output_path = args.output\n if output_path and not os.path.isdir(output_path):\n raise ValueError(\"Specified TeX path \" + output_path + \" does not exist.\")\n\n return generator_ids, n_bits, seed, rounds, output_path\n\n\ndef run_all(generator_ids, n_bits, seed=None, rounds=1, output_path=None):\n for gen_id in generator_ids:\n generator = create(gen_id, seed)\n # print('\\n- '.join(generator.info()))\n print(\"Testing of generator\", generator.NAME.upper())\n\n run_ent_tests(gen_id, generator, seed, n_bits, rounds, output_path)\n\n generator = create(gen_id, seed) # recreate the generator\n run_nist_tests(gen_id, generator, seed, n_bits, rounds, output_path)\n\n # TODO: FIPS\n\n\ndef run_ent_tests(generator_id, generator, seed, n_bits, rounds, output_path):\n filename_pattern = 'gen-%s-ent-tests-%dr'\n filename = filename_pattern % (re.sub(r\"[^A-Za-z0-9._-]+\", '_', generator_id), rounds)\n\n # save the initial info about the generator (e.g. the seed)\n generator_info = generator.info()\n\n time_start = time()\n all_results = OrderedDict(\n [('state', []), ('entropy', []), ('chi_sq', []), ('mean', []), ('monte_pi', []), ('scc', [])])\n for i in range(rounds):\n print('-' * 80, \"\\nRUNNING ENT TESTS, round\", i + 1)\n if seed is None:\n cur_seed = reseed(generator)\n print(\"New seed:\", cur_seed)\n print('\\n- '.join(generator.info()))\n entropy, chi_sq, mean, monte_pi, scc = ent.run_all(generator, n_bits, print_log=True)\n\n all_results['state'].append(generator.state())\n all_results['entropy'].append(entropy)\n all_results['chi_sq'].append(chi_sq)\n all_results['mean'].append(mean)\n all_results['monte_pi'].append(monte_pi)\n all_results['scc'].append(scc)\n\n if output_path:\n tsv = '# ENT statistical test results\\n'\n tsv += '# !!! 
INTERMEDIATE DATA AFTER %d ROUNDS\\n' % (i + 1)\n tsv += '# %s\\n' % '\\n# '.join(generator_info)\n tsv += generate_tsv_results(all_results)\n tsv += '\\n# Final generator state: %s\\n' % str(generator.getstate())\n tsv += '# Final generator info: %s\\n' % ', '.join(generator_info)\n file = save_to_file(\n output_path, '.intermediary-'+filename, 'tsv', tsv)\n print(\"(intermediary TSV written to \" + file + ')', flush=True)\n\n time_elapsed = time() - time_start\n\n print(\"*** Finished\", rounds, \"rounds of NIST tests for\", \"{:,}\".format(n_bits),\n \"bits each, in\", nicer_time(time_elapsed), \"seconds\", flush=True)\n\n # add info\n info = generator_info + [\n '', # empty line\n 'ENT statistical test results',\n '%d rounds of %s bits long samples' % (rounds, \"{:,}\".format(n_bits)),\n 'Time elapsed for generation and testing: ' + nicer_time(time_elapsed)]\n\n # generate TSV data tables\n tsv = '# ' + '\\n# '.join(info) + '\\n#\\n'\n tsv += generate_tsv_results(all_results)\n tsv += '\\n# Final generator state: %s\\n' % str(generator.getstate())\n tsv += '# Final generator info: %s\\n' % ', '.join(generator.info())\n if output_path:\n file = save_to_file(output_path, filename, 'tsv', tsv)\n print(\"TSV written to \" + file, flush=True)\n else:\n print(tsv, flush=True)\n\n\ndef run_nist_tests(generator_id, generator, seed, n_bits, rounds, output_path):\n filename_pattern = 'gen-%s-nist-tests-%dr'\n filename = filename_pattern % (re.sub(r\"[^A-Za-z0-9._-]+\", '_', generator_id), rounds)\n\n # save the initial info about the generator (e.g. the seed)\n generator_info = generator.info()\n\n time_start = time()\n all_p_values = OrderedDict([('state', [])])\n for i in range(rounds):\n print('-' * 80, \"\\nRUNNING NIST TESTS, round\", i + 1)\n if seed is None:\n cur_seed = reseed(generator)\n print(\"New seed:\", cur_seed)\n print('\\n- '.join(generator.info()))\n p_values = nist_sp_800_22_tests.run_all(generator, n_bits, print_log=True)\n\n all_p_values['state'].append(generator.state())\n for pval, test_id, test_name in p_values:\n if test_id not in all_p_values:\n all_p_values[test_id] = []\n all_p_values[test_id].append(pval)\n\n if output_path:\n tsv = '# NIST SP 800-22 statistical test results\\n'\n tsv += '# !!! 
INTERMEDIATE DATA AFTER %d ROUNDS\\n' % (i + 1)\n tsv += '# %s\\n' % '\\n# '.join(generator_info)\n tsv += generate_tsv_results(all_p_values)\n tsv += '\\n# Final generator state: %s\\n' % str(generator.getstate())\n tsv += '# Final generator info: %s\\n' % ', '.join(generator_info)\n file = save_to_file(\n output_path, '.intermediary-'+filename, 'tsv', tsv)\n print(\"(intermediary TSV written to \" + file + ')', flush=True)\n\n time_elapsed = time() - time_start\n\n print(\"*** Finished\", rounds, \"rounds of NIST tests for\", \"{:,}\".format(n_bits),\n \"bits each, in\", nicer_time(time_elapsed), \"seconds\", flush=True)\n\n # add info\n info = generator_info + [\n '', # empty line\n 'NIST SP 800-22 statistical test results',\n '%d rounds of %s bits long samples' % (rounds, \"{:,}\".format(n_bits)),\n 'Time elapsed for generation and testing: ' + nicer_time(time_elapsed)]\n\n # generate TSV data tables\n tsv = '# ' + '\\n# '.join(info) + '\\n#\\n'\n tsv += generate_tsv_results(all_p_values)\n tsv += '\\n# Final generator state: %s\\n' % str(generator.getstate())\n tsv += '# Final generator info: %s\\n' % ', '.join(generator.info())\n if output_path:\n file = save_to_file(output_path, filename, 'tsv', tsv)\n print(\"TSV written to \" + file, flush=True)\n else:\n print(tsv, flush=True)\n\n\ndef generate_tsv_results(data):\n tsv = ''\n\n for test_name, test_values in data.items():\n if not test_values:\n continue\n\n if type(test_values[0]) in (list, tuple):\n test_values = [','.join(map(str, vv)) for vv in test_values]\n\n line = [test_name] + test_values\n\n tsv += '\\t'.join(map(str, line)) + '\\n'\n\n return tsv\n\n\ndef save_to_file(path, basename, ext, data):\n if not ext.startswith('.'):\n ext = '.' + ext\n if not os.path.isdir(path):\n raise ValueError(\"Path \" + path + \" is not a valid directory\")\n basename = basename.replace('_', '-') # underscores are unsafe in TeX documents\n filename = get_available_filename(path + '/' + basename, ext)\n with open(filename, 'w') as f:\n f.write(data)\n return filename\n\n\ndef get_available_filename(file, ext):\n if not os.path.exists(file + ext):\n return file + ext\n patt = '%s(%d)%s'\n i = 2\n filename = patt % (file, i, ext)\n while os.path.exists(filename):\n i += 1\n filename = patt % (file, i, ext)\n return filename\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"czechnology/py-prng","sub_path":"run_all_tests.py","file_name":"run_all_tests.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"9743068720","text":"class Employee:\r\n\tdef __init__(self, first, last, pay):\r\n\t\tself.first = first\r\n\t\tself.last = last\r\n\t\tself.pay = pay\r\n\t\tself.email = first + '.'+ last + '@bla.com'\r\n\t\r\n\tdef fullname(self):\r\n\t\treturn '{} {}'.format(self.first, self.last)\r\n\t\r\n\t\r\n#before adding stuff to the class(init func)\r\nemp_1 = Employee('Cory', 'Barlog', 75000)\r\nemp_2 = Employee('Hannes', 'Hennes', 40000)\r\n\r\nprint(emp_1)\r\nprint(emp_2)\r\n\r\nemp_1.first = 'Cory'\r\nemp_1.last = 'Barlog'\r\nemp_1.email = 'Cory.Barlog@bla.com'\r\nemp_1.pay = 75000\r\n\r\nemp_2.first = 'Hannes'\r\nemp_2.last = 'Hennes'\r\nemp_2.email = 'Hannes.Hennes@bla.com'\r\nemp_2.pay = 
40000\r\n\r\nprint(emp_1.email)\r\nprint(emp_2.email)\r\n\r\nprint(emp_2.fullname())\r\n","repo_name":"AntonStahl/Python-practise","sub_path":"OOP_learning.py","file_name":"OOP_learning.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70329059078","text":"\"\"\"\nAsync_port_scanner\n\"\"\"\nimport asyncio\nimport constant\nfrom contextlib import suppress\nimport time\n\n\nasync def connect_socket(port: int):\n \"\"\"\n Connect to given IP and port\n\n :param port: The port for the socket to connect.\n \"\"\"\n with suppress(Exception):\n await asyncio.open_connection(constant.ip, port)\n print(f\"port {port} found!\")\n\n\ndef time_measure(func):\n \"\"\"\n Measure time for execution of asynchronous method\n\n :param func: The asynchronous function to be measured.\n \"\"\"\n async def wrapper(*args, **params):\n start_time = time.time()\n result = await func(*args, **params)\n end_time = time.time()\n print(f\"{end_time - start_time} seconds took!\")\n return result\n return wrapper\n\n\n@time_measure\nasync def main():\n \"\"\"\n Execute connect_socket method through many ports asynchronously\n \"\"\"\n await asyncio.gather(*[connect_socket(port)\n for port in range(constant.start_port, constant.end_port)])\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"elad-rubinstein/port-scanner","sub_path":"async port scanner.py","file_name":"async port scanner.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22960687618","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom word_count.skeleton import fib\n\n__author__ = \"Jiri Harazim\"\n__copyright__ = \"Jiri Harazim\"\n__license__ = \"mit\"\n\n\ndef test_fib():\n assert fib(1) == 1\n assert fib(2) == 1\n assert fib(7) == 13\n with pytest.raises(AssertionError):\n fib(-10)\n","repo_name":"jiri-harazim/databricks-public","sub_path":"examples/ci_cd/word-count/tests/test_skeleton.py","file_name":"test_skeleton.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71961741316","text":"\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from propagated_basis_optimization import\nfrom src.propagated_basis_optimization import complex_initializer_random, complex_mul, split_complex, plot_modes\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\n\ndef dummy_image(slm_size):\n imag = np.zeros(shape=(slm_size, slm_size))\n hw = slm_size*7//16\n imag[hw:-hw, :] = 1.\n imag[:, hw:-hw] = 1.\n x = np.linspace(0., slm_size, slm_size) - slm_size/2\n y = x\n X, Y= np.meshgrid(x, y)\n imag[np.sqrt(X**2 + Y**2) < slm_size/4]=1.\n return imag\n\ndef plot_complex(image):\n fig, axs = plt.subplots(2)\n axs[0].imshow(np.abs(image))\n axs[0].axis('off')\n # plt.colorbar(ax=axs[0])\n axs[1].imshow(np.angle(image))\n axs[1].axis('off')\n # plt.colorbar()\n plt.show()\n\ndef plot_filter(slm_size):\n image = dummy_image()\n image_fft = np.fft.fftshift(np.fft.fft2(image))\n\n filter = np.zeros(shape=image.shape)\n filter_width = 5\n filter[\n int(slm_size/2 - filter_width//2): int(slm_size/2 + filter_width//2 + 1),\n int(slm_size/2 - filter_width//2): int(slm_size/2 + filter_width//2 + 1),\n ] = 1.\n image_filtered_fft = image_fft * filter\n\n 
plot_complex(image)\n    plot_complex(image_fft)\n    plot_complex(image_filtered_fft)\n    plot_complex(np.fft.ifft2(np.fft.ifftshift(image_filtered_fft)))\n\ndef generate_fourier_modes(slm_size):\n    n = slm_size\n    pad_n = 80\n    hole_size = (n - 2 * pad_n)\n\n    print(\"generating {} modes\".format(hole_size**2))\n\n\n    filter = np.ones(shape=(hole_size, hole_size))\n    filter = np.pad(filter, pad_width=pad_n, mode='constant')\n\n    modes = []\n\n    i_list, j_list = np.nonzero(filter)\n    for i, j in zip(i_list, j_list):\n        fd = np.zeros(shape=(n, n), dtype=np.complex128)\n        fd[i, j] = 1.\n        mode = np.fft.ifft2(np.fft.ifftshift(fd))\n        modes.append(mode)\n\n    modes = np.array(modes)\n    # plot_modes(np.real(modes))\n    # plot_modes(np.imag(modes))\n\n    return np.transpose(modes, axes=(1, 2, 0))\n\n\ndef load_composed_modes():\n    import pickle\n    data = pickle.load(open(\"../src/data/two_ms.p\", \"rb\"))\n    # return tf.transpose(data['forward'], perm=(1, 2, 0))\n    return tf.transpose(data['forward'], perm=(1, 2, 0))\n\n\nif __name__=='__main__':\n    @tf.function\n    def forward():\n        image_modes_real, image_modes_imag = complex_mul(\n            modes_real, modes_imag,\n            weights_real, weights_imag\n        )\n\n        image_field_real = tf.reduce_sum(image_modes_real, axis=-1)\n        image_field_imag = tf.reduce_sum(image_modes_imag, axis=-1)\n\n        image_intensity = image_field_real**2 + image_field_imag**2\n        return image_intensity\n\n    def loss():\n        f = forward()\n        ax1.clear()\n        ax1.imshow(f)\n\n        frames.append(f.numpy())\n\n        #return tf.reduce_sum(tf.abs(image-f)**2) #\n        return -tf.reduce_sum(image * f)/n_weights\n\n\n    def update(_):\n        with tf.GradientTape() as tape:\n            tape.watch(weights_real)\n            tape.watch(weights_imag)\n            current_loss = loss()\n        grads = tape.gradient(target=current_loss, sources=[weights_real, weights_imag, ])\n\n        # print(\"Grads: {}\".format(grads))\n\n        optimizer.apply_gradients(zip(grads, [weights_real, weights_imag, ]))\n\n\n        mode = 'phase'\n        if mode == 'intensity_unbounded': # phase is fixed, mag is arbitrary\n            # make pixels intensity only, unbounded\n            mag = tf.math.sqrt(weights_real ** 2 + weights_imag ** 2)\n            weights_real.assign(mag)\n            weights_imag.assign(weights_imag * 0.)\n        elif mode == 'intensity_bounded':\n            weights_real.assign(tf.clip_by_value(weights_real, 0., 1.))\n            weights_imag.assign(weights_imag * 0.)\n        elif mode == 'phase': # mag is fixed at 1, phase is arbitrary\n            angle = tf.math.atan2(weights_imag, weights_real)\n            weights_real.assign(tf.math.cos(angle))\n            weights_imag.assign(tf.math.sin(angle))\n        elif mode == 'arb_bound': # mag is <= 1, phase is free\n            angle = tf.math.atan2(weights_imag, weights_real)\n            mag = tf.math.sqrt(weights_real ** 2 + weights_imag ** 2)\n            mag = tf.clip_by_value(mag, 0., 1.)\n            weights_real.assign(mag*tf.math.cos(angle))\n            weights_imag.assign(mag*tf.math.sin(angle))\n\n        print(current_loss.numpy())\n\n\n\n    modes = generate_fourier_modes(slm_size=165)\n    # modes = tf.concat(7*[modes, ], axis=-1) # apply degeneracy\n    # modes = load_composed_modes()\n\n\n    modes_real = np.real(modes)\n    modes_imag = np.imag(modes)\n\n    _, slm_size, _ = modes.shape\n\n    image = dummy_image(slm_size)\n    image = np.real(image)\n\n\n\n    n_weights = modes.shape[-1]\n    weights_real = tf.Variable(initial_value=np.random.rand(n_weights), dtype=tf.float64, trainable=True)\n    weights_imag = tf.Variable(initial_value=np.random.rand(n_weights), dtype=tf.float64, trainable=True)\n\n\n    optimizer = tf.optimizers.Adam(learning_rate=100)\n    #optimizer = tf.optimizers.SGD()\n\n    frames = []\n\n    # style.use('fivethirtyeight')\n    fig = plt.figure()\n    ax1 = 
fig.add_subplot(1, 1, 1)\n ax1.axis('off')\n\n ani = animation.FuncAnimation(fig, update)\n plt.show()\n\n m = np.max(frames)\n frames = [np.array([frame, frame, frame])/m*255 for frame in frames]\n from array2gif import write_gif\n write_gif(frames, '../src/data/opt.gif', fps=10)\n","repo_name":"JamesWhitehead5/OneToTwoDim","sub_path":"scratch/tensorflow_reconstruct.py","file_name":"tensorflow_reconstruct.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11912088353","text":"import scrapy\nfrom scrapy import Request \nfrom scrapy.loader import ItemLoader\nfrom torrents_elastic.items import TorrentsElasticItem\n\n\nclass PiratbaySpider(scrapy.Spider):\n name = 'piratbay'\n allowed_domains = [\n 'apibay.org',\n 'thepiratebay.org'\n ]\n start_urls = ['https://apibay.org/precompiled/data_top100_recent.json']\n\n\n def __init__(self):\n\n\n self.torrent_template = 'https://apibay.org/t.php?id={}'\n\n\n\n def start_requests(self):\n yield Request(\n self.start_urls[0],\n callback=self.parse_initial_id\n )\n\n def parse_initial_id(self,response):\n max_id = max([int(torrent['id']) for torrent in response.json()])\n for torrent in range(max_id,1,-1):\n yield Request(\n self.torrent_template.format(torrent),\n meta ={\n 'id':torrent\n }\n )\n\n def parse(self, response):\n if self.not_exist(response) :\n self.logger.info('\\n\\n no torrent found with id {} \\n\\n'.format(response.meta['id']))\n return \n\n torrent = response.json()\n loader = ItemLoader(TorrentsElasticItem(),response)\n loader.add_value('id_value',torrent['id'])\n loader.add_value('website','piratbay')\n #loader.add_value('category',torrent['category'])\n loader.add_value('name',torrent['name'])\n loader.add_value('num_files',torrent['num_files'])\n loader.add_value('seeders',torrent['seeders'])\n loader.add_value('leechers',torrent['leechers'])\n loader.add_value('hash_info',torrent['info_hash'])\n loader.add_value('size',str(torrent['size']))\n loader.add_value('description',torrent['descr'])\n loader.add_value('status',torrent['status'])\n yield loader.load_item()\n\n def not_exist(self,response):\n return 'not exsist' in response.json()['name']\n\n","repo_name":"bensouiciakram/project25","sub_path":"torrents_elastic/spiders/piratbay.py","file_name":"piratbay.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13534198535","text":"import re\nfrom typing import List, Optional\n\nfrom bs4 import BeautifulSoup\n\nfrom grobber.decorators import cached_property\nfrom grobber.request import Request\nfrom . 
import register_stream\nfrom ..models import Stream\n\n\nclass RapidVideo(Stream):\n HOST = re.compile(r\"rapidvideo\\.\\w{2,3}\")\n\n @cached_property\n async def bs(self) -> BeautifulSoup:\n # get the cookie\n await self._req.response\n self._req.reload()\n # hopefully use the cookie?\n bs = await self._req.bs\n return bs\n\n @cached_property\n async def poster(self) -> Optional[str]:\n link_container = (await self.bs).select_one(\"video#videojs\")\n if not link_container:\n return None\n link = link_container.attrs.get(\"poster\")\n if link and await Request(link).head_success:\n return link\n\n @cached_property\n async def links(self) -> List[str]:\n bs = await self.bs\n\n sources = [Request(source[\"src\"], timeout=10) for source in bs.select(\"video#videojs source\")]\n return await Stream.get_successful_links(sources)\n\n @cached_property\n async def external(self) -> bool:\n return True\n\n\nregister_stream(RapidVideo)\n","repo_name":"myanimestream/grobber","sub_path":"grobber/anime/streams/rapidvideo.py","file_name":"rapidvideo.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"74729515717","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n #here first we are calculating inorder traversal of both tree and thrn compare them\n def inorder(root,arr,child):\n if root:\n inorder(root.left,arr,\"left\")\n arr.append((root.val,child))\n inorder(root.right,arr,\"right\")\n arr1 = []\n inorder(p,arr1,\"val\")\n arr2 = []\n inorder(q,arr2,\"val\")\n return arr1==arr2\n\n\n\n\n#2nd method without inorder traversal\nclass Solution:\n def isSameTree(self, p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n if not p and not q:\n return True\n elif not p or not q:\n return False\n if p.val == q.val:\n return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n return False\n","repo_name":"himanshush200599/codingPart","sub_path":"leetcode/easy/sameTree.py","file_name":"sameTree.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"42067216924","text":"import grpc\nimport logging\nimport sys\nfrom argparse import ArgumentParser\nfrom rchain.client import RClient\nfrom rchain.vault import VaultAPI\nfrom rchain.crypto import PrivateKey\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.INFO)\nroot = logging.getLogger()\nroot.addHandler(handler)\nroot.setLevel(logging.INFO)\n\nparser = ArgumentParser(description=\"Transfer rev to another vault\")\nparser.add_argument(\"-p\", \"--private-key\", action=\"store\", type=str, required=True, dest=\"private_key\", help=\"private key of the sender vault\")\nparser.add_argument(\"-r\", \"--receiver\", action=\"store\", type=str, required=True, dest=\"receiver\", help=\"receiver of the transfer\")\nparser.add_argument(\"-a\", \"--amount\", action=\"store\", type=int, required=True, dest=\"amount\", help=\"the amount of the transfer\")\n\nargs = parser.parse_args()\ntry:\n private_key = PrivateKey.from_hex(args.private_key)\nexcept:\n logging.error(\"The private you provided is not valid\")\n sys.exit(1)\n\nwith grpc.insecure_channel('localhost:40401') as channel:\n client = 
RClient(channel)\n vault = VaultAPI(client, private_key)\n vault.transfer(from_addr=None, to_addr=args.receiver, amount=args.amount)\n logging.info(\"Succeed transfer {} from {} to {} .\".format(args.amount, private_key.get_public_key().get_rev_address(), args.receiver))\n","repo_name":"nzpr/rshard","sub_path":"scripts/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"32060449208","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('invitations', '0002_auto_20150905_1747'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='invitation',\n name='status',\n field=models.CharField(default='pending', max_length=100, choices=[('pending', 'pending'), ('processing', 'processing'), ('error', 'error'), ('sent', 'sent'), ('accepted', 'accepted')]),\n ),\n ]\n","repo_name":"phildini/logtacts","sub_path":"invitations/migrations/0003_auto_20150906_0536.py","file_name":"0003_auto_20150906_0536.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"62"} +{"seq_id":"42425584110","text":"import os\nimport config\nimport json\nimport sqlite3\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom app.models import Producteurs\nfrom flask import Flask\n\napp = Flask(__name__)\ndb_name = 'app.db'\ndb = SQLAlchemy(app)\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(config.basedir, 'app.db')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\n\n# def fill_db():\n\n# with open(\"producteurs.json\", encoding='utf-8') as file:\n# file = json.load(file)\n# for producers in file:\n# print(db.session)\n# item = Producteurs(producers[\"name\"],\n# producers[\"cat\"],\n# producers[\"addr\"],\n# int(producers[\"cp\"]),\n# producers[\"ville\"],\n# producers[\"dept\"],\n# producers[\"contact\"],\n# producers[\"lat\"],\n# producers[\"lon\"])\n# db.session.add(item)\n# db.session.commit()\n# # create a message to send to the console\n# print(\"The producer\" + producers[\"name\"] + \"has been submitted.\")\n# break\n\n# fill_db()\n\nconnection = sqlite3.connect(\"app.db\")\ncursor = connection.cursor()\n\ndef fill_db():\n\n with open(\"producteurs.json\", encoding='utf-8') as file:\n file = json.load(file)\n for producers in file:\n cat_str = \"\"\n for i in producers[\"cat\"]:\n cat_str += i\n sql = \"\"\"INSERT INTO producteurs (name,\n cat, addr, cp, ville, dept, contact, lat, lon)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\"\n val = (str(producers[\"name\"]),\n str(producers[\"cat\"]),\n str(producers[\"addr\"]),\n str(producers[\"cp\"]),\n str(producers[\"ville\"]),\n str(producers[\"dept\"]),\n str(producers[\"contact\"]),\n str(producers[\"lat\"]),\n str(producers[\"lon\"]))\n cursor.execute(sql, val)\n connection.commit()\n\nfill_db()\n","repo_name":"elmasta/tourisme","sub_path":"db_script.py","file_name":"db_script.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"16698507410","text":"import curses\nimport re\n\ndef scrape(message):\n\tmessage = re.sub(r\"(`[A-Za-z])\", \"\", message)\n\tmessage = message.replace(\"`\", \"\")\n\treturn message\n\nclass Graphics():\n\tdef __init__(self, stdscr):\n\t\tself.stdscr = stdscr\n\t\tself.chatbuffer = 
{}\n\t\tself.inputbuffer = \"\"\n\t\tself.linebuffer = []\n\t\tself.helpindex = 0\n\t\tself.regindex = 0\n\t\tself.channel = ''\n\t\tself.user = ''\n\n\t\tself.chatbox_hwyx = (curses.LINES - 3, curses.COLS, 0, 0)\n\t\tpos = curses.LINES-1\n\t\tself.inputbox_hwyx = (3, curses.COLS, curses.LINES - 3, 0)\n\t\t\n\t\tself.win_chatbox = stdscr.derwin(*self.chatbox_hwyx)\n\t\tself.win_inputbox = stdscr.derwin(*self.inputbox_hwyx)\n\t\t\n\t\tself.resize()\n\n\tdef inject_chat(self, msg):\n\t\t\n\t\tself.linebuffer.append(msg)\n\t\tself.render_chatbox()\n\t\n\tdef resize(self):\n\t\th, w = self.stdscr.getmaxyx()\n\t\ttry:\n\t\t\tself.win_chatbox.mvwin(0,0)\n\t\t\tself.win_chatbox.resize(h-3, w)\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.win_inputbox.mvwin(h-3, 0)\n\t\t\tself.win_inputbox.resize(3, w)\n\t\texcept:\n\t\t\tpass\n\n\t\tself.render()\n\n\tdef render(self):\n\t\th, w = self.stdscr.getmaxyx()\n\t\tself.stdscr.clear()\n\t\tself.stdscr.refresh()\n\t\tself.render_chatbox()\n\t\tself.render_inputbox()\n\n\tdef put_buffer(self, buff):\n\t\tif buff != {}:\n\t\t\tfor i in buff:\n\t\t\t\tif 'channel' in i:\n\t\t\t\t\tif i['channel'] == self.channel:\n\t\t\t\t\t\tputting = \"{} // {} :\".format(i['t'], i['from_user']) \n\t\t\t\t\t\tif not(putting in self.linebuffer):\n\t\t\t\t\t\t\tself.linebuffer.append(putting)\n\t\t\t\t\t\t\tthinglist = i['msg'].splitlines()\n\t\t\t\t\t\t\tfor i in thinglist:\n\t\t\t\t\t\t\t\tif i:\n\t\t\t\t\t\t\t\t\tself.linebuffer.append( scrape( i ) )\n\t\t\t\t\t\t\t\tif len(i) > curses.COLS:\n\t\t\t\t\t\t\t\t\titerator = int(len(i) / curses.COLS)\n\t\t\t\t\t\t\t\t\twhile iterator > 0:\n\t\t\t\t\t\t\t\t\t\tself.linebuffer.append( \"\" )\n\t\t\t\t\t\t\t\t\t\titerator -= 1\n\t\t\t\t\t\t\tself.linebuffer.append( \"\" )\n\t\t\n\t\tself.render_chatbox()\n\n\tdef switch_channel_user(self, channel, user):\n\t\tself.user = user\n\t\tself.channel = channel\n\t\tself.regindex = 0\n\t\tself.render()\n\t\n\tdef render_chatbox(self):\n\t\tself.win_chatbox.clear()\n\t\th, w = self.win_chatbox.getmaxyx()\n\t\tj = len(self.linebuffer) - h\n\t\tif j < 0:\n\t\t\tj = 0\n\t\tfor i in range(min(h, len(self.linebuffer))):\n\t\t\ttry:\n\t\t\t\tself.win_chatbox.addstr(i, 0, self.linebuffer[j])\n\t\t\texcept:\n\t\t\t\tself.inject_chat(\"\")\n\t\t\tj += 1\n\t\t\tself.win_chatbox.refresh()\n\t\tself.stdscr.refresh()\n\t\n\tdef render_inputbox(self):\n\t\th, w = self.win_inputbox.getmaxyx()\n\t\tself.win_inputbox.clear()\n\t\tstring = \"{} @ {} : {}\".format(self.user, self.channel, self.inputbuffer)\n\t\tif len(string) > w - 5:\n\t\t\tstart = len(string) - w + 5\n\t\t\tstring = string[start:]\n\t\tself.win_inputbox.addstr(1, 0, string)\n\t\tself.win_inputbox.refresh()\n\t\t\n\n\n\tdef prompt(self, msg):\n\t\tself.inputbuffer = msg\n\t\tself.render_inputbox()\n\t\tres = self.wait_input()\n\t\tres = res[len(msg):]\n\t\treturn res\n\n\tdef wait_input(self, prompt=\"\"):\n\t\tself.inputbuffer = prompt\n\t\tself.stdscr.refresh()\n\t\tself.render_inputbox()\n\t\tself.win_inputbox.cursyncup()\n\t\tlast = -1\n\t\twhile last != ord('\\n'):\n\t\t\tlast = self.stdscr.getch()\n\t\t\tif last == ord('\\n'):\n\t\t\t\ttmp = self.inputbuffer\n\t\t\t\tself.inputbuffer = \"\"\n\t\t\t\tself.render_inputbox()\n\t\t\t\tself.win_inputbox.cursyncup()\n\t\t\t\treturn tmp[len(prompt):]\n\t\t\telif last == curses.KEY_BACKSPACE or last == 127:\n\t\t\t\tif len(self.inputbuffer) > len(prompt):\n\t\t\t\t\tself.inputbuffer = self.inputbuffer[:-1]\n\t\t\t\t\tself.render_inputbox()\n\t\t\telif last == 
curses.KEY_RESIZE:\n\t\t\t\tself.resize()\n\t\t\telif 32 <= last <= 126:\n\t\t\t\tself.inputbuffer += chr(last)\n\t\t\t\tself.render_inputbox()\n\tdef kill(self):\n\t\tcurses.echo()\n\t\tcurses.nocbreak()\n\t\tself.stdscr.keypad(0)\n\t\tcurses.endwin()\n","repo_name":"bitcrushr/chatmud","sub_path":"cgraphics.py","file_name":"cgraphics.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"27502704199","text":"from unittest import TestCase\nfrom tornado.escape import json_decode\nfrom elephunk.handlers import IndexesDatabaseHandler\nfrom elephunk.database import Row\n\nclass IndexesDatabaseHandlerTest(TestCase):\n\n def test_build_index_json(self):\n tables = [\n Row(relid=1, schemaname='public', relname='table1', seq_scan=20, seq_tup_read=200),\n Row(relid=2, schemaname='public', relname='table2', seq_scan=10, seq_tup_read=100)\n ]\n indexes = [\n Row(relid=1, schemaname='public', relname='table1', indexrelname='index1', idx_scan=100, idx_tup_read=700, idx_tup_fetch=80, indisunique=True),\n Row(relid=1, schemaname='public', relname='table1', indexrelname='index2', idx_scan=50, idx_tup_read=70, idx_tup_fetch=8, indisunique=False),\n Row(relid=2, schemaname='public', relname='table2', indexrelname='index3', idx_scan=100, idx_tup_read=700, idx_tup_fetch=80, indisunique=True)\n ]\n\n json = json_decode(IndexesDatabaseHandler.build_json('database', tables, indexes))\n self.assertEquals('database', json['name'])\n self.assertEquals(['public.table1', 'public.table2'], [c['name'] for c in json['children']])\n\n def test_map_table(self):\n table = Row(relid=1, schemaname='public', relname='table1', seq_scan=20, seq_tup_read=200)\n mapped_table = IndexesDatabaseHandler.map_table(table, {})\n\n self.assertEquals('public.table1', mapped_table['name'])\n self.assertEquals('public.table1.unindexed', mapped_table['children'][0]['name'])\n self.assertEquals(20, mapped_table['children'][0]['scans'])\n self.assertEquals(200, mapped_table['children'][0]['tuples'])\n self.assertEquals(False, mapped_table['children'][0]['isIndex'])\n\n def test_map_index(self):\n index = Row(relid=1, schemaname='public', relname='table1', indexrelname='index1', idx_scan=100, idx_tup_read=700, idx_tup_fetch=80, indisunique=True)\n mapped_index = IndexesDatabaseHandler.map_index(index)\n self.assertEquals('public.table1.index1', mapped_index['name'])\n self.assertEquals(100, mapped_index['scans'])\n self.assertEquals(780, mapped_index['tuples'])\n self.assertEquals(True, mapped_index['isUnique'])\n\n\n\n","repo_name":"pitluga/elephunk","sub_path":"tests/elephunk/handlers/indexes_database_handler_test.py","file_name":"indexes_database_handler_test.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1199582915","text":"import numpy as np\nimport pyfastx\nimport phanotate_modules.functions as phano\nshotsetds=[]\nshotsetss=[]\nfa_ds = pyfastx.Fasta('ds-linear.fasta')\nfa_ss = pyfastx.Fasta('ss-circular.fasta')\ndef shot(str):\n length=len(str)\n start=np.random.randint(length,10)\n return str[start:start+1500]\nfor itm in fa_ds:\n for _ in range(10):\n shotsetds.append(shot(itm.seq))\nfor itm in fa_ss:\n for _ in range(10):\n shotsetss.append(shot(itm.seq))\nshotsetds=[phano.get_backgroud_rbs(i) for i in shotsetds]\n# shotsetds = torch.Tensor(shotsetds)\n# shotsetds = 
for itm in fa_ds:\n    for _ in range(10):\n        shotsetds.append(shot(itm.seq))\nfor itm in fa_ss:\n    for _ in range(10):\n        shotsetss.append(shot(itm.seq))\nshotsetds=[phano.get_backgroud_rbs(i) for i in shotsetds]\n# shotsetds = torch.Tensor(shotsetds)\n# shotsetds = torch.Tensor.reshape(shotsetds,(-1,28))\nshotsetss=[phano.get_backgroud_rbs(i) for i in shotsetss]\n# shotsetss = torch.Tensor(shotsetss)\n# shotsetss = torch.Tensor.reshape(shotsetss,(-1,28))\nshotsetlist=shotsetds+shotsetss\nshotsetlist=np.array(shotsetlist,dtype=np.float32)\nlabels1=np.ones(len(shotsetds))\nlabels2=np.zeros(len(shotsetss))\nlabel=np.r_[labels1,labels2]\nlabel=np.array(label,dtype=np.int64)\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\n# x holds the dataset features, y the labels.\nx_train, x_test, y_train, y_test = train_test_split(shotsetlist, label, test_size = 0.2,random_state=4)\n# CustomDataset, model, loss, num_epochs and trainer are assumed to be defined elsewhere in the project.\nshotset=CustomDataset(torch.Tensor(x_train),y_train)\nmyloader=data.DataLoader(shotset,batch_size=15)\ntestset=CustomDataset(torch.Tensor(x_test),y_test)\ntestloader=data.DataLoader(testset,batch_size=15)\nd2l.train_ch3(model, myloader, testloader, loss, num_epochs, trainer)","repo_name":"YangyiLab/phage-gene-indentification","sub_path":"phage-classifer/count_dna.py","file_name":"count_dna.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} {"seq_id":"28339208117","text":"import copy\n\n\"\"\"\nCheck if a line collides with another line\nx1, y1, x2, y2 : First line\nx3, y3, x4, y4 : Second line\n\"\"\"\ndef isLineCollidesLine(x1, y1, x2, y2, x3, y3, x4, y4) -> bool:\n    denominator: float = ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1))\n    numerator1: float = ((x4-x3)*(y1-y3) - (y4-y3)*(x1-x3))\n    numerator2: float = ((x2-x1)*(y1-y3) - (y2-y1)*(x1-x3))\n\n    # Detect coincident lines (has a problem, read below)\n    if (denominator == 0) :\n        return False#numerator1 == 0 and numerator2 == 0\n\n    r: float = numerator1 / denominator\n    s: float = numerator2 / denominator\n\n    return (0 <= r <= 1) and (0 <= s <= 1)\n\n\"\"\"\nIndicates if a rectangle collides a line\nline : [x1, y1, x2, y2]\nrectangle [x1, y1, x2, y2]\n\"\"\"\ndef rectangleCollidesLine(line: list, rectangle: list) -> bool:\n    if isLineCollidesLine(line[0], line[1], line[2], line[3], \n        rectangle[0], rectangle[1], rectangle[0], rectangle[3]):\n        return True\n\n    if isLineCollidesLine(line[0], line[1], line[2], line[3], \n        rectangle[2], rectangle[1], rectangle[2], rectangle[3]):\n        return True\n\n    if isLineCollidesLine(line[0], line[1], line[2], line[3], \n        rectangle[0], rectangle[1], rectangle[2], rectangle[1]):\n        return True\n\n    if isLineCollidesLine(line[0], line[1], line[2], line[3], \n        rectangle[0], rectangle[3], rectangle[2], rectangle[3]):\n        return True\n\n    return False\n
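# Editor's sketch (illustrative values, not part of the original module):\n#   isLineCollidesLine(0, 0, 2, 2, 0, 2, 2, 0)              -> True (crossing diagonals)\n#   rectangleCollidesLine([-1, 0.5, 3, 0.5], [0, 0, 2, 2])  -> True (line cuts the box)\n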
\n\"\"\"\nReturn the position of a certain corner of an obstacle\ndict: the obstacle\nchosenCorner: 0 left-up, 1 right-up, 2 right-down, 3 left-down\n\"\"\"\ndef computeRectangleCorner(obstacle: dict, chosenCorner: int):\n    \"\"\"result: list = copy.copy(obstacle[\"position\"])\n\n    if chosenCorner == 1 or chosenCorner == 2:\n        result[0] += obstacle[\"size\"][0]\n\n    if chosenCorner >= 2:\n        result[1] += obstacle[\"size\"][1]\n\n    return result\"\"\"\n\n    if chosenCorner == 0:\n        return [obstacle[\"position\"][0] - 0.001, obstacle[\"position\"][1] - 0.001]\n\n    if chosenCorner == 1:\n        return [obstacle[\"position\"][0] + obstacle[\"size\"][0] + 0.001, obstacle[\"position\"][1] - 0.001]\n\n    if chosenCorner == 2:\n        return [obstacle[\"position\"][0] + obstacle[\"size\"][0] + 0.001, obstacle[\"position\"][1] + obstacle[\"size\"][1] + 0.001]\n\n    if chosenCorner == 3:\n        return [obstacle[\"position\"][0] - 0.001, obstacle[\"position\"][1] + obstacle[\"size\"][1] + 0.001]\n\n\"\"\"\nReturn the number of vertices of a certain map\nmap: the map\n\"\"\"\ndef getNumberVertex(map: dict):\n    return len(map[\"wastes\"]) + 1 + 4 * len(map[\"obstacles\"])\n\n\"\"\"\nReturn the position of a certain vertex.\nVertex 0 (or 1 in math) corresponds to the robot position,\nvertices between 1 and number_wastes to the (i-1)-th waste,\nand vertices after number_wastes to an obstacle's corner.\n\"\"\"\ndef getPosition(map: dict, vertex: int) -> list:\n    if vertex == 0:\n        return map[\"robot\"][\"position\"]\n\n    vertex -= 1\n\n    if vertex < len(map[\"wastes\"]):\n        return copy.copy(map[\"wastes\"][vertex][\"position\"])\n    \n    start: int = vertex - len(map[\"wastes\"])\n\n    return computeRectangleCorner(map[\"obstacles\"][start // 4], start % 4)\n\n\"\"\"\nTransform a path to a graph given a number of vertices\nnumberPoint: Number of vertices\npath: list of visited vertices\n\"\"\"\ndef path_to_graph(numberPoint, path):\n    result: list = []\n    for i in range(numberPoint):\n        result.append([])\n\n    for i in range(len(path) - 1):\n        result[path[i]].append(path[i + 1])\n        result[path[i + 1]].append(path[i])\n\n    return result","repo_name":"MightyCode/enseirb-s6-grapheramassage","sub_path":"pmath.py","file_name":"pmath.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"25734572580","text":"from textblob import TextBlob\nfrom textblob import Word\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nimport sys\nimport nltk\n\n\nnltk.download('averaged_perceptron_tagger')\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\n\nstop_words = set(stopwords.words('english'))\n\nlemmatizer = WordNetLemmatizer()\n\ndef parse(desc):\n    tokens = [w for (w, pos) in TextBlob(desc).pos_tags if (pos[0] == 'V' or pos[0] == 'N') ]\n    lemm = []\n    for token in tokens:\n        if token not in stop_words:\n            lemm.append(lemmatizer.lemmatize(token))\n    lemm = \", \".join(lemm)\n    return lemm\n\ndef main():\n    # Use the loaded model to make predictions\n    parsed = parse(str(sys.argv[1]))\n\n    print(parsed)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Chewposhi/Information-Retrieval","sub_path":"server/desc_parse.py","file_name":"desc_parse.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} {"seq_id":"23597185128","text":"import random\r\nfrom pathlib import Path\r\nimport os\r\n\r\nclass Tic:\r\n\r\n    def __init__(self) -> None:\r\n        self.a1 = '-'\r\n        self.a2 = '-'\r\n        self.a3 = '-'\r\n\r\n        self.b1 = '-'\r\n        self.b2 = '-'\r\n        self.b3 = '-'\r\n\r\n        self.c1 = '-'\r\n        self.c2 = '-'\r\n        self.c3 = '-'\r\n        self.counter = 0\r\n\r\n        self.fila = [self.a1, self.a2, self.a3]\r\n        self.filb = [self.b1, self.b2, self.b3]\r\n        self.filc = [self.c1, self.c2, self.c3]\r\n\r\n        self.col1 = [self.a1, self.b1, self.c1]\r\n        self.col2 = [self.a2, self.b2, self.c2]\r\n        self.col3 = [self.a3, self.b3, self.c3]\r\n\r\n        self.diag1 = [self.a1, self.b2, self.c3]\r\n        self.diag2 = [self.a3, self.b2, self.c1]\r\n\r\n        self.todas = [self.fila, self.filb, self.filc, self.col1, self.col2, self.col3, self.diag1, self.diag2]\r\n\r\n        self.disponibles = ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3']\r\n\r\n        self.turno = 'x'\r\n\r\n        self.jujador1Name = 'CPU 1'\r\n        self.jujador2Name = 'CPU 2'\r\n\r\n        self.jujador1Points = 0\r\n        self.jujador2Points = 0\r\n        \r\n        self.partidoNUM = 0\r\n\r\n        self.turno1v1 = ''\r\n\r\n        self.corners = ['a1','a3','c1','c3']\r\n\r\n        self.FILa = ['a1', 'a2', 'a3']\r\n        self.FILb = ['b1', 'b2', 'b3']\r\n        self.FILc = 
['c1', 'c2', 'c3']\r\n\r\n        self.COL1 = ['a1', 'b1', 'c1']\r\n        self.COL2 = ['a2', 'b2', 'c2']\r\n        self.COL3 = ['a3', 'b3', 'c3']\r\n\r\n        self.DIAG1 = ['a1', 'b2', 'c3']\r\n        self.DIAG2 = ['a3', 'b2', 'c1']\r\n\r\n        self.square = [self.fila, self.col1, self.filc, self.col3]\r\n        self.squaredic = {0:self.FILa, 1:self.COL1, 2:self.FILc, 3:self.COL3}\r\n\r\n        self.dicNumToLinea:dict[list] = {0:self.FILa, 1:self.FILb, 2:self.FILc, 3:self.COL1, 4:self.COL2, 5:self.COL3, 6:self.DIAG1, 7:self.DIAG2}\r\n\r\n        self.double1 = ['a1', 'a2', 'a3', 'b3', 'c3']\r\n        self.double2 = ['a1', 'b1', 'c1', 'c2', 'c3']\r\n        self.double3 = ['c1', 'b1', 'a1', 'a2', 'a3']\r\n        self.double4 = ['c1', 'c2', 'c3', 'b3', 'a3']\r\n        self.DOUBLE1 = [self.a1, self.a2, self.a3, self.b3, self.c3]\r\n        self.DOUBLE2 = [self.a1, self.b1, self.c1, self.c2, self.c3]\r\n        self.DOUBLE3 = [self.c1, self.b1, self.a1, self.a2, self.a3]\r\n        self.DOUBLE4 = [self.c1, self.c2, self.c3, self.b3, self.a3]\r\n\r\n        self.cpu_center = 0\r\n\r\n        self.textoganador = 'ERROR'\r\n\r\n        self.mode = '' #PvP, PvCPU, CPUvCPU\r\n        self.difficulty = '' #PvP, PvCPU = EASY, HARD, CPUvCPU = easyX2 easyHARD hardX2\r\n\r\n        self.empezo = ''\r\n\r\n        self.mem1 = []\r\n        self.mem2 = []\r\n        self.mem3 = []\r\n        self.mem4 = []\r\n        self.mem5 = []\r\n        self.mems = [self.mem1, self.mem2, self.mem3, self.mem4, self.mem5]\r\n\r\n    def clear(self) -> None:\r\n        \r\n        self.a1 = '-'\r\n        self.a2 = '-'\r\n        self.a3 = '-'\r\n\r\n        self.b1 = '-'\r\n        self.b2 = '-'\r\n        self.b3 = '-'\r\n\r\n        self.c1 = '-'\r\n        self.c2 = '-'\r\n        self.c3 = '-'\r\n        self.counter = 0\r\n\r\n        self.fila = [self.a1, self.a2, self.a3]\r\n        self.filb = [self.b1, self.b2, self.b3]\r\n        self.filc = [self.c1, self.c2, self.c3]\r\n\r\n        self.col1 = [self.a1, self.b1, self.c1]\r\n        self.col2 = [self.a2, self.b2, self.c2]\r\n        self.col3 = [self.a3, self.b3, self.c3]\r\n\r\n        self.diag1 = [self.a1, self.b2, self.c3]\r\n        self.diag2 = [self.a3, self.b2, self.c1]\r\n\r\n        self.todas = [self.fila, self.filb, self.filc, self.col1, self.col2, self.col3, self.diag1, self.diag2]\r\n\r\n        self.disponibles = ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3']\r\n\r\n        self.turno = 'x'\r\n\r\n        self.square = [self.fila, self.col1, self.filc, self.col3]\r\n        self.squaredic = {0:self.FILa, 1:self.COL1, 2:self.FILc, 3:self.COL3}\r\n\r\n        self.corners = ['a1','a3','c1','c3']\r\n\r\n        self.double1 = ['a1', 'a2', 'a3', 'b3', 'c3']\r\n        self.double2 = ['a1', 'b1', 'c1', 'c2', 'c3']\r\n        self.double3 = ['c1', 'b1', 'a1', 'a2', 'a3']\r\n        self.double4 = ['c1', 'c2', 'c3', 'b3', 'a3']\r\n        self.DOUBLE1 = [self.a1, self.a2, self.a3, self.b3, self.c3]\r\n        self.DOUBLE2 = [self.a1, self.b1, self.c1, self.c2, self.c3]\r\n        self.DOUBLE3 = [self.c1, self.b1, self.a1, self.a2, self.a3]\r\n        self.DOUBLE4 = [self.c1, self.c2, self.c3, self.b3, self.a3]\r\n\r\n        self.cpu_center = 0\r\n\r\n        self.textoganador = 'ERROR'\r\n\r\n        self.mem1 = []\r\n        self.mem2 = []\r\n        self.mem3 = []\r\n        self.mem4 = []\r\n        self.mem5 = []\r\n        self.mems = [self.mem1, self.mem2, self.mem3, self.mem4, self.mem5]\r\n    \r\n    def saltear(self) -> list[list[str]]:\r\n        i:int = 4\r\n        \r\n        while i >= 0 and len(self.mems[i]) == 0:\r\n            i = i - 1\r\n\r\n        return self.mems[0:i+1]\r\n
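\r\n    # Editor's note (illustrative): saltear() trims the trailing empty memory\r\n    # slots, so with two stored boards mems == [g1, g2, [], [], []] it returns\r\n    # [g1, g2]; storage is capped at the five mem1..mem5 slots.\r\n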
\r\n    def text(self) -> None:\r\n\r\n        dir_path = Path.cwd().joinpath(r\"Tateti scores\")\r\n        file_name = self.jujador1Name + \" vs \" + self.jujador2Name + '.txt'\r\n        file_path = dir_path.joinpath(file_name)\r\n        ls = self.saltear()\r\n        # check if the directory exists; if not, create it\r\n        if not os.path.exists(dir_path):\r\n            os.makedirs(dir_path)\r\n        # check if the directory exists (fail safe)\r\n        if dir_path.is_dir():\r\n            # append to the file if it already exists, otherwise create it\r\n            mode = \"a\" if file_path.is_file() else \"w\"\r\n            with open(file_path, mode) as f:\r\n                f.write('\\n' + self.jujador1Name + \": \" + str(self.jujador1Points) + '\\t|\\t' + self.jujador2Name + \": \" + str(self.jujador2Points)+ \"\\t|\" + self.textoganador+'\\n')\r\n                for x in ls:\r\n                    f.write(x[0:3][0]+x[0:3][1]+x[0:3][2]+'\\t\\t')\r\n                f.write('\\n')\r\n                for x in ls:\r\n                    f.write(x[3:6][0]+x[3:6][1]+x[3:6][2]+'\\t\\t')\r\n                f.write('\\n')\r\n                for x in ls:\r\n                    f.write(x[6:9][0]+x[6:9][1]+x[6:9][2]+'\\t\\t')\r\n            if mode == \"w\":\r\n                print('File was created.')\r\n\r\n    def analisis(self) -> bool:\r\n        # Scan every 3-cell line on the board looking for three equal marks.\r\n        # Returns False when there is a winner, True otherwise.\r\n        if self.counter >= 5: # minimum number of moves needed for a win\r\n\r\n            for ls in self.todas:\r\n                if (len(set(ls)) == 1 and ls[0] != '-'):\r\n                    return False #False == there is a winner\r\n        return True\r\n    \r\n    def cambiar_turno(self) -> None:\r\n        # Flip the mark from x to o and back\r\n        if self.turno == 'x':\r\n            self.turno = 'o'\r\n        else: self.turno = 'x'\r\n\r\n        # Switch the active player using the player-name string\r\n        if self.turno1v1 == self.jujador1Name:\r\n            self.turno1v1 = self.jujador2Name\r\n        else: \r\n            self.turno1v1 = self.jujador1Name\r\n\r\n    def assign(self, choice:str) -> None:\r\n        # Record the move in every helper list the AI uses, bump the move\r\n        # counter and hand the turn over via cambiar_turno()\r\n\r\n        if choice in self.disponibles:\r\n            self.disponibles.remove(choice)\r\n        else:\r\n            print(\"Rompiste algo\")\r\n        self.counter += 1\r\n\r\n        if choice == 'a1':\r\n            self.a1 = self.turno\r\n            self.fila[0] = self.turno\r\n            self.col1[0] = self.turno\r\n            self.diag1[0] = self.turno\r\n            self.DOUBLE1[0] = self.turno\r\n            self.DOUBLE2[0] = self.turno\r\n            self.DOUBLE3[2] = self.turno\r\n\r\n        elif choice == 'a2':\r\n            self.a2 = self.turno\r\n            self.fila[1] = self.turno\r\n            self.col2[0] = self.turno\r\n            self.DOUBLE1[1] = self.turno\r\n            self.DOUBLE3[3] = self.turno\r\n\r\n        elif choice == 'a3':\r\n            self.a3 = self.turno\r\n            self.fila[2] = self.turno\r\n            self.col3[0] = self.turno\r\n            self.diag2[0] = self.turno\r\n            self.DOUBLE1[2] = self.turno\r\n            self.DOUBLE3[4] = self.turno\r\n            self.DOUBLE4[4] = self.turno\r\n\r\n        elif choice == 'b1':\r\n            self.b1 = self.turno\r\n            self.filb[0] = self.turno\r\n            self.col1[1] = self.turno\r\n            self.DOUBLE2[1] = self.turno\r\n            self.DOUBLE3[1] = self.turno\r\n\r\n        elif choice == 'b2':\r\n            self.b2 = self.turno\r\n            self.filb[1] = self.turno\r\n            self.col2[1] = self.turno\r\n            self.diag1[1] = self.turno\r\n            self.diag2[1] = self.turno\r\n        \r\n        elif choice == 'b3':\r\n            self.b3 = self.turno\r\n            self.filb[2] = self.turno\r\n            self.col3[1] = self.turno\r\n            self.DOUBLE1[3] = self.turno\r\n            self.DOUBLE4[3] = self.turno\r\n\r\n        elif choice == 'c1':\r\n            self.c1 = self.turno\r\n            self.filc[0] = self.turno\r\n            self.col1[2] = self.turno\r\n            self.diag2[2] = self.turno\r\n            self.DOUBLE2[2] = self.turno\r\n            self.DOUBLE3[0] = self.turno\r\n            self.DOUBLE4[0] = self.turno\r\n\r\n        elif choice == 'c2':\r\n            self.c2 = self.turno\r\n            self.filc[1] = self.turno\r\n            self.col2[2] = self.turno\r\n            self.DOUBLE2[3] = self.turno\r\n            self.DOUBLE4[1] = self.turno\r\n\r\n        elif choice == 'c3':\r\n            self.c3 = self.turno\r\n            self.filc[2] = 
self.turno\r\n            self.col3[2] = self.turno\r\n            self.diag1[2] = self.turno\r\n            self.DOUBLE1[4] = self.turno\r\n            self.DOUBLE2[4] = self.turno\r\n            self.DOUBLE4[2] = self.turno\r\n\r\n        self.cambiar_turno()\r\n\r\n    def seleccionar(self) -> None:\r\n        # Read the user's move from stdin and pass it to assign()\r\n        choice:str = input(\"INPUT:\")\r\n\r\n        if choice in self.disponibles:\r\n            self.assign(choice)\r\n        else:\r\n            print(\"Volve a intentar\")\r\n            self.seleccionar()\r\n\r\n    def Empezar(self) -> None:\r\n        self.Menu()\r\n\r\n    def ganador1v1(self) -> None:\r\n        # Print the finished board and add points for the result:\r\n        # win +1, draw +0.5 each, loss +0 (the winner is whoever turno1v1\r\n        # names at this moment).  Then report the running score and store\r\n        # the final board in memory (limited to 5 games).\r\n\r\n        print(self)\r\n        if self.analisis():\r\n            print(\"Empate\")\r\n            self.jujador1Points += 0.5\r\n            self.jujador2Points += 0.5\r\n        else: \r\n            print(\"Gano\", self.turno1v1)\r\n            if self.turno1v1 == self.jujador1Name:\r\n                self.jujador1Points += 1.0\r\n            else: self.jujador2Points += 1.0\r\n\r\n        out = 0\r\n        for MEM in self.mems:\r\n            if out == 0 and len(MEM) == 0:\r\n                MEM.append(self.a1)\r\n                MEM.append(self.a2)\r\n                MEM.append(self.a3)\r\n                MEM.append(self.b1)\r\n                MEM.append(self.b2)\r\n                MEM.append(self.b3)\r\n                MEM.append(self.c1)\r\n                MEM.append(self.c2)\r\n                MEM.append(self.c3)\r\n                out = 1\r\n\r\n        print(self.jujador1Name, self.jujador1Points, \"puntos\")\r\n        print(self.jujador2Name, self.jujador2Points, \"puntos\")\r\n        return\r\n    \r\n    def finish(self) -> None:\r\n        # Print the overall winner: whoever has more points when this is called\r\n\r\n        if self.jujador1Points > self.jujador2Points:\r\n            self.textoganador = 'Ganador: ' + self.jujador1Name\r\n        \r\n        elif self.jujador1Points < self.jujador2Points:\r\n            self.textoganador = 'Ganador: ' + self.jujador2Name\r\n\r\n        else: \r\n            self.textoganador = 'Empate'\r\n        \r\n        print(self.textoganador)\r\n\r\n    def seguir(self, cantidad:int) -> None:\r\n        # Offer to extend the best-of series while keeping the scores\r\n\r\n        print(\"Best of\", cantidad+2, \"y/n\")\r\n        elejir = input()\r\n\r\n        if elejir == \"y\":\r\n            self.juego(cantidad+2)\r\n\r\n        elif elejir == \"n\":\r\n            self.text()\r\n            return\r\n        else:\r\n            print(\"Volve a intentar\")\r\n            self.seguir(cantidad)\r\n\r\n    def easySelect(self) -> None:\r\n\r\n        choice:str = random.choice(self.disponibles)\r\n        self.assign(choice)\r\n
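\r\n    # Editor's note (illustrative): ezwin() below looks for lines holding two\r\n    # of the mover's marks plus a gap (win now), then two opponent marks\r\n    # (block).  E.g. with fila == ['x', '-', 'x'] on turn 'x', intersecting\r\n    # that line's coordinates with self.disponibles yields 'a2' as the move.\r\n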
    \r\n    def ezwin(self) -> bool:\r\n        # Play the winning move if one exists: a line with two equal marks and\r\n        # one empty cell gets completed.  If two such lines qualify, prefer\r\n        # the one that wins the match over the one that merely blocks.\r\n        # Falls back to double_trouble().\r\n\r\n        counter_linea:int = 0\r\n        defend:list[str] = []\r\n\r\n        for linea in self.todas:\r\n\r\n            cantidad_de_x:int = 0\r\n            cantidad_de_o:int = 0\r\n\r\n            for coord in linea: \r\n                if coord == 'x':\r\n                    cantidad_de_x += 1\r\n                if coord == 'o':\r\n                    cantidad_de_o += 1\r\n\r\n            if (self.turno == 'x' and cantidad_de_x == 2 and cantidad_de_o == 0) or (self.turno == 'o' and cantidad_de_o == 2 and cantidad_de_x == 0): # lines with two own marks and one empty cell\r\n                win = list(set(self.dicNumToLinea[counter_linea]) & set(self.disponibles)) # pick the empty cell \r\n                self.assign(win[0])\r\n                return False\r\n            \r\n            if (self.turno == 'x' and cantidad_de_o == 2 and cantidad_de_x == 0) or (self.turno == 'o' and cantidad_de_x == 2 and cantidad_de_o == 0): # lines with two opposing marks and one empty cell\r\n                defend = list(set(self.dicNumToLinea[counter_linea]) & set(self.disponibles)) # pick the empty cell\r\n            \r\n            counter_linea += 1\r\n\r\n        if len(defend) != 0: # deferred so that winning takes precedence over blocking\r\n            self.assign(defend[0])\r\n            return False\r\n        return self.double_trouble()\r\n    \r\n    def double_trouble(self) -> bool:\r\n\r\n        # Look for a move that creates two distinct winning threats for the\r\n        # next turn and play it.  Limited to the forks this AI can produce\r\n        # itself; other forks exist, but the AI never reaches positions where\r\n        # they would occur.  Used both for attacking and defending.\r\n        \r\n        count_x_double_1:int = 0\r\n        count_x_double_2:int = 0\r\n        count_x_double_3:int = 0\r\n        count_x_double_4:int = 0\r\n\r\n        for coord in self.DOUBLE1:\r\n            if coord == 'x':\r\n                count_x_double_1 += 1\r\n        for coord in self.DOUBLE2:\r\n            if coord == 'x':\r\n                count_x_double_2 += 1\r\n        for coord in self.DOUBLE3:\r\n            if coord == 'x':\r\n                count_x_double_3 += 1\r\n        for coord in self.DOUBLE4:\r\n            if coord == 'x':\r\n                count_x_double_4 += 1\r\n\r\n        if count_x_double_1 == 2 and len(set(self.DOUBLE1)) == 2 and '-' in self.DOUBLE1:\r\n            choice: list = list(set(self.disponibles) & set(self.double1))\r\n            self.assign(random.choice(choice))\r\n            return False\r\n\r\n        if count_x_double_2 == 2 and len(set(self.DOUBLE2)) == 2 and '-' in self.DOUBLE2:\r\n            choice: list = list(set(self.disponibles) & set(self.double2))\r\n            self.assign(random.choice(choice))\r\n            return False\r\n        \r\n        if count_x_double_3 == 2 and len(set(self.DOUBLE3)) == 2 and '-' in self.DOUBLE3:\r\n            choice: list = list(set(self.disponibles) & set(self.double3))\r\n            self.assign(random.choice(choice))\r\n            return False\r\n\r\n        if count_x_double_4 == 2 and len(set(self.DOUBLE4)) == 2 and '-' in self.DOUBLE4:\r\n            choice: list = list(set(self.disponibles) & set(self.double4))\r\n            self.assign(random.choice(choice))\r\n            return False\r\n\r\n        return True\r\n
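\r\n    # Editor's note (illustrative): a \"double\" is a fork.  DOUBLE1 covers the\r\n    # a-row plus column 3; with x already on a1 and c3, playing a3 threatens\r\n    # a1-a2-a3 and a3-b3-c3 at once, and only one threat can be blocked.\r\n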
\r\n    def HardDefend(self) -> None:\r\n\r\n        if self.counter == 1:\r\n            if 'b2' in self.disponibles: # guarantees at least a draw for the CPU\r\n                self.assign('b2')\r\n                self.cpu_center = True\r\n            else: \r\n                eleccion = random.choice(self.corners)\r\n                self.assign(eleccion)\r\n                self.corners.remove(eleccion)\r\n\r\n        elif self.counter == 3:\r\n            if self.cpu_center:\r\n                eleccion = random.choice(list(set(self.disponibles) - set(self.corners))) # cross cells\r\n            else:\r\n                eleccion = random.choice(list(set(self.disponibles) & set(self.corners))) # corners\r\n            self.assign(eleccion)\r\n\r\n        else:\r\n            print(\"RANDOM RANDOM\")\r\n            self.easySelect()\r\n\r\n    def HardAttack(self) -> None:\r\n\r\n        eleccion:str = 'ERROR'\r\n\r\n        if self.counter == 0:\r\n            eleccion = random.choice(self.corners)\r\n            self.assign(eleccion)\r\n            self.corners.remove(eleccion)\r\n\r\n        elif self.counter == 2:\r\n            if 'b2' in self.disponibles:\r\n                count:int = 0\r\n                for linea in self.square:\r\n                    \r\n                    if len(set(linea)) == 2 and 'o' not in linea:\r\n                        eleccion = list(set(self.squaredic[count])&set(self.disponibles)& set(self.corners))\r\n                        self.assign(eleccion[0])\r\n                        return\r\n\r\n                    count += 1\r\n            else:\r\n                if self.a1 == 'x':\r\n                    self.assign('c3')\r\n                elif self.c1 == 'x':\r\n                    self.assign('a3')\r\n                elif self.c3 == 'x':\r\n                    self.assign('a1')\r\n                elif self.a3 == 'x':\r\n                    self.assign('c1')\r\n            \r\n        elif self.counter == 4:\r\n            if 'b2' in self.disponibles:\r\n                \r\n                for coord in self.corners:\r\n                    if coord in self.disponibles:\r\n                        if coord == 'a1' and 'a2' in self.disponibles and 'b1' in self.disponibles:\r\n                            eleccion = 'a1'\r\n                        elif coord == 'c1' and 'c2' in self.disponibles and 'b1' in self.disponibles:\r\n                            eleccion = 'c1'\r\n                        elif coord == 'a3' and 'a2' in self.disponibles and 'b3' in self.disponibles:\r\n                            eleccion = 'a3'\r\n                        elif coord == 'c3' and 'c2' in self.disponibles and 'b3' in self.disponibles:\r\n                            eleccion = 'c3'\r\n                if eleccion == 'ERROR':\r\n                    self.easySelect()\r\n                else:\r\n                    self.assign(eleccion)\r\n            else:\r\n                print(\"RANDOM RANDOM\")\r\n                self.easySelect()\r\n\r\n    def hardSelect(self) -> None:\r\n        # Delegates to ezwin(), HardAttack() and HardDefend():\r\n        # even move counts attack, odd ones defend\r\n        if self.counter == 0:\r\n            self.HardAttack()\r\n        elif self.counter == 1:\r\n            self.HardDefend()\r\n        else:\r\n            if self.ezwin():\r\n                # only reached when no forced win or block was found\r\n                # print(\"NO ezwin, No double\", self.counter)\r\n                if self.counter % 2 == 0:\r\n                    self.HardAttack()\r\n                else: self.HardDefend() \r\n\r\n    def Bof(self, cantidad:int) ->None:\r\n        self.juego(cantidad)\r\n\r\n    def match_change(self) -> None:\r\n        # Alternate which player starts each match\r\n        if self.empezo == '':\r\n            self.turno1v1 = self.jujador1Name\r\n            self.empezo = self.jujador1Name\r\n\r\n        elif self.empezo == self.jujador1Name:\r\n            self.turno1v1 = self.jujador2Name\r\n            self.empezo = self.jujador2Name\r\n\r\n        else:\r\n            self.turno1v1 = self.jujador1Name\r\n            self.empezo = self.jujador1Name\r\n\r\n    def jugador(self) -> None:\r\n        # Human turn: prompt for a move via seleccionar()\r\n        print(\"\")\r\n        print(\"Turno de\", self.turno1v1,\"(\" + self.turno +\")\")\r\n        print(\"Elija entre: \", self.disponibles)\r\n        print(self) \r\n        self.seleccionar()\r\n\r\n    def juego(self, cantidad:int) -> None:\r\n\r\n        while(self.jujador1Points < (cantidad + 1)/2 and self.jujador2Points < (cantidad + 1)/2):\r\n\r\n            self.match_change()\r\n\r\n            while(self.analisis() and self.counter < 9): \r\n\r\n\r\n                if self.mode == \"PvP\" or (self.mode == \"PvCPU\" and self.turno1v1 == self.jujador1Name):\r\n                    self.jugador()\r\n\r\n                elif self.mode == \"PvCPU\" or self.mode == \"CPUvCPU\":\r\n\r\n                    if self.turno1v1 == \"CPU 1 Easy\" or self.turno1v1 == \"CPU 2 Easy\":\r\n                        self.easySelect()\r\n\r\n                    elif self.turno1v1 == \"CPU 1 Hard\" or self.turno1v1 == \"CPU 2 Hard\":\r\n                        self.hardSelect()\r\n\r\n            self.cambiar_turno() # undo the final flip so turno1v1 names the player who just moved\r\n            self.ganador1v1()\r\n            self.clear()\r\n\r\n        self.finish()\r\n        self.seguir(cantidad)\r\n\r\n    def select_CPU1(self) -> None:\r\n\r\n        dif:str = input( \"1. Easy\\n2. Hard\\n\")\r\n        if dif == '1':\r\n            self.jujador1Name = \"CPU 1 Easy\"\r\n        elif dif == '2':\r\n            self.jujador1Name = \"CPU 1 Hard\"\r\n\r\n    def select_CPU2(self) -> None: \r\n\r\n        dif:str = input( \"1. Easy\\n2. Hard\\n\")\r\n        if dif == '1':\r\n            self.jujador2Name = \"CPU 2 Easy\"\r\n        elif dif == '2':\r\n            self.jujador2Name = \"CPU 2 Hard\"\r\n\r\n    def select_BestOf(self) -> None:\r\n        # Ask for the series length and start it via Bof()\r\n\r\n        Bo:str = input(\"\\n1. Best of 1\\n2. Best of 3\\n3. Best of 5\\n\")\r\n        \r\n        if Bo == '2':\r\n            var = 3\r\n        elif Bo == '3':\r\n            var = 5\r\n        else: var = 1\r\n        \r\n        self.Bof(var)\r\n\r\n    def select_BestOf_CPUvCPU(self) -> None:\r\n        # Ask for the series length and start it via Bof()\r\n\r\n        Bo:str = input(\"\\n1. Best of 1\\n2. Best of 11\\n3. Best of 101\\n\")\r\n        \r\n        if Bo == '2':\r\n            var = 11\r\n        elif Bo == '3':\r\n            var = 101\r\n        else: var = 1\r\n        \r\n        self.Bof(var)\r\n\r\n    def Menu(self) -> None:\r\n        \r\n        choice:str = input(\"1. PvP \\n2. PvCPU \\n3. 
CPUvCPU\\n\")\r\n\r\n if choice == '1': #1v1\r\n self.mode = \"PvP\"\r\n jug1:str = input(\"Nombre jugagdor 1:\")\r\n self.jujador1Name = jug1\r\n self.turno1v1 = jug1\r\n jug2:str = input(\"Nombre jugagdor 2:\")\r\n self.jujador2Name = jug2\r\n self.select_BestOf()\r\n \r\n elif choice == '2': #1vcpu\r\n self.mode = \"PvCPU\"\r\n jug1:str = input(\"Nombre jugagdor 1:\")\r\n self.jujador1Name = jug1\r\n self.turno1v1 = jug1\r\n print(\"\\nDifficulty:\")\r\n self.select_CPU2()\r\n self.select_BestOf()\r\n\r\n elif choice == '3': #cpuvcpu\r\n self.mode = \"CPUvCPU\"\r\n print(\"\\nCPU 1:\")\r\n self.select_CPU1()\r\n print(\"CPU 2:\")\r\n self.select_CPU2()\r\n self.select_BestOf_CPUvCPU()\r\n \r\n def ass(self, choice, turno):\r\n #FOR DEBUGGING\r\n self.turno = turno\r\n self.assign(choice)\r\n \r\n def __repr__(self) -> str:\r\n \r\n print(' ', '1','2','3')\r\n print('a', self.a1, self.a2, self.a3)\r\n print('b', self.b1, self.b2, self.b3)\r\n print('c', self.c1, self.c2, self.c3)\r\n return \"-----------------------------------\"\r\n\r\n\r\na = Tic()\r\na.Empezar()\r\n\r\n\r\n#TODO \r\n\r\n# text imprima en el .txt el tablero final (si hay ganador?)\r\n","repo_name":"ezemaut/Ta-Te-Ti","sub_path":"Tic.py","file_name":"Tic.py","file_ext":"py","file_size_in_byte":24153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8593793603","text":"import numpy as np\r\nimport os\r\nimport pandas as pd\r\nimport csv\r\n\r\n\r\n\"\"\"\r\n 将数据的名称存到 CSV文件中\r\n\"\"\"\r\n\r\ndef saveCSV(model, class_dir):\r\n data_path = os.path.join(root_dir, model, class_dir)\r\n img_ids = os.listdir(data_path)\r\n res = []\r\n for id in img_ids:\r\n tmp = []\r\n tmp.append(id)\r\n tmp.extend(np.array([ \r\n Classes[class_dir]\r\n ]))\r\n res.append(tmp)\r\n return res\r\n\r\nroot_dir = \"E:/分类模型数据集/二分类数据集/part/\"\r\nsubSet = [\"train\", \"test\"]\r\nClasses = {\"landslide\": 1,\r\n \"ground\": 0}\r\nClass_dir = [\"landslide\", \"ground\"]\r\n\r\nfor sub in subSet:\r\n data = []\r\n for label in Classes:\r\n data.extend(saveCSV(sub, label))\r\n save_path = root_dir + sub + \"Data.csv\"\r\n csvFile = open(save_path, \"w+\", newline='')\r\n try:\r\n writer = csv.writer(csvFile)\r\n for i in range(len(data)):\r\n writer.writerow(data[i])\r\n finally:\r\n csvFile.close()\r\n\r\n\r\n","repo_name":"Lichang000net/Landslide-classification","sub_path":"数据集CSV格式.py","file_name":"数据集CSV格式.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6977057789","text":"import tensorflow as tf\nimport pickle\nimport os\n\n\ndef get_tensor_shape(x):\n x = tf.convert_to_tensor(x)\n static_shape = x.shape.as_list()\n if tf.executing_eagerly():\n return static_shape\n dynamic_shape = tf.shape(x)\n if static_shape is None:\n return dynamic_shape\n dynamic_shape = tf.unstack(dynamic_shape)\n shape = []\n for st, dyn in zip(static_shape, dynamic_shape):\n if st is None:\n shape.append(dyn)\n else:\n shape.append(st)\n return shape\n\n\ndef dataset(path, batch_size):\n data = tf.data.TextLineDataset(path)\n data = data.repeat()\n data = data.map(map)\n output_shapes = {\"tokens\": tf.TensorShape([None]), \"length\": tf.TensorShape([])}\n data = data.padded_batch(batch_size, output_shapes, drop_remainder=True)\n return data\n\n\ndef map(text):\n text = tf.expand_dims(text, 0)\n tokens = tf.strings.split(text, ' ').values\n tokens = tf.string_to_number(tokens, tf.int32)\n length = 
tf.shape(tokens)[0]\n return {\"tokens\": tokens, \"length\": length}\n\n\ndef loss(labels, logits, mask, use_2d=False):\n if use_2d:\n mask = tf.reshape(mask, [-1])\n labels = tf.reshape(labels, [-1])\n loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)\n loss = loss * tf.cast(mask, loss.dtype)\n loss = tf.reduce_mean(loss)\n return loss\n\ndef add_EOS(tokens, length, EOS_char):\n shape = get_tensor_shape(tokens)\n seq_ln = shape[-1]\n _padd_labels = tf.zeros(shape=shape[:-1] + [1], dtype=tokens.dtype)\n tokens = tf.concat([tokens, _padd_labels], axis=-1)\n EOS = tf.one_hot(length, depth=seq_ln + 1, dtype=tokens.dtype) * EOS_char\n tokens = tokens + EOS\n return tokens\n\n\n\ndef str_logs(logs):\n logs = tf.as_string(logs)\n logs = tf.strings.reduce_join(logs, separator=' ')\n return logs\n\n\nclass Saver(object):\n def __init__(self, var_list=None, max_keep=10):\n self._var_list = var_list\n self.max_keep = max_keep\n self.saved = []\n if self._var_list is None:\n self._var_list = tf.global_variables()\n self._var_plc = [tf.placeholder(v.dtype, v.shape) for v in self._var_list]\n self._ops = [v.assign(u) for u, v in zip(self._var_plc, self._var_list)]\n\n def save(self, session, address):\n vars_list = session.run(self._var_list)\n with open(address, \"wb\") as f:\n pickle.dump(vars_list, f)\n self.saved.append(address)\n if len(self.saved) > self.max_keep:\n del_address = self.saved[0]\n self.saved = self.saved[1:]\n os.remove(del_address)\n\n def restore(self, session, address):\n with open(address, \"rb\") as f:\n var_list = pickle.load(f)\n feed_dict = {u: v for u, v in zip(self._var_plc, var_list)}\n _ = session.run(self._ops, feed_dict)\n","repo_name":"ShenakhtPajouh/autoencoder-transformer-sentence","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37643749288","text":"import numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport matplotlib.pyplot as plt\nimport tikzplotlib\nfrom cycler import cycler\n\npublic = pd.read_csv('nc-nnc-kaggle-public.csv')\npublic_nc = public[public['detector'] == 'nc']\npublic_nnc = public[public['detector'] == 'nnc']\nprivate = pd.read_csv('nc-nnc-kaggle-private.csv')\nprivate_nc = private[private['detector'] == 'nc']\nprivate_nnc = private[private['detector'] == 'nnc']\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\n# fig.suptitle('Kaggle Performance Evaluation')\n\nbar_width = 0.4\nbr1 = np.arange(len(public_nc))\nbr2 = [x + bar_width for x in br1]\n\n# bar_cycle = (cycler('hatch', ['///', '--', '...','\\///', 'xxx', '\\\\\\\\']) * cycler('color', 'w')*cycler('zorder', [10]))\n# {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}\nbar_cycle = (cycler('hatch', ['.', 'x']) * cycler('color', 'w')*cycler('zorder', [10]))\nstyles = bar_cycle()\n\nax1.bar(br1, public_nc['public'], color='lightgrey', edgecolor='black', width=bar_width, label='nc')\nax1.bar(br2, public_nnc['public'], color='black', width=bar_width, label='$\\\\neg$nc')\nax2.bar(br1, private_nc['private'], color='lightgrey', edgecolor='black', width=bar_width, label='nc')\nax2.bar(br2, private_nnc['private'], color='black', width=bar_width, label='$\\\\neg$nc')\n\nax1.set(ylabel='Public F1 Score')\nax2.yaxis.set_label_position('right')\nax2.yaxis.tick_right()\nax2.set(ylabel='Private F1 Score')\nfig.text(0.5, 0.04, 'Number of ensembled models', ha='center')\n\n# hyperparameters: ma gba valid 
freefield\n\nax1.set_xticks([r + bar_width for r in range(len(public_nc))], public_nc['ensemble-id'])\nax2.set_xticks([r + bar_width for r in range(len(public_nc))], public_nc['ensemble-id'])\n# plt.gca().set_ylim([0.4, 0.8])\n\nplt.legend(bbox_to_anchor=(-0.6, 1, 1, 0), loc=\"lower left\", mode=\"expand\", ncol=2)\n\ntikzplotlib.save('nc-vs-nnc-kaggle-scores.tex')\nplt.show()\n","repo_name":"KyleMaclean/Bird-Calls-Soundscapes","sub_path":"tikz/archive-nc-vs-nnc-kaggle/nc-nnc-kaggle-public-private.py","file_name":"nc-nnc-kaggle-public-private.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36419032638","text":"\"\"\"Training and testing the dual learning algorithm for unbiased learning to rank.\n\nSee the following paper for more information on the dual learning algorithm.\n\n * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport paddle.nn.functional as F\nimport paddle.nn as nn\nimport paddle\nimport numpy as np\nimport paddle.distributed as dist\nfrom args import config\n\nfrom baseline_model.learning_algorithm.base_algorithm import BaseAlgorithm\nimport baseline_model.utils as utils\n\n\ndef sigmoid_prob(logits):\n return F.sigmoid(logits - paddle.mean(logits, axis=-1, keepdim=True))\n\n\nclass DenoisingNet(nn.Layer):\n def __init__(self, input_vec_size):\n super(DenoisingNet, self).__init__()\n self.linear_layer = nn.Linear(input_vec_size, 1)\n self.elu_layer = nn.ELU()\n self.propensity_net = nn.Sequential(self.linear_layer, self.elu_layer)\n self.list_size = input_vec_size\n\n def forward(self, input_list):\n output_propensity_list = []\n for i in range(self.list_size):\n # Add position information (one-hot vector)\n click_feature = [\n paddle.unsqueeze(\n paddle.zeros_like(\n input_list[i]).astype('float32'), axis=-1) for _ in range(self.list_size)]\n click_feature[i] = paddle.unsqueeze(\n paddle.ones_like(input_list[i]).astype('float32'), axis=-1)\n # Predict propensity with a simple network\n output_propensity_list.append(\n self.propensity_net(\n paddle.concat(\n click_feature, axis=1)))\n\n return paddle.concat(output_propensity_list, axis=1)\n\n\nclass DLA(BaseAlgorithm):\n \"\"\"The Dual Learning Algorithm for unbiased learning to rank.\n\n This class implements the Dual Learning Algorithm (DLA) based on the input layer\n feed. See the following paper for more information on the algorithm.\n\n * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. 
In Proceedings of SIGIR '18\n\n \"\"\"\n\n def __init__(self, exp_settings, encoder_model):\n \"\"\"Create the model.\n\n Args:\n data_set: (Raw_data) The dataset used to build the input layer.\n exp_settings: (dictionary) The dictionary containing the model settings.\n \"\"\"\n print('Build DLA')\n\n self.rank_feature_size = exp_settings['rank_feature_size']\n\n self.hparams = utils.hparams.HParams(\n learning_rate=exp_settings['lr'], # Learning rate.\n max_gradient_norm=0.5, # Clip gradients to this norm.\n loss_func='softmax_loss', # Select Loss function\n # the function used to convert logits to probability distributions\n logits_to_prob='softmax',\n # The learning rate for ranker (-1 means same with learning_rate).\n propensity_learning_rate=-1.0,\n ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss\n # Set strength for L2 regularization.\n l2_loss=0.0,\n max_propensity_weight=-1, # Set maximum value for propensity weights\n constant_propensity_initialization=False,\n # Set true to initialize propensity with constants.\n grad_strategy='adamw', # Select gradient strategy\n )\n\n self.hparams.parse(exp_settings['learning_algorithm_hparams'])\n self.exp_settings = exp_settings\n self.max_candidate_num = exp_settings['max_candidate_num'] + \\\n exp_settings['negative_num']\n self.feature_size = exp_settings['feature_size']\n self.combine = exp_settings['combine']\n self.change_label = exp_settings['change_label']\n\n self.feature_id = self.type_idx(config.feature_type)\n\n if 'selection_bias_cutoff' in exp_settings.keys():\n self.rank_list_size = self.exp_settings['selection_bias_cutoff'] + \\\n exp_settings['negative_num']\n self.propensity_model = DenoisingNet(self.rank_list_size)\n\n # DataParallel\n # initialize parallel environment\n dist.init_parallel_env()\n self.model = encoder_model\n if paddle.device.cuda.device_count() >= exp_settings['n_gpus'] > 1:\n print(\"Let's use\", exp_settings['n_gpus'], \"GPUs!\")\n self.model = paddle.DataParallel(self.model)\n\n self.labels_name = [] # the labels for the documents (e.g., clicks)\n self.labels = [] # the labels for the documents (e.g., clicks)\n for i in range(self.max_candidate_num):\n self.labels_name.append(\"label{0}\".format(i))\n\n if self.hparams.propensity_learning_rate < 0:\n self.propensity_learning_rate = float(self.hparams.learning_rate)\n else:\n self.propensity_learning_rate = float(\n self.hparams.propensity_learning_rate)\n self.learning_rate = float(self.hparams.learning_rate)\n\n self.global_step = 0\n\n # Select logits to prob function\n self.logits_to_prob = nn.Softmax(axis=-1)\n if self.hparams.logits_to_prob == 'sigmoid':\n self.logits_to_prob = sigmoid_prob\n\n self.optimizer_func = paddle.optimizer.AdamW\n # if self.hparams.grad_strategy == 'sgd':\n # self.optimizer_func = paddle.optimizer.SGD\n\n print('Loss Function is ' + self.hparams.loss_func)\n # Select loss function\n self.loss_func = None\n if self.hparams.loss_func == 'sigmoid_loss':\n self.loss_func = self.sigmoid_loss_on_list\n elif self.hparams.loss_func == 'pairwise_loss':\n self.loss_func = self.pairwise_loss_on_list\n else: # softmax loss without weighting\n self.loss_func = self.softmax_loss\n\n def separate_gradient_update(self):\n denoise_params = self.propensity_model.parameters()\n ranking_model_params = self.model.parameters()\n # Select optimizer\n\n if self.hparams.l2_loss > 0:\n for p in ranking_model_params:\n self.rank_loss += self.hparams.l2_loss * self.l2_loss(p)\n self.loss = self.exam_loss + 
self.hparams.ranker_loss_weight * self.rank_loss\n\n        opt_denoise = self.optimizer_func(\n            learning_rate=self.propensity_learning_rate,\n            parameters=self.propensity_model.parameters(),\n            grad_clip=nn.ClipGradByNorm(clip_norm=0.5)\n        )\n        opt_ranker = self.optimizer_func(\n            learning_rate=self.learning_rate,\n            parameters=self.model.parameters(),\n            grad_clip=nn.ClipGradByNorm(clip_norm=0.5)\n        )\n\n        opt_denoise.clear_grad()\n        opt_ranker.clear_grad()\n\n        self.loss.backward()\n\n        opt_denoise.step()\n        opt_ranker.step()\n\n        # print(\"============= after update ===========\")\n        # for name, parms in self.model.named_parameters():\n        #     print('-->name:{} | size:{}'.format(name, parms.shape))\n        #     print('-->para:', parms)\n        #     print('-->stop_gradient:', parms.stop_gradient)\n        #     print('-->grad_value:', parms.grad)\n        #     print(\"===\")\n        # for name, parms in self.propensity_model.named_parameters():\n        #     print('-->name:{} | size:{}'.format(name, parms.shape))\n        #     print('-->para:', parms)\n        #     print('-->stop_gradient:', parms.stop_gradient)\n        #     print('-->grad_value:', parms.grad)\n        #     print(\"===\")\n\n    def train(self, input_feed):\n        \"\"\"Run a step of the model feeding the given inputs.\n\n        Args:\n            input_feed: (dictionary) A dictionary containing all the input feed data.\n\n        Returns:\n            The scalar loss for this step (outputs are discarded after the backward pass).\n\n        \"\"\"\n\n        # Build model\n        self.rank_list_size = self.exp_settings['selection_bias_cutoff'] + \\\n            self.exp_settings['negative_num']\n        self.model.train()\n        self.create_input_feed(input_feed, self.rank_list_size)\n\n        # start train\n        src = input_feed['src']\n        src_segment = input_feed['src_segment']\n        src_padding_mask = input_feed['src_padding_mask']\n        features = None\n        if self.combine:\n            features = input_feed['features'][:, :25]\n        q_freq = input_feed['q_freq']\n\n        train_output = self.model(src=src, src_segment=src_segment,\n                                  src_padding_mask=src_padding_mask, features=features)\n\n        if self.change_label != 'no':\n            all_features = input_feed['features']\n            if config.vote:\n                self.feature_id = 25\n            train_labels = self.process_target(\n                self.labels, all_features[:, self.feature_id],\n                pos_num=self.exp_settings['max_candidate_num'],\n                temperature=config.temperature, change_label=self.change_label,\n                delta=config.delta, mode=config.mode)\n        else:\n            train_labels = self.labels\n\n        train_output = paddle.reshape(\n            train_output, shape=[-1, self.max_candidate_num])\n\n        self.propensity_model.train()\n        propensity_labels = paddle.transpose(train_labels, perm=[1, 0])\n        self.propensity = self.propensity_model(\n            propensity_labels)\n        with paddle.no_grad():\n            self.propensity_weights = self.get_normalized_weights(\n                self.logits_to_prob(self.propensity))\n        self.rank_loss = self.loss_func(\n            train_output, train_labels, propensity_weights=self.propensity_weights)\n\n        # Compute examination loss\n        with paddle.no_grad():\n            self.relevance_weights = self.get_normalized_weights(\n                self.logits_to_prob(train_output))\n\n        self.exam_loss = self.loss_func(\n            self.propensity,\n            train_labels,\n            propensity_weights=self.relevance_weights\n        )\n\n        if (self.exp_settings['add_freq'] == \"True\"):\n            self.loss = (self.exp_settings['freq_k']+1) / (self.exp_settings['freq_b']+q_freq) * (\n                self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss)\n        else:\n            self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss\n\n        self.separate_gradient_update()\n\n        self.clip_grad_value(train_labels, clip_value_min=0, clip_value_max=1)\n        self.global_step += 1\n        return self.loss.item()\n
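\n    # Editor's note (illustrative): the two objectives are symmetric inverse-\n    # propensity-weighted losses -- rank_loss reweights clicks by\n    # w_i = p_1 / p_i from get_normalized_weights(), so a click at a position\n    # examined half as often as rank 1 counts twice as much, while exam_loss\n    # trains the propensity net with relevance-based weights in the same way.\n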
\n    def get_scores(self, input_feed):\n        self.model.eval()\n        src = input_feed['src']\n        src_segment = input_feed['src_segment']\n        src_padding_mask = input_feed['src_padding_mask']\n        features = None\n        if self.combine:\n            if self.rank_feature_size == 869:\n                features = input_feed['features'][:, :24]\n            else:\n                features = input_feed['features']\n        scores = self.model(src=src, src_segment=src_segment,\n                            src_padding_mask=src_padding_mask, features=features)\n        return scores\n\n    def state_dict(self):\n        return {'model': self.model.state_dict(), 'propensity_model': self.propensity_model.state_dict()}\n\n    def get_normalized_weights(self, propensity):\n        \"\"\"Compute inverse propensity weights relative to the first position.\n\n        Args:\n            propensity: (paddle.Tensor) A tensor of the same shape as `output` containing the weight of each element.\n\n        Returns:\n            (paddle.Tensor) A tensor containing the propensity weights.\n        \"\"\"\n        propensity_list = paddle.unbind(\n            propensity, axis=1)  # Compute propensity weights\n        pw_list = []\n        for i in range(len(propensity_list)):\n            pw_i = propensity_list[0] / propensity_list[i]\n            pw_list.append(pw_i)\n        propensity_weights = paddle.stack(pw_list, axis=1)\n        if self.hparams.max_propensity_weight > 0:\n            self.clip_grad_value(propensity_weights, clip_value_min=0,\n                                 clip_value_max=self.hparams.max_propensity_weight)\n        return propensity_weights\n","repo_name":"sunxiaojie99/wsdmcup-2023","sub_path":"baseline_model/learning_algorithm/dla.py","file_name":"dla.py","file_ext":"py","file_size_in_byte":12374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} {"seq_id":"18925567030","text":"#!/usr/bin/env python3\n\n__package__ = \"floatingutils.games\"\n__author__ = \"Hannah Ward\"\n\n\n##############\n#games.ai    #\n#Parser for  #\n#Phaser.io AI#\n##############\n\n\nimport json\nimport argparse\nimport re\nfrom floatingutils.log import Log\n\nlog = Log()\n\nlog.debug(\"Initialising Parser...\")\nparser = argparse.ArgumentParser(description='Parse a .ai file into JSON')\nparser.add_argument(\"filename\", help=\"The .ai file to process\")\nparser.add_argument('--game', default=\"game\",\n                    help='The name of your \\'game\\' instance - usually just game')\n\nparser.add_argument('--obj', help=\"The name of your game object that the AI will apply to\")\nparser.add_argument(\"--varname\", help=\"The variable name to be output\", default=\"ai\")\nparser.add_argument(\"--out\", default=\"ai.out\", help=\"The output filename\")\nlog.debug(\"Parser ready\")\n\nargs = parser.parse_args()\n\nlog.info(\"Processing {}\".format(args.filename))\nlog.info(\"Using game instance {}\".format(args.game))\nobj = args.obj or \"obj\"\nlog.info(\"Using in-game instance {}\".format(obj))\n\n\nclass AI:\n    def __init__(self, game_inst, object_name, aidata):\n        log.info(\"Initialising AI Class ({}.{})...\".format(game_inst, object_name)) \n        self.aidata = [x.strip() for x in aidata if x != \"\\n\" and x != '']\n        \n        self.breakarr = [] \n        self.breaks = 0\n        self.game_inst = game_inst\n        self.object_name = object_name \n        self.compile_regex()\n        self.phases = {} \n        self.process_data()\n        log.info(\"AI File parsed successfully\")\n\n    def compile_regex(self):\n        log.info(\"Compiling regular expressions...\")\n        self.re_phase = re.compile(r\"\\[[A-Za-z0-9_]*\\]\")\n    \n    def process_data(self):\n        log.info(\"AI processing...\")\n        log.line()\n        log.incIndent()\n        for line in self.aidata:\n            if self.re_phase.match(line):\n                try:\n                    log.info(self.phases[phase_name])\n                    log.info(\"Pushing {}\".format(self.breaks))\n
                    self.breakarr.append(self.breaks)\n                    self.breaks = 0\n                except NameError:\n                    pass\n                log.line(\"-\")\n                phase_name = line[1:-1]\n                log.info(\"Detected phase {}\".format(phase_name)) \n                self.phases[phase_name] = []\n                log.line(\"-\")\n            else:\n                p = (self.Phase(phase_name, line, self.breaks))\n                self.breaks = p.breaks\n                p = str(p)\n                self.phases[phase_name].append(p)\n        log.decIndent()\n        log.line()\n        self.breakarr.append(self.breaks)\n    def __repr__(self):\n        a = \"    phases:[\\n\".format(args.varname)\n        for i in self.phases:\n            x = \"    function() {{\\nif (this.alive) return;\\n\".format(obj)\n            x += \"    this.alive = true; \\n    console.log('Beginning {}');\\n\".format(i);\n            for j in self.phases[i]:\n                x += \"    \" + j + \";\\n\"\n            a += x + \"\\nthis.alive=false;\\n{}}},\\n\".format(\"}, this)\"*self.breakarr.pop())\n        a+= \"\\n],\"\n        return a\n    \n    class Phase:\n        def __init__(self, phasename, phaseinfo, brks):\n            \n            self.breaks = brks\n            self.phaseinfo = self.process(self.tokenise(phaseinfo))\n            self.phasename = phasename\n\n        def process(self, tokens):\n            if tokens[0] == \"set\":\n                ##Magic memes\n                return \"    {}.{} = {}\".format(obj, tokens[1], tokens[2])\n            else:\n                if len(tokens) == 1:\n                    tokens.append(\"\")\n                if tokens[0] == \"move\":\n                    return \"{}.add.tween({}).to({{x:{}.x+{}, y:{}.y+{}}}, {}).start()\".format(\n                        args.game, obj, obj, tokens[1], obj, tokens[2], tokens[3])\n                if tokens[0] == \"wait\":\n                    log.info(\"BREAK DETECTED {}\".format(self.breaks))\n                    x = \"    function() {{\\n\".format(\"update\", self.breaks,self.breaks);\n                    self.breaks += 1\n                    return \"\"\"\\n    {0}.time.events.add(Phaser.Timer.SECOND * {1}, {3}\\n\"\"\".format(args.game, int(tokens[1]), \"{}_{}\".format(\"update\", self.breaks-1),x)\n                return \"{}.{}({})\".format(obj, tokens[0], \",\".join(tokens[1:]))\n\n        def tokenise(self, info):\n            return info.split(\" \")\n\n        def __repr__(self):\n            return str(self.phaseinfo)\n\ntry:\n    with open(args.filename, \"r\") as f:\n        data = f.read()\nexcept FileNotFoundError:\n    log.error(\"Could not find file {} -- Make sure it exists\".format(args.filename))\n    raise SystemExit(1)\n\nai = AI(args.game, obj, data.split(\"\\n\"))\n\nupdatefunc = \"\"\"\\n    setup_{0}_ai: function(object) {{\n    {0} = object;\n}},\n\nupdate:function(){{\n    if (!this.alive) {{\n        var index = Math.floor(Math.random() * this.phases.length);\n        var func = this.phases[index]\n        \n        func(); \n}}}},\"\"\".format(obj, args.varname)\n\nwith open(args.out, \"w\") as f:\n    f.write(\"var {}_ai = {{\\n\".format(obj))\n    f.write(\"\\n{}:null,\\nalive:false,\\n\".format(obj))\n    f.write(str(ai))\n    f.write(updatefunc)\n    f.write(\"\\n}\")\n","repo_name":"FloatingGhost/Python-Utilities","sub_path":"floatingutils/games/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"39461029726","text":"#!/usr/bin/env python\nfrom SingleTopPolarization.Analysis.test_files import testfiles\nimport unittest\nfrom unittest import TestCase\nfrom subprocess import check_call\nimport os\n\nstpol_dir = os.environ.get(\"STPOL_DIR\")\n\ndef cmsrun(*args):\n    return check_call([\"cmsRun\"] + list(args))\n
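# Editor's note (illustrative): cmsrun() simply prefixes the \"cmsRun\" binary,\n# so cmsrun(\"cfg.py\", \"maxEvents=10\") execs [\"cmsRun\", \"cfg.py\", \"maxEvents=10\"]\n# and raises CalledProcessError on a non-zero exit status.\n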
\"maxEvents=%d\" % nev\n )\n ofile = self.ofdir + \"/test_step2_numEvent%d.root\" % nev\n assert(os.path.exists(ofile))\n\nif __name__==\"__main__\":\n unittest.main()\n\n","repo_name":"HEP-KBFI/stpol","sub_path":"tests/step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28159842246","text":"import typing\nfrom pathlib import Path\nimport sqlite3\nimport warnings\n\nfrom pantarei import ProgressReporter\n\nfrom . import StorageBackend\nfrom transformerz.struct import uint64\n\nsqliteTypeToPythonType = {\n\t\"INTEGER\": int,\n\t\"TEXT\": str,\n\t\"BLOB\": bytes,\n}\n\n\ndef genPythonTypeToSQLiteType(sqliteTypeToPythonType):\n\tres = {}\n\tfor k, v in sqliteTypeToPythonType.items():\n\t\tif isinstance(v, tuple):\n\t\t\tfor vv in v:\n\t\t\t\tres[vv] = k\n\t\telse:\n\t\t\tres[v] = k\n\treturn res\n\n\npythonTypeToSQLiteType = genPythonTypeToSQLiteType(sqliteTypeToPythonType)\n\n\nclass SQLiteBackend(StorageBackend):\n\t\"\"\"A backend using SQLite as a key-value storage\"\"\"\n\n\t__slots__ = (\"db\",)\n\n\tFILE_EXTENSIONS = (\"sqlite\",)\n\tBASE_ARG_TYPES = (sqlite3.Connection,)\n\tNATIVE_VALUE_TYPES = set(pythonTypeToSQLiteType.keys())\n\n\tclass Table(StorageBackend.Table):\n\t\t__slots__ = ()\n\n\t\tdef exists(self) -> bool:\n\t\t\treturn next(self.parent.db.execute(\"SELECT count(*) FROM `sqlite_master` WHERE `type`='table' AND `name`=?;\", (self.name,)))[0]\n\n\t\tdef create(self, keyType: type = str, valueType: type = bytes) -> None:\n\t\t\tself.parent.db.executescript(\n\t\t\t\tr\"\"\"create table `\"\"\" + self.name + \"\"\"` (\n\t\t\t\t\tkey \"\"\" + pythonTypeToSQLiteType[keyType] + \"\"\" PRIMARY KEY,\n\t\t\t\t\tval \"\"\" + pythonTypeToSQLiteType[keyType] + \"\"\"\n\t\t\t\t);\n\t\t\t\t\"\"\"\n\t\t\t)\n\n\t\tdef getInfo(self):\n\t\t\tres = self.parent.db.execute(\"PRAGMA table_info(`\" + self.name + \"`);\")\n\t\t\tres.row_factory = sqlite3.Row\n\t\t\treturn res\n\n\t\tdef getDataSize(self):\n\t\t\tres = self.parent.db.execute(\"SELECT sum(`pgsize`) as `total`, sum(`unused`) as `wasted` FROM `dbstat` WHERE name=?;\", (self.name,))\n\t\t\tres.row_factory = sqlite3.Row\n\t\t\tres = dict(next(res))\n\t\t\treturn res\n\n\t\tdef __len__(self) -> int:\n\t\t\tcur = self.parent.db.execute(\"select count(*) from `\" + self.name + \"`;\")\n\t\t\tres = next(cur)[0]\n\t\t\tcur.close()\n\t\t\treturn res\n\n\t\t@classmethod\n\t\tdef rawKeysBytes(cls, dbOrCur, name):\n\t\t\tfor rec in dbOrCur.execute(\"select `key` from `\" + name + \"`;\"):\n\t\t\t\tyield rec[0]\n\n\t\t@classmethod\n\t\tdef rawValuesBytes(cls, dbOrCur, name):\n\t\t\tfor rec in dbOrCur.execute(\"select `val` from `\" + name + \"`;\"):\n\t\t\t\tyield rec[0]\n\n\t\t@classmethod\n\t\tdef rawItemsBytes(cls, dbOrCur, name):\n\t\t\treturn dbOrCur.execute(\"select `key`, `val` from `\" + name + \"`;\")\n\n\t\tdef __iter__(self):\n\t\t\treturn self.keys()\n\n\t\tdef keys(self):\n\t\t\treturn self.__class__.rawKeysBytes(self.parent.db, self.name)\n\n\t\tdef values(self):\n\t\t\treturn self.__class__.rawValuesBytes(self.parent.db, self.name)\n\n\t\tdef items(self):\n\t\t\treturn self.__class__.rawItemsBytes(self.parent.db, self.name)\n\n\t\tdef __getitem__(self, key: str) -> bytes:\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\tcur = self.parent.db.execute(\"select `val` from `\" + self.name + \"` where `key` = ?;\", (key,))\n\t\t\t\t\tres = next(cur)[0]\n\t\t\t\t\treturn 
\n\t\tdef __setitem__(self, key: str, value: bytes):\n\t\t\tself.__class__.setRawBytes(self.parent.db, self.name, key, value)\n\n\t\t@classmethod\n\t\tdef setRawBytes(cls, dbOrCur, tableName, key: str, val: bytes):\n\t\t\treturn dbOrCur.execute(\"insert or replace into `\" + tableName + \"` (`key`, `val`) values (?, ?);\", (key, val))\n\n\t\tdef __delitem__(self, key) -> None:\n\t\t\tself.parent.db.execute(\n\t\t\t\t\"delete from `\" + self.name + \"` where `key` = ?;\",\n\t\t\t\t(key,)\n\t\t\t)\n\n\t\tdef drop(self) -> None:\n\t\t\tself.parent.db.execute(\"drop table `\" + self.name + \"`;\")\n\t\t\tself.parent.commit()\n\n\t\tdef applyToValues(self, funcName: str, progressReporter: ProgressReporter = None):\n\t\t\tself.parent.db.execute(\"replace into `\" + self.name + \"` (`key`, `val`) SELECT `key`, \" + funcName + \"(`val`) from `\" + self.name + \"`;\")\n\n\t\tdef getKeyType(self):\n\t\t\tfor r in self.getInfo():\n\t\t\t\tif r[\"name\"] == \"key\":\n\t\t\t\t\treturn sqliteTypeToPythonType[r[\"type\"]]\n\t\t\treturn None\n\n\tdef __init__(self, base: typing.Union[Path, str, sqlite3.Connection] = \"./cache.sqlite\", metaDataTableName: str = None) -> None:  # pylint:disable=super-init-not-called  # metaclass magic\n\t\tif isinstance(base, sqlite3.Connection):\n\t\t\tself.path = None\n\t\t\tself.db = base\n\t\telif isinstance(base, (str, Path)):\n\t\t\tself.path = base\n\t\t\tself.db = None\n\t\telse:\n\t\t\traise ValueError(\"`base` param must be either a path to base, or ':memory:', or a sqlite3.Connection object\")\n\n\tdef commit(self):\n\t\tself.db.commit()\n\n\tdef getSQLiteLibCompileOptions(self):\n\t\t\"\"\"\n\t\t{'COMPILER': 'gcc-5.2.0', 'ENABLE_COLUMN_METADATA': True, 'ENABLE_FTS3': True, 'ENABLE_FTS5': True, 'ENABLE_JSON1': True, 'ENABLE_RTREE': True, 'THREADSAFE': 1} for Anaconda for Windows\n\t\t{'COMPILER': 'gcc-8.1.0', 'ENABLE_ATOMIC_WRITE': True, 'ENABLE_COLUMN_METADATA': True, 'ENABLE_DBSTAT_VTAB': True, 'ENABLE_FTS3': True, 'ENABLE_FTS5': True, 'ENABLE_GEOPOLY': True, 'ENABLE_JSON1': True, 'ENABLE_LOAD_EXTENSION': True, 'ENABLE_MEMORY_MANAGEMENT': True, 'ENABLE_PREUPDATE_HOOK': True, 'ENABLE_RBU': True, 'ENABLE_RTREE': True, 'ENABLE_SESSION': True, 'ENABLE_SNAPSHOT': True, 'ENABLE_STAT4': True, 'ENABLE_STMTVTAB': True, 'ENABLE_UNKNOWN_SQL_FUNCTION': True, 'ENABLE_UNLOCK_NOTIFY': True, 'ENABLE_UPDATE_DELETE_LIMIT': True, 'HAVE_ISNAN': True, 'LIKE_DOESNT_MATCH_BLOBS': True, 'THREADSAFE': 1, 'USE_ALLOCA': True, \"ENABLE_DBPAGE_VTAB\": True} for self-compiled with MinGW-w64\n\t\t\"\"\"\n\n\t\tcur = self.db.execute(\"PRAGMA compile_options;\")\n\t\tres = {}\n\t\tfor r in cur:\n\t\t\tspl = r[0].split(\"=\")\n\t\t\tif len(spl) == 2:\n\t\t\t\ttry:\n\t\t\t\t\tspl[1] = int(spl[1])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tpass\n\t\t\t\tres[spl[0]] = spl[1]\n\t\t\telif len(spl) == 1:\n\t\t\t\tres[spl[0]] = True\n\t\t\telse:\n\t\t\t\tres[spl[0]] = spl[1:]\n\n\t\t# cannot be checked via PRAGMA compile_options\n\t\ttry:\n\t\t\tself.db.execute(\"select count(*) from `sqlite_dbpage`;\")\n\t\t\tres[\"ENABLE_DBPAGE_VTAB\"] = True\n\t\texcept sqlite3.OperationalError:\n\t\t\tpass\n\n\t\treturn res\n\n\tdef __enter__(self) -> \"SQLiteBackend\":\n\t\tif self.path is not 
None:\n\t\t\tself.db = sqlite3.connect(str(self.path))\n\t\t#self.db.isolation_level = None\n\t\t#compileOptions = self.getSQLiteLibCompileOptions()\n\t\treturn self\n\n\tdef __exit__(self, exc_class, exc, traceback) -> None:\n\t\tif self.path is not None:\n\t\t\tself.commit()\n\t\t\tself.db.close()\n\t\t\tself.db = None\n\n\tdef __del__(self) -> None:\n\t\ttry:\n\t\t\tif self.db is not None:\n\t\t\t\tself.__exit__(None, None, None)\n\t\texcept BaseException as ex:  # pylint:disable=broad-except\n\t\t\twarnings.warn(\"Exception when closing SQLite DB: \" + repr(ex))\n\n\tdef vacuum(self) -> None:\n\t\tself.db.execute(\"reindex;\")\n\t\tself.db.execute(\"vacuum;\")\n\n\tdef optimize(self) -> None:\n\t\tself.db.execute(\"PRAGMA optimize;\")\n\n\tdef createFunction(self, name, f):\n\t\tself.db.create_function(name, 1, f)\n\n\tdef beginTransaction(self):\n\t\tself.db.execute(\"begin;\")\n\n\tdef applyFunctionToTableValues(self, funcName, tableName):\n\t\tself.db.execute(\"replace into `\" + tableName + \"` (`key`, `val`) SELECT `key`, \" + funcName + \"(`val`) from `\" + tableName + \"`;\")\n","repo_name":"KOLANICH-libs/Cache.py","sub_path":"Cache/storageBackends/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"1249838139","text":"#!/usr/bin/python3\n\"\"\"Square module definition.\nThis module defines a simple `Square` class\n\"\"\"\n\n\nclass Square:\n    \"\"\"Square class definition\n\n    Attributes:\n        size (int): This is the size of ``Square``.\n    \"\"\"\n    def __init__(self, size=0, position=(0, 0)):\n        \"\"\"Class initialisation definition.\n\n        Args:\n            size (int): The size of ``Square``.\n            position (tuple): where the square is\n                i.e., the co-ordinate position(x, y)\n        \"\"\"\n        self.size = size\n        self.position = position\n\n    def __str__(self):\n        return self.pos_print()\n\n    @property\n    def size(self):\n        \"\"\"Size as the len of a side of a square\n        Raises:\n            TypeError: If ``size`` is not an integer\n            ValueError: If ``size`` < 0\n        \"\"\"\n        return self.__size\n\n    @size.setter\n    def size(self, value):\n        if isinstance(value, int) is not True:\n            raise TypeError(\"size must be an integer\")\n        if value < 0:\n            raise ValueError(\"size must be >= 0\")\n        self.__size = value\n\n    @property\n    def position(self):\n        \"\"\"co-ordinate(x, y) definition of ``Square``.\n        Raises:\n            TypeError: if value is not a tuple of 2 positive integers\n        \"\"\"\n        return self.__position\n\n    @position.setter\n    def position(self, value):\n        \"\"\"sets the co-ordinate(x, y) of ``Square``.\n        Args: value as tuple of 2 positive integers\n        Raises:\n            TypeError: if value is not a tuple of 2 positive integers.\n        \"\"\"\n        if isinstance(value, tuple) is not True:\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n        if len(value) != 2:\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n        if len([i for i in value if isinstance(i, int) and i >= 0]) != 2:\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n        self.__position = value\n\n    def area(self):\n        \"\"\"area method definition.\n        Returns:\n            int: The area of ``Square``.\n        \"\"\"\n        return self.__size ** 2\n\n    def pos_print(self):\n        \"\"\"returns the position in spaces\"\"\"\n        pos = \"\"\n        if self.size == 0:\n            return \"\\n\"\n        for w in range(self.position[1]):\n            pos += \"\\n\"\n        for w in range(self.size):\n            for i in range(self.position[0]):\n                pos += \" \"\n            for j in range(self.size):\n                pos += \"#\"\n            pos += \"\\n\"\n        return pos\n
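\n    # Editor's note (illustrative): with size=2 and position=(2, 1),\n    # pos_print() returns \"\\n  ##\\n  ##\\n\" -- one leading newline from the\n    # vertical offset, then two spaces before each row of '#' characters.\n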
end=\"\")\n","repo_name":"LowellUfot/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74021700104","text":"import pymongo\nfrom flask import Flask , request\nfrom flask_restful import Resource , Api,reqparse\nimport json\nimport datetime\n\nurl = \"mongodb://mumu:handsome1234@localhost:27017/admin\"\nclient = pymongo.MongoClient(url)\napp = Flask (__name__)\napi = Api(app)\n\nparser = reqparse.RequestParser()\nparser.add_argument('information')\n\ndb = client.admin.cpe_company_limited\n\nclass Registration(Resource):\n\tdef post(self):\n\t\ttoday = datetime.datetime.now()\n\t\targs = parser.parse_args()\n\t\tdata = json.loads(args['information'])\n\t\tdb.insert({\"id\":data['id'], \n\t\t\t\t\"firstname\":data['firstname'], \n\t\t\t\t\"lastname\":data['lastname'],\n\t\t\t\t\"password\":data['password']})\n\t\treturn {'firstname':data['firstname']}\n\nclass Login(Resource):\n\tdef post(self):\n\t\ttoday = datetime.datetime.now()\n\t\targs = parser.parse_args()\n\t\tdata = json.loads(args['information'])\n\t\tresult = db.find_one({\"id\":data['id'], \"password\":data['password']})\n\t\tdb.update({\"id\":data['id']} , {'$push':{\"list_work\":{'datetime':str(today)}}},upsert = True)\n\t\treturn {'firstname': result['firstname']} \n\nclass Search(Resource):\n\tdef post(self):\n\t\targs = parser.parse_args()\n\t\tdata = json.loads(args['information'])\n\t\tresult = db.find_one({\"id\":data['id']})\n\t\treturn {'firstname': result['firstname'], 'list_work':str(result['list_work'])}\n\napi.add_resource(Registration,'/api/regis')\napi.add_resource(Login,'/api/login')\napi.add_resource(Search,'/api/search')\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0' , port = 5000)\n\n","repo_name":"phadermchai/Python-Flask-RESTful-MongoDB-exercise","sub_path":"hw_mongoDB.py","file_name":"hw_mongoDB.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4950236931","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('speech', '0002_speechdetail'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='speechdetail',\n name='content',\n field=models.TextField(default=''),\n ),\n ]\n","repo_name":"zjnu/zjnucloud-api","sub_path":"speech/migrations/0003_auto_20151018_1251.py","file_name":"0003_auto_20151018_1251.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71570212426","text":"import imgFunc\n\nimgPath = \"./log_file/log.txt\"\nclassList = ['L', 'R', 'GO', 'STOP']\nga_main = imgFunc.GA_Main(imgPath, classList)\nga_main.train(50, 0.05)\n\n'''\nimgOperate = imgFunc.ImgOperate(imgPath)\nga = imgFunc.GA(10, 10, imgOperate, classList)\nga.create()\n\ndef seeAns(index):\n (dna1, dna2, loss1, loss2) = ga.getBestDNAs()\n print(\"True Ans: {}\". 
format(imgOperate.imgList[index].name))\n print(ga.runAns(imgOperate.imgList[index].data, dna1))\n print(ga.runAns(imgOperate.imgList[index].data, dna2))\n print(classList)\n\ndef train(trainNum, mutationRate = 0.3, selfGeneticRate = 0.8, averageRate = 0.2):\n for i in range(trainNum):\n print(\"Runing {}/{}\".format(i+1, trainNum))\n (dna1, dna2, loss1, loss2) = ga.getBestDNAs()\n if (loss1 < loss2):\n ga.reproduce(dna1, dna2, mutationRate, selfGeneticRate, averageRate)\n else:\n ga.reproduce(dna2, dna1, mutationRate, selfGeneticRate, averageRate)\n\n'''\n","repo_name":"FengodChen/SmartCar","sub_path":"LiaoChen_Tools/imgProc/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37822757936","text":"#coding=utf-8\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom preparedata import PrepareData\nfrom nets.ssd import g_ssd_model\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nobj= PrepareData()\n\nimage, filename,glabels,gbboxes,gdifficults,gclasses_face, localizations_face, gscores_face,\\\ngclasses_head, localizations_head, gscores_head,gclasses_body, localizations_body,\\\ngscores_body=obj.get_voc_2007_2012_train_data()\n\nssd_anchors = g_ssd_model.ssd_anchors_all_layers()\n\ninit = tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n sess.run(init)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n \n for i in range(5):\n img,picname,label,bbox,gclass,glocal,gscore=sess.run([image, filename,glabels,gbboxes,gclasses_face, localizations_face, gscores_face])\n \n b=np.zeros_like(img[0])\n b[:,:,1]=img[0][:,:,1]\n b[:,:,0]=img[0][:,:,2]\n b[:,:,2]=img[0][:,:,0]\n box=bbox[0]\n hh,ww=b.shape[:2]\n \n gboxes=[]\n for u in range(len(gclass)):\n gbox=glocal[u][0].reshape([-1,4])\n gcls=gclass[u][0].reshape([-1])\n gsc=gscore[u][0].reshape([-1])\n anchor_bboxes=ssd_anchors[u]\n yref, xref, href_src, wref_src = anchor_bboxes\n href=href_src/2.\n wref=wref_src/2.\n xref = np.reshape(xref, [-1])\n yref = np.reshape(yref, [-1])\n cx = gbox[:, 0] * wref * 0.1 + xref\n cy = gbox[:, 1] * href * 0.1 + yref\n w = wref * np.exp(gbox[:, 2] * 0.2)\n h = href * np.exp(gbox[:, 3] * 0.2)\n bboxes = np.zeros_like(gbox)\n bboxes[:, 0] = cy - h / 2.\n bboxes[:, 1] = cx - w / 2.\n bboxes[:, 2] = cy + h / 2.\n bboxes[:, 3] = cx + w / 2.\n \n for f in range(len(gcls)):\n if gcls[f]==1:\n gboxes.append(gbox[f])\n cv2.rectangle(b,(int(bboxes[f][1]*640),int(bboxes[f][0]*640)),(int(bboxes[f][3]*640),int(bboxes[f][2]*640)),(0,0,255),3) \n \n for j in range(len(box)):\n cv2.rectangle(b,(int(box[j][1]*ww),int(box[j][0]*hh)),(int(box[j][3]*ww),int(box[j][2]*hh)),(0,255,0),3)\n cv2.imshow('test',b.astype(np.uint8))\n cv2.waitKey(0)\n \n coord.request_stop()\n coord.join(threads)\n","repo_name":"EricZgw/PyramidBox","sub_path":"check_data_io.py","file_name":"check_data_io.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"81"} +{"seq_id":"4777813484","text":"from setuptools import setup, find_packages\n\nrequired_packages = [\n \"graphql-core>=3.0,<3.3\",\n \"graphql-server>=2.0,<3.3\",\n \"sanic>=21.12,<24\",\n]\n\ntests_require = [\"pytest>=2.7.3\", \"aiohttp>=3.5.0,<4\", \"yarl>=1.0,<2.0\", \"Jinja2>=2.10.1\"]\n\nsetup(\n name=\"Sanic-GraphQL\",\n version=\"1.3.0\",\n description=\"Adds GraphQL support to your Sanic application\",\n 
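# NOTE (editor): open() here assumes README.rst ships with the sdist,\n    # i.e. the file must be present wherever this setup.py is executed.\n    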
long_description=open(\"README.rst\", encoding=\"utf-8\").read(),\n url=\"https://github.com/graphql-python/sanic-graphql\",\n download_url=\"https://github.com/graphql-python/sanic-graphql/releases\",\n author=\"Sergey Porivaev\",\n author_email=\"porivaevs@gmail.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords=\"api graphql protocol sanic\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=required_packages,\n tests_require=tests_require,\n extras_require={\"test\": tests_require},\n include_package_data=True,\n platforms=\"any\",\n)\n","repo_name":"Heatstealer/sanic-graphql","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10354016710","text":"import argparse\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nimport torch.backends.cudnn as cudnn\nfrom utils import is_image_file, load_img, save_img\nfrom torchvision import utils as vutils\nfrom unet_g import define_G, define_D, get_scheduler, update_learning_rate\nfrom Dataset.data import get_training_set\nfrom loss.ganloss import GANLoss\nfrom loss.vggloss import *\n\nparser = argparse.ArgumentParser(description='multi-cascade')\nparser.add_argument('--dataset', required=True, help='images')\nparser.add_argument('--batch_size', type=int, default=1, help='training batch size')\nparser.add_argument('--direction', type=str, default='b2a', help='a2b or b2a')\nparser.add_argument('--input_nc', type=int, default=1, help='input image channels')\nparser.add_argument('--output_nc', type=int, default=1, help='output image channels')\nparser.add_argument('--ngf', type=int, default=64, help='generator filters in first conv layer')\nparser.add_argument('--ndf', type=int, default=64, help='discriminator filters in first conv layer')\nparser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count')\nparser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')\nparser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')\nparser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')\nparser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')\nparser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', help='use cuda?')\nparser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. 
Default=123')\nparser.add_argument('--lamb', type=int, default=100, help='weight on L1 term in objective')\nparser.add_argument('--outf', default='./tranre', help='folder to output images and model checkpoints')\nparser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')\nopt = parser.parse_args()\n\n\nprint(opt)\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nif opt.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n\ncudnn.benchmark = True\n\ntorch.manual_seed(opt.seed)\nif opt.cuda:\n torch.cuda.manual_seed(opt.seed)\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nprint('===> Loading datasets')\nroot_path = \"dataset/\"\ntrain_set = get_training_set(root_path + opt.dataset, opt.direction)\ntraining_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)\n\n\ndevice = torch.device(\"cuda:0\" if opt.cuda else \"cpu\")\n\nprint('===> Building models')\n\nnet_g1 = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, 'normal', 0.02, gpu_id=device)\nnet_g2 = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, 'normal', 0.02, gpu_id=device)\nnet_g3 = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, 'normal', 0.02, gpu_id=device)\n\n\nnet_d = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'basic', gpu_id=device)\n\nvgg_loss = VGGLoss().to(device)\ncriterionGAN = GANLoss(opt.gan_mode).to(device)\ncriterionL1 = nn.L1Loss().to(device)\ncriterionMSE = nn.MSELoss().to(device)\n\noptimizer_g = optim.Adam(net_g1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizer_g2 = optim.Adam(net_g2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizer_g3 = optim.Adam(net_g3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\noptimizer_d = optim.Adam(net_d.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\nnet_g1_scheduler = get_scheduler(optimizer_g, opt)\nnet_g2_scheduler = get_scheduler(optimizer_g2, opt)\nnet_g3_scheduler = get_scheduler(optimizer_g3, opt)\n\nnet_d_scheduler = get_scheduler(optimizer_d, opt)\n\n\nfor epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n # train\n for iteration, batch in enumerate(training_data_loader, 1):\n # forward\n real_a, real_b = batch[0].to(device), batch[1].to(device)\n fake_b1 = net_g1((real_a))\n fake_b2 = net_g2(fake_b1)\n fake_b3 = net_g3(fake_b2)\n\n ######################\n # (1) Update D network\n ######################\n optimizer_d.zero_grad()\n \n # train with fake\n fake_ab = torch.cat((real_a, fake_b1), 1)\n pred_fake = net_d.forward(fake_ab.detach())\n loss_d_fake = criterionGAN(pred_fake, False)\n\n # train with real\n real_ab = torch.cat((real_a, real_b), 1)\n pred_real = net_d.forward(real_ab)\n loss_d_real = criterionGAN(pred_real, True)\n \n # Combined D loss\n loss_d_1 = (loss_d_fake + loss_d_real) * 0.5\n ######################\n # (2) Update G network\n ######################\n\n optimizer_g.zero_grad()\n optimizer_g2.zero_grad()\n optimizer_g3.zero_grad()\n fake_ab = torch.cat((real_a, fake_b1), 1)\n pred_fake = net_d.forward(fake_ab)\n loss_g_gan = criterionGAN(pred_fake, 
True)\n        loss_g_l1 = criterionL1(fake_b1, real_b) * opt.lamb\n\n        loss_v = vgg_loss(fake_b1, real_b)\n        loss_g_1 = loss_g_gan + loss_g_l1 + loss_v\n        ######################\n        # (2) Update D network\n        ######################\n        \n        # # train with fake\n        fake_ab2 = torch.cat((real_a, fake_b2), 1)\n        pred_fake2 = net_d.forward(fake_ab2.detach())\n        loss_d_fake2 = criterionGAN(pred_fake2, False)\n\n        # # train with real\n        real_ab = torch.cat((real_a, real_b), 1)\n        pred_real2 = net_d.forward(real_ab)\n        loss_d_real2 = criterionGAN(pred_real2, True)\n\n        loss_d_2 = (loss_d_fake2 + loss_d_real2) * 0.5\n\n        # # ######################\n        # (2) Update G2 network\n        # # ######################\n        fake_ab2 = torch.cat((real_a, fake_b2), 1)\n        pred_fake2 = net_d.forward(fake_ab2)\n        loss_g_gan_2 = criterionGAN(pred_fake2, True)\n        loss_g_l1_2 = criterionL1(fake_b2, real_b) * opt.lamb\n        loss_v_2 = vgg_loss(fake_b2, real_b)\n        loss_g_2 = loss_g_gan_2 + loss_g_l1_2 + loss_v_2\n        # ######################\n        # # (3) Update D network\n        # ######################\n        fake_ab3 = torch.cat((real_a, fake_b3), 1)\n        pred_fake3 = net_d.forward(fake_ab3.detach())\n        loss_d_fake3 = criterionGAN(pred_fake3, False)\n        real_ab = torch.cat((real_a, real_b), 1)\n        pred_real3 = net_d.forward(real_ab)\n        loss_d_real3 = criterionGAN(pred_real3, True)  # was pred_real: D3's real loss must score pred_real3\n        loss_d_3 = (loss_d_fake3 + loss_d_real3) * 0.5\n        loss_d = loss_d_1+loss_d_2+loss_d_3\n        loss_d.backward(retain_graph=True)\n        optimizer_d.step()\n        # # # ######################\n        # (2) Update G3 network\n        # # # ######################\n        fake_ab3 = torch.cat((real_a, fake_b3), 1)\n        pred_fake3 = net_d.forward(fake_ab3)\n        loss_g_gan_3 = criterionGAN(pred_fake3, True)\n\n        # # # # Second, G(A) = B\n        loss_g_l1_3 = criterionL1(fake_b3, real_b) * opt.lamb\n        loss_v_3 = vgg_loss(fake_b3, real_b)\n        loss_g_3 = loss_g_gan_3 + loss_g_l1_3 + loss_v_3 \n        loss_g = 0.1667*loss_g_1 + 0.3333*loss_g_2 + 0.5*loss_g_3 \n        loss_g.backward(retain_graph=True)\n\n        optimizer_g.step()\n        optimizer_g2.step()\n        optimizer_g3.step()\n\n        print(\"===> Epoch[{}]({}/{}): Loss_D: {:.4f} Loss_G: {:.4f} \".format(\n            epoch, iteration, len(training_data_loader), loss_d.item(), loss_g.item()))\n    \n    update_learning_rate(net_g1_scheduler, optimizer_g)\n    update_learning_rate(net_g2_scheduler, optimizer_g2)\n    update_learning_rate(net_g3_scheduler, optimizer_g3)\n    update_learning_rate(net_d_scheduler, optimizer_d)\n    #checkpoint\n    if epoch % 1 == 0:\n        if not os.path.exists(\"checkpoint\"):\n            os.mkdir(\"checkpoint\")\n        if not os.path.exists(os.path.join(\"checkpoint\", opt.dataset)):\n            os.mkdir(os.path.join(\"checkpoint\", opt.dataset))\n        net_g1_model_out_path = \"checkpoint/{}/netG1_model_epoch_{}.pth\".format(opt.dataset, epoch)\n        net_g2_model_out_path = \"checkpoint/{}/netG2_model_epoch_{}.pth\".format(opt.dataset, epoch)\n        net_g3_model_out_path = \"checkpoint/{}/netG3_model_epoch_{}.pth\".format(opt.dataset, epoch)\n        net_d_model_out_path = \"checkpoint/{}/netD_model_epoch_{}.pth\".format(opt.dataset, epoch)\n        torch.save(net_g1, net_g1_model_out_path)\n        torch.save(net_g2, net_g2_model_out_path)\n        torch.save(net_g3, net_g3_model_out_path)\n        torch.save(net_d, net_d_model_out_path)\n        print(\"Checkpoint saved to {}\".format(\"checkpoint\" + opt.dataset))\n\n\n","repo_name":"wangyu719/CmmcSegNet","sub_path":"Multi-cascade pix2pix/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25129358014","text":"from data.daos import 
location_dao as loc_dao\r\nfrom lib import formatting as fmt\r\nfrom lib import masters_data_analytics_lib as mlib\r\nfrom lib import plot_tools as plttool\r\nfrom lib import stats as stats\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.colors import ListedColormap\r\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)\r\nfrom scipy import stats\r\n\r\nimport colorcet as cc\r\nimport logging\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\ndef generate_report_artefacts(session_id\r\n , report_context\r\n , properties\r\n , **kwargs): \r\n \"\"\"\r\n Manager to create the formatted String data to include in the sd_general_report_data\r\n \"\"\"\r\n ## These are the pandas dataframes used in this manager. They may be used in others too\r\n validated_search_term = report_context[\"validated_search_term\"]\r\n\r\n city = validated_search_term[\"city\"]\r\n borough = validated_search_term[\"borough\"]\r\n ward_name = validated_search_term[\"ward_name\"]\r\n post_code = validated_search_term[\"post_code\"]\r\n\r\n year_from = validated_search_term[\"year_from\"]\r\n year_to = validated_search_term[\"year_to\"]\r\n\r\n all_ward_post_codes = report_context[\"all_ward_post_codes\"]\r\n all_borough_wards = report_context[\"all_borough_wards\"]\r\n other_post_codes = report_context[\"other_post_codes\"]\r\n other_wards = report_context[\"other_wards\"]\r\n number_of_boroughs = report_context[\"number_of_boroughs\"]\r\n\r\n ###\r\n ### LOCATION SECTION\r\n ###\r\n ### We have a post code\r\n if post_code != \"\":\r\n location_field_01 = \"The post code {} belongs to the ward {} and borough {} within the city of {}. There {} {} other post code{} which the following data is part of. {} being {}. There are {} other ward{} in the borough which are {}\" \\\r\n .format(post_code\r\n , ward_name\r\n , borough\r\n , city.capitalize()\r\n , (\"are\" if len(all_ward_post_codes)>1 else \"is\")\r\n , len(other_post_codes)\r\n , (\"s\" if len(other_post_codes) > 1 else \"\")\r\n , (\"These\" if len(other_post_codes)>1 else \"This\")\r\n , fmt.series_format(other_post_codes)\r\n , len(other_wards)\r\n , (\"s\" if len(other_wards) > 1 else \"\")\r\n , fmt.series_format(other_wards))\r\n ### We have ward\r\n else:\r\n location_field_01 = \"The ward {} belongs to the borough {} within the city of {}. 
There are {} other ward{} in the borough which are {}\" \\\r\n .format(ward_name\r\n , borough\r\n , city.capitalize()\r\n , len(other_wards)\r\n , (\"s\" if len(other_wards) > 1 else \"\")\r\n , fmt.series_format(other_wards))\r\n \r\n ## Choose the column to rank by \r\n if post_code != \"\":\r\n ranking_column = \"oacode_sum\"\r\n else: \r\n ranking_column = \"ward_sum\"\r\n \r\n ###\r\n ### POPULATION SECTION\r\n ###\r\n pop_all_stats = report_context[\"pop_all_stats\"] \r\n pop_male_stats = report_context[\"pop_male_stats\"] \r\n pop_female_stats = report_context[\"pop_female_stats\"] \r\n pop_density_stats = report_context[\"pop_density_stats\"] \r\n \r\n pop_top = report_context[\"pop_top\"] \r\n pop_this = report_context[\"pop_this\"]\r\n pop_bottom = report_context[\"pop_bottom\"]\r\n\r\n ### Stats for the searched for borough \r\n population_field_01_part_01 = \"The population density of {} is ranked {:g} of {} at {:.2f}\" \\\r\n .format(borough, pop_this[\"rank\"].values[0], number_of_boroughs, round(pop_this[\"total\"].values[0], 2))\r\n \r\n population_field_01_part_02 = \"which is {} the average borough population density of {:.2f}.\".format(\"above\" if round(pop_this[\"total\"].values[0], 2) > pop_density_stats[\"borough_mean\"] else \"below\", pop_density_stats[\"borough_mean\"])\r\n \r\n ### If it's not the first then display the first\r\n population_field_01_part_03 = \"\"\r\n \r\n if pop_this[\"rank\"].values[0] != 1:\r\n population_field_01_part_03 = \"{} has the highest population density at {:.2f}.\" \\\r\n .format(pop_top.iloc[0][\"borough\"], round(pop_top.iloc[0][\"total\"]), 2)\r\n \r\n population_field_01_part_04 = \"\"\r\n \r\n if pop_this[\"rank\"].values[0] != number_of_boroughs:\r\n population_field_01_part_04 =\"{} has the lowest population density at {:.2f}.\" \\\r\n .format(pop_bottom.iloc[-1][\"borough\"], round(pop_bottom.iloc[-1][\"total\"]), 2)\r\n\r\n #### GENDER \r\n pop_male_ward_ratio = report_context[\"pop_male_ward_ratio\"]\r\n pop_female_ward_ratio = report_context[\"pop_female_ward_ratio\"]\r\n \r\n pop_male_borough_ratio = report_context[\"pop_male_borough_ratio\"] \r\n pop_female_borough_ratio = report_context[\"pop_female_borough_ratio\"]\r\n\r\n pop_male_city_ratio = report_context[\"pop_male_city_ratio\"]\r\n pop_female_city_ratio = report_context[\"pop_female_city_ratio\"]\r\n\r\n #### WARD LEVEL\r\n population_field_01_part_06 = \"\"\r\n ### What to print\r\n if pop_male_ward_ratio > pop_female_ward_ratio:\r\n population_field_01_part_06 = \"Males account for {:g}% of the ward population. Females account for {:g}%.\"\\\r\n .format(pop_male_ward_ratio, pop_female_ward_ratio)\r\n \r\n elif pop_male_ward_ratio < pop_female_ward_ratio:\r\n population_field_01_part_06 = \"Females account for {:g}% of the ward population. Males account for {:g}%.\"\\\r\n .format(pop_female_ward_ratio, pop_male_ward_ratio)\r\n else:\r\n population_field_01_part_06 = \"Males and females are equal at the ward level.\"\r\n \r\n ### BOROUGH LEVEL\r\n population_field_01_part_07 = \"\"\r\n ### What to print\r\n if pop_male_borough_ratio > pop_female_borough_ratio:\r\n population_field_01_part_07 = \"Males account for {:g}% of the borough population, which is {} the average of {:g}% at borough level. 
Females account for {:g}% which is {} the average of {:g}% at borough level.\"\\\r\n .format(pop_male_borough_ratio, fmt.hls_str(pop_male_borough_ratio, pop_male_city_ratio), \\\r\n pop_male_city_ratio, pop_female_borough_ratio, fmt.hls_str(pop_female_borough_ratio, pop_female_city_ratio), pop_female_city_ratio)\r\n \r\n elif pop_male_borough_ratio < pop_female_borough_ratio:\r\n population_field_01_part_07 = \"Females account for {:g}% of the borough population, which is {} the average of {:g}% at borough level. Males account for {:g}% which is {} the average of {:g}% at borough level.\"\\\r\n .format(pop_female_borough_ratio, fmt.hls_str(pop_female_borough_ratio, pop_female_city_ratio),\\\r\n pop_female_city_ratio, pop_male_borough_ratio, fmt.hls_str(pop_male_borough_ratio, pop_male_city_ratio), pop_male_city_ratio)\r\n else:\r\n population_field_01_part_07 =\"Males and females are equal for the borough. The borough level average is males {:g}% and females {:g}%.\"\\\r\n .format(pop_male_city_ratio, pop_female_city_ratio)\r\n \r\n ### COMBINED\r\n population_field_01 = population_field_01_part_01 + \" \" + \\\r\n population_field_01_part_02 + \" \" + \\\r\n population_field_01_part_03 + \" \" + \\\r\n population_field_01_part_04 + \" \" + \\\r\n population_field_01_part_06 + \" \" + \\\r\n population_field_01_part_07 \r\n \r\n ###\r\n ### POPULATION PLOTS\r\n ###\r\n from lib import plot_tools as plt_tool\r\n \r\n data = [\r\n pop_male_ward_ratio\r\n , pop_female_ward_ratio\r\n , pop_male_borough_ratio\r\n , pop_female_borough_ratio\r\n , pop_male_city_ratio\r\n , pop_female_city_ratio\r\n ]\r\n \r\n names = [\"ward\", \"borough\", \"borough average\"]\r\n options = [\"male\", \"female\"]\r\n title = \"Gender Population - Ward, Borough & Borough Average\"\r\n props = lambda key: {\"color\": \"orange\" if \"male\" in key else \"deepskyblue\"}\r\n \r\n mekko_gender_borough_plot_file = \"./reports/generation/images/{}_mekko_gender_borough_{}_{}_{}.png\".format(session_id, city, borough, ward_name)\r\n mekko_chart_file = plt_tool.mekko_chart(data=data, names=names, options=options, title=title, props=props) \r\n mlib.save_plot_filename(plot=mekko_chart_file, filename=mekko_gender_borough_plot_file, save_artefacts=True)\r\n \r\n ###\r\n ### HOUSEHOLD SECTION\r\n ###\r\n hous_commerical_stats = report_context[\"hous_commerical_stats\"]\r\n hous_detatched_stats = report_context[\"hous_detatched_stats\"]\r\n hous_flat_stats = report_context[\"hous_flat_stats\"]\r\n hous_semi_stats = report_context[\"hous_semi_stats\"]\r\n hous_terraced_stats = report_context[\"hous_terraced_stats\"]\r\n \r\n ## Create a data frame of the findings\r\n household_location_stats_columns = [\"household_type\", \"d_nd\", \"oacode_sum\", \"ward_sum\", \"borough_sum\", \"city_borough_mean\", \"city_sum\" ]\r\n household_location_stats_data = [\r\n [ \"commercial\", \"nd\", hous_commerical_stats[\"oacode_sum\"], hous_commerical_stats[\"ward_sum\"], hous_commerical_stats[\"borough_sum\"], hous_commerical_stats[\"borough_mean\"], hous_commerical_stats[\"city_sum\"]],\r\n [ \"detached\", \"d\", hous_detatched_stats[\"oacode_sum\"], hous_detatched_stats[\"ward_sum\"], hous_detatched_stats[\"borough_sum\"], hous_detatched_stats[\"borough_mean\"], hous_detatched_stats[\"city_sum\"]],\r\n [ \"flat\", \"d\", hous_flat_stats[\"oacode_sum\"], hous_flat_stats[\"ward_sum\"], hous_flat_stats[\"borough_sum\"], hous_flat_stats[\"borough_mean\"], hous_flat_stats[\"city_sum\"]],\r\n [\"semi-detached\", \"d\", 
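# \"d\"/\"nd\" flags dwelling vs non-dwelling rows; \"nd\" rows (commercial) are filtered out below\r\n      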
hous_semi_stats[\"oacode_sum\"], hous_semi_stats[\"ward_sum\"], hous_semi_stats[\"borough_sum\"], hous_semi_stats[\"borough_mean\"], hous_semi_stats[\"city_sum\"]],\r\n [ \"terraced\", \"d\", hous_terraced_stats[\"oacode_sum\"], hous_terraced_stats[\"ward_sum\"], hous_terraced_stats[\"borough_sum\"], hous_terraced_stats[\"borough_mean\"], hous_terraced_stats[\"city_sum\"]]\r\n ]\r\n\r\n household_location_stats_df = pd.DataFrame(household_location_stats_data, columns=household_location_stats_columns)\r\n \r\n ## Remove the non dwelling data i.e. Commercial buildings\r\n ## And sort descending\r\n household_location_d_stats_df = household_location_stats_df.loc[household_location_stats_df[\"d_nd\"] == \"d\"]\r\n household_location_d_stats_df = household_location_d_stats_df.sort_values(by=[ranking_column], ascending=False)\r\n \r\n ## Build the report text\r\n house_hold = []\r\n household_type_pretty = {\"detached\":\"Detached\", \"flat\":\"Flat\", \"semi-detached\":\"Semi Detached\", \"terraced\":\"Terraced\"}\r\n for i in range(0, 4):\r\n if post_code != \"\":\r\n str = \"{} - post codes:{} - ward:{} - borough:{} - borough avg:{}\".format(\r\n household_type_pretty[household_location_d_stats_df.iloc[i][\"household_type\"]], \\\r\n household_location_d_stats_df.iloc[i][\"oacode_sum\"], \\\r\n household_location_d_stats_df.iloc[i][\"ward_sum\"], \\\r\n household_location_d_stats_df.iloc[i][\"borough_sum\"], \\\r\n household_location_d_stats_df.iloc[i][\"city_borough_mean\"], \\\r\n )\r\n else:\r\n str = \"{} - ward:{} - borough:{} - borough avg:{}\".format(\r\n household_type_pretty[household_location_d_stats_df.iloc[i][\"household_type\"]], \\\r\n household_location_d_stats_df.iloc[i][\"ward_sum\"], \\\r\n household_location_d_stats_df.iloc[i][\"borough_sum\"], \\\r\n household_location_d_stats_df.iloc[i][\"city_borough_mean\"], \\\r\n )\r\n \r\n house_hold.append(str)\r\n \r\n ###\r\n ### EDUCATION SECTION\r\n ###\r\n edu_unknown_stats = report_context[\"edu_unknown_stats\"]\r\n edu_none_stats = report_context[\"edu_none_stats\"]\r\n edu_level1_stats = report_context[\"edu_level1_stats\"]\r\n edu_level2_stats = report_context[\"edu_level2_stats\"]\r\n edu_level3_stats = report_context[\"edu_level3_stats\"]\r\n edu_level4_stats = report_context[\"edu_level4_stats\"]\r\n edu_other_stats = report_context[\"edu_other_stats\"]\r\n \r\n ## Create a data frame of the findings\r\n qualification_location_stats_columns = [\"qualification_type\", \"oacode_sum\", \"ward_sum\", \"borough_sum\", \"city_borough_mean\", \"city_sum\" ]\r\n qualification_location_stats_data = [\r\n [ \"no_qualification\", edu_none_stats[\"oacode_sum\"], edu_none_stats[\"ward_sum\"], edu_none_stats[\"borough_sum\"], edu_none_stats[\"borough_mean\"], edu_none_stats[\"city_sum\"]],\r\n [ \"level1\", edu_level1_stats[\"oacode_sum\"], edu_level1_stats[\"ward_sum\"], edu_level1_stats[\"borough_sum\"], edu_level1_stats[\"borough_mean\"], edu_level1_stats[\"city_sum\"]],\r\n [ \"level2\", edu_level2_stats[\"oacode_sum\"], edu_level2_stats[\"ward_sum\"], edu_level2_stats[\"borough_sum\"], edu_level2_stats[\"borough_mean\"], edu_level2_stats[\"city_sum\"]],\r\n [ \"level3\", edu_level3_stats[\"oacode_sum\"], edu_level3_stats[\"ward_sum\"], edu_level3_stats[\"borough_sum\"], edu_level3_stats[\"borough_mean\"], edu_level3_stats[\"city_sum\"]],\r\n [ \"level4\", edu_level4_stats[\"oacode_sum\"], edu_level4_stats[\"ward_sum\"], edu_level4_stats[\"borough_sum\"], edu_level4_stats[\"borough_mean\"], edu_level4_stats[\"city_sum\"]],\r\n 
[\"other_qualifications\", edu_other_stats[\"oacode_sum\"], edu_other_stats[\"ward_sum\"], edu_other_stats[\"borough_sum\"], edu_other_stats[\"borough_mean\"], edu_other_stats[\"city_sum\"]]\r\n ]\r\n \r\n qualification_location_stats_df = pd.DataFrame(qualification_location_stats_data, columns=qualification_location_stats_columns)\r\n \r\n ## Sort descending\r\n qualification_location_stats_df = qualification_location_stats_df.sort_values(by=[ranking_column], ascending=False)\r\n \r\n ## Build the report text\r\n education = []\r\n qualification_type_pretty = {\"no_qualification\":\"No qualifications\", \"level1\":\"Level 1\", \"level2\":\"Level 2\", \"level3\":\"Level 3\", \"level4\":\"Level 4\", \"other_qualifications\":\"Other qualifications\"}\r\n for i in range(0, 6):\r\n if post_code != \"\":\r\n str = \"{} - post codes:{} - ward:{} - borough:{} - borough avg:{}\".format(\r\n qualification_type_pretty[qualification_location_stats_df.iloc[i][\"qualification_type\"]], \\\r\n qualification_location_stats_df.iloc[i][\"oacode_sum\"], \\\r\n qualification_location_stats_df.iloc[i][\"ward_sum\"], \\\r\n qualification_location_stats_df.iloc[i][\"borough_sum\"], \\\r\n qualification_location_stats_df.iloc[i][\"city_borough_mean\"]\r\n )\r\n else:\r\n str = \"{} - ward:{} - borough:{} - borough avg:{}\".format(qualification_type_pretty[qualification_location_stats_df.iloc[i][\"qualification_type\"]], \\\r\n qualification_location_stats_df.iloc[i][\"ward_sum\"], \\\r\n qualification_location_stats_df.iloc[i][\"borough_sum\"], \\\r\n qualification_location_stats_df.iloc[i][\"city_borough_mean\"]\r\n )\r\n \r\n education.append(str)\r\n \r\n \r\n \r\n ###\r\n ### EARNINGS\r\n ### \r\n \r\n ## Retrieve the data\r\n borough_salary_ranking_by_year_df = report_context[\"borough_salary_ranking_by_year_df\"]\r\n \r\n ## Generate the plot\r\n borough_earnings_ranking_filtered_year_df = borough_salary_ranking_by_year_df[(borough_salary_ranking_by_year_df[\"YEAR\"].astype(int) >= year_from) & (borough_salary_ranking_by_year_df[\"YEAR\"].astype(int) <= year_to)]\r\n \r\n years_sorted = borough_earnings_ranking_filtered_year_df[\"YEAR\"].sort_values().drop_duplicates()\r\n first_year = years_sorted.iloc[0]\r\n borough_sort_order = borough_earnings_ranking_filtered_year_df.loc[borough_earnings_ranking_filtered_year_df[\"YEAR\"]==first_year][\"BOROUGH\"]\r\n top_borough = borough_sort_order.iloc[0]\r\n mid_borough = borough_sort_order.iloc[16]\r\n bottom_borough = borough_sort_order.iloc[-1]\r\n \r\n ## Loop through the boroughs in the sort order of first year\r\n all_borough_ranking_by_year = []\r\n \r\n ## BOROUGH LOOP\r\n for _borough in borough_sort_order:\r\n \r\n borough_ranking_by_year_list = []\r\n \r\n ## Add the Borough\r\n borough_ranking_by_year_list.append(_borough)\r\n \r\n ## YEAR LOOP\r\n for year in years_sorted:\r\n \r\n borough_ranking_for_year = borough_earnings_ranking_filtered_year_df.loc[(borough_earnings_ranking_filtered_year_df[\"BOROUGH\"]==_borough) &\r\n (borough_earnings_ranking_filtered_year_df[\"YEAR\"]==year)][\"RANK\"].values[0]\r\n \r\n borough_ranking_by_year_list.append(borough_ranking_for_year)\r\n \r\n all_borough_ranking_by_year.append(borough_ranking_by_year_list)\r\n \r\n columns = [\"Borough\"] + list(years_sorted)\r\n \r\n all_borough_ranking_by_year_df = pd.DataFrame(all_borough_ranking_by_year, columns=columns).set_index(\"Borough\")\r\n \r\n ## Report Version which contains 3-4 values\r\n \r\n tmb_borough_ranking_by_year_df = 
all_borough_ranking_by_year_df.copy()\r\n\r\n tmb_borough_ranking_by_year_df = tmb_borough_ranking_by_year_df.loc[(tmb_borough_ranking_by_year_df.index == top_borough) | \r\n (tmb_borough_ranking_by_year_df.index == mid_borough) |\r\n (tmb_borough_ranking_by_year_df.index == borough) | \r\n (tmb_borough_ranking_by_year_df.index == bottom_borough)]\r\n \r\n borough_ranking_by_year_df = tmb_borough_ranking_by_year_df.loc[(tmb_borough_ranking_by_year_df.index == borough)]\r\n \r\n ## Textual trend based on linear regression coefficient\r\n ## -ve then increasing i.e. going from a lower to higher ranking per capita\r\n ## +ve then decreasing i.e. going from a higher to lower ranking per capita\r\n ## 0 then flat\r\n \r\n y = [borough_ranking_by_year_df.loc[borough].tolist()]\r\n x = [int(x) for x in borough_ranking_by_year_df.columns.tolist()]\r\n\r\n slope, intercept, r, p, std_err = stats.linregress(x, y)\r\n\r\n rate = \"\"\r\n directio = \"\"\r\n if slope == 0:\r\n earnings_direction = \"flat\"\r\n rate = \"have stayed the same\"\r\n elif slope < 0: \r\n earnings_direction = \"up\"\r\n if slope >= -0.1:\r\n rate = \"have slightly increased\"\r\n elif slope >= - 0.5:\r\n rate = \"have moderately increased\"\r\n else:\r\n rate = \"have greatly increased\"\r\n else:\r\n earnings_direction = \"down\"\r\n if slope <= 0.1:\r\n rate = \"have slightly decreased\"\r\n elif slope <= 0.5:\r\n rate = \"have moderately decreased\"\r\n else:\r\n rate = \"have greatly decreased\"\r\n \r\n earnings_trend_text = f\"Earnings {rate} over the period\" \r\n \r\n ####\r\n #### CREATE THE MAIN REPORT PLOT OF TOP, MID, BOTTOM\r\n #### \r\n \r\n palette = sns.color_palette(cc.glasbey, n_colors=len(tmb_borough_ranking_by_year_df.index.values))\r\n my_cmap = ListedColormap(sns.color_palette(palette).as_hex())\r\n ax = tmb_borough_ranking_by_year_df.T.plot(figsize=(20, 10), marker=\"o\", ms=5, cmap=my_cmap)\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Borough Rankings of Average Earnings {} and {}\".format(year_from, year_to), fontsize=20)\r\n \r\n # plt.xticks(fontsize=20)\r\n \r\n # Generate labels from the Borough names for the Y Axis\r\n plt.yticks(range(1, len(borough_sort_order)+1), [borough_sort_order.iloc[i] for i in range(len(borough_sort_order))])\r\n plt.yticks(fontsize=15)\r\n plt.xticks(fontsize=20)\r\n \r\n plt.gca().invert_yaxis()\r\n plt.gca().get_legend().remove()\r\n \r\n for line in ax.get_lines():\r\n if (line.get_label() == borough) or (line.get_label() == top_borough) or (line.get_label() == mid_borough) or (line.get_label() == bottom_borough):\r\n line.set_linewidth(5)\r\n line.set_ms(10)\r\n else:\r\n line.set_linewidth(2)\r\n line.set_ms(4)\r\n line.set_alpha(0.0)\r\n \r\n bump_borough_earnings_ranking_filtered_report_plot_file = \"./reports/generation/images/{}_bump_borough_earnings_ranking_filtered_{}_{}_{}_report.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=bump_borough_earnings_ranking_filtered_report_plot_file, save_artefacts=True)\r\n \r\n\r\n ####\r\n #### CREATE THE PLOTS - APPENDICES VERSION\r\n #### \r\n \r\n palette = sns.color_palette(cc.glasbey, n_colors=len(borough_sort_order))\r\n my_cmap = ListedColormap(sns.color_palette(palette).as_hex())\r\n ax = all_borough_ranking_by_year_df.T.plot(figsize=(20, 10), marker=\"o\", ms=5, cmap=my_cmap)\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Borough Rankings of Average Earnings {} and {}\".format(year_from, year_to), fontsize=20)\r\n \r\n plt.xticks(fontsize=10)\r\n \r\n ## Generate 
labels from the Borough names for the Y Axis\r\n plt.yticks(range(1, len(borough_sort_order)+1), [borough_sort_order.iloc[i] for i in range(len(borough_sort_order))])\r\n plt.yticks(fontsize=15)\r\n \r\n plt.gca().invert_yaxis()\r\n plt.gca().get_legend().remove()\r\n \r\n for line in ax.get_lines():\r\n if (line.get_label() == borough) or (line.get_label() == top_borough) or (line.get_label() == mid_borough) or (line.get_label() == bottom_borough):\r\n line.set_linewidth(5)\r\n line.set_ms(10)\r\n else:\r\n line.set_linewidth(2)\r\n line.set_ms(4)\r\n line.set_alpha(0.3)\r\n \r\n \r\n bump_borough_earnings_ranking_filtered_appendix_plot_file = \"./reports/generation/images/{}_bump_borough_earnings_ranking_filtered_{}_{}_{}_appendix.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=bump_borough_earnings_ranking_filtered_appendix_plot_file, save_artefacts=True)\r\n\r\n ###\r\n ### EDUCATION\r\n ###\r\n education_by_borough_year_df_all = report_context[\"education_by_borough_year_df_all\"]\r\n education_by_borough_year_df_reduced = report_context[\"education_by_borough_year_df_reduced\"]\r\n education_latest_data_year = report_context[\"education_latest_data_year\"]\r\n\r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - REPORT\r\n ###\r\n \r\n stacked_data = education_by_borough_year_df_reduced.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 5))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Education {}\".format(education_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n \r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=20)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout()\r\n\r\n education_horizontal_stacked_report_plot_file = \"./reports/generation/images/{}_education_horizontal_stacked_{}_{}_{}_report.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=education_horizontal_stacked_report_plot_file, save_artefacts=True)\r\n\r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - APPENDIX\r\n ###\r\n \r\n stacked_data = education_by_borough_year_df_all.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 15))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Education {}\".format(education_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n \r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=15)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout()\r\n \r\n education_horizontal_stacked_appendix_plot_file = \"./reports/generation/images/{}_education_horizontal_stacked_{}_{}_{}_appendix.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=education_horizontal_stacked_appendix_plot_file, save_artefacts=True)\r\n \r\n ###\r\n ### CRIME\r\n ###\r\n \r\n ## Retrieve the data\r\n borough_crime_per_capita_by_year_df = report_context[\"borough_crime_per_capita_by_year_df\"]\r\n\r\n borough_crime_per_capita_filtered_year_df = borough_crime_per_capita_by_year_df[(borough_crime_per_capita_by_year_df[\"YEAR\"].astype(int) >= year_from) & (borough_crime_per_capita_by_year_df[\"YEAR\"].astype(int) <= year_to)]\r\n \r\n years_sorted = 
borough_crime_per_capita_filtered_year_df[\"YEAR\"].sort_values().drop_duplicates()\r\n first_year = years_sorted.iloc[0]\r\n \r\n borough_sort_order = borough_crime_per_capita_filtered_year_df.loc[borough_crime_per_capita_filtered_year_df[\"YEAR\"]==first_year][\"LAD_NAME\"]\r\n top_borough = borough_sort_order.iloc[0]\r\n mid_borough = borough_sort_order.iloc[16]\r\n bottom_borough = borough_sort_order.iloc[-1]\r\n \r\n \r\n ## Loop through the boroughs in the sort order of first year\r\n all_borough_ranking_by_year = []\r\n \r\n ## BOROUGH LOOP\r\n for _borough in borough_sort_order:\r\n \r\n borough_ranking_by_year_list = []\r\n \r\n ## Add the Borough\r\n borough_ranking_by_year_list.append(_borough)\r\n \r\n ## YEAR LOOP\r\n for year in years_sorted:\r\n \r\n borough_ranking_for_year = borough_crime_per_capita_filtered_year_df.loc[(borough_crime_per_capita_filtered_year_df[\"LAD_NAME\"]==_borough) &\r\n (borough_crime_per_capita_filtered_year_df[\"YEAR\"]==year)][\"RANK\"].values[0]\r\n \r\n borough_ranking_by_year_list.append(borough_ranking_for_year)\r\n \r\n all_borough_ranking_by_year.append(borough_ranking_by_year_list)\r\n \r\n columns = [\"Borough\"] + list(years_sorted)\r\n \r\n all_borough_ranking_by_year_df = pd.DataFrame(all_borough_ranking_by_year, columns=columns).set_index(\"Borough\")\r\n \r\n tmb_borough_ranking_by_year_df = all_borough_ranking_by_year_df.copy()\r\n \r\n tmb_borough_ranking_by_year_df = tmb_borough_ranking_by_year_df.loc[(tmb_borough_ranking_by_year_df.index == top_borough) | \r\n (tmb_borough_ranking_by_year_df.index == mid_borough) |\r\n (tmb_borough_ranking_by_year_df.index == borough) | \r\n (tmb_borough_ranking_by_year_df.index == bottom_borough)]\r\n \r\n borough_ranking_by_year_df = tmb_borough_ranking_by_year_df.loc[(tmb_borough_ranking_by_year_df.index == borough)]\r\n \r\n ## Textual trend based on linear regression coefficient\r\n ## -ve then increasing i.e. going from a lower to higher ranking per capita\r\n ## +ve then decreasing i.e. 
going from a higher to lower ranking per capita\r\n ## 0 then flat\r\n \r\n y = [borough_ranking_by_year_df.loc[borough].tolist()]\r\n x = [int(x) for x in borough_ranking_by_year_df.columns.tolist()]\r\n\r\n slope, intercept, r, p, std_err = stats.linregress(x, y)\r\n\r\n rate = \"\"\r\n directio = \"\"\r\n if slope == 0:\r\n crime_direction = \"flat\"\r\n rate = \"has stayed the same\"\r\n elif slope < 0: \r\n crime_direction = \"up\"\r\n if slope >= -0.1:\r\n rate = \"has slightly increased\"\r\n elif slope >= - 0.5:\r\n rate = \"has moderately increased\"\r\n else:\r\n rate = \"has greatly increased\"\r\n else:\r\n crime_direction = \"down\"\r\n if slope <= 0.1:\r\n rate = \"has slightly decreased\"\r\n elif slope <= 0.5:\r\n rate = \"has moderately decreased\"\r\n else:\r\n rate = \"has greatly decreased\"\r\n \r\n crime_trend_text = f\"Crime {rate} over the period\"\r\n\r\n ####\r\n #### CREATE THE MAIN REPORT PLOT OF TOP, MID, BOTTOM\r\n #### \r\n \r\n palette = sns.color_palette(cc.glasbey, n_colors=len(tmb_borough_ranking_by_year_df.index.values))\r\n my_cmap = ListedColormap(sns.color_palette(palette).as_hex())\r\n ax = tmb_borough_ranking_by_year_df.T.plot(figsize=(20, 10), marker=\"o\", ms=5, cmap=my_cmap)\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Crimes Per Capita {} and {}\".format(year_from, year_to), fontsize=20)\r\n \r\n plt.xticks(fontsize=20)\r\n \r\n ## Generate labels from the Borough names for the Y Axis\r\n \r\n plt.yticks(range(1, len(borough_sort_order)+1), [borough_sort_order.iloc[i] for i in range(len(borough_sort_order))])\r\n plt.yticks(fontsize=15)\r\n plt.xticks(fontsize=10)\r\n \r\n ax.xaxis.set_minor_locator(MultipleLocator(1))\r\n \r\n plt.gca().invert_yaxis()\r\n plt.gca().get_legend().remove()\r\n \r\n for line in ax.get_lines():\r\n if (line.get_label() == borough) or (line.get_label() == top_borough) or (line.get_label() == mid_borough) or (line.get_label() == bottom_borough):\r\n line.set_linewidth(5)\r\n line.set_ms(10)\r\n else:\r\n line.set_linewidth(2)\r\n line.set_ms(4)\r\n line.set_alpha(0.0) \r\n\r\n bump_borough_crime_per_capita_ranking_filtered_report_plot_file = \"./reports/generation/images/{}_bump_borough_crime_per_capita_ranking_filtered_{}_{}_{}_report.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=bump_borough_crime_per_capita_ranking_filtered_report_plot_file, save_artefacts=True)\r\n \r\n ####\r\n #### CREATE THE PLOTS - APPENDICES VERSION\r\n #### \r\n \r\n palette = sns.color_palette(cc.glasbey, n_colors=len(borough_sort_order))\r\n my_cmap = ListedColormap(sns.color_palette(palette).as_hex())\r\n ax = all_borough_ranking_by_year_df.T.plot(figsize=(20, 10), marker=\"o\", ms=1, cmap=my_cmap)\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Crimes Per Capita {} and {}\".format(year_from, year_to), fontsize=20)\r\n \r\n plt.xticks(fontsize=20)\r\n \r\n ## Generate labels from the Borough names for the Y Axis\r\n plt.yticks(range(1, len(borough_sort_order)+1), [borough_sort_order.iloc[i] for i in range(len(borough_sort_order))])\r\n plt.yticks(fontsize=15)\r\n plt.xticks(fontsize=10)\r\n \r\n ax.xaxis.set_minor_locator(MultipleLocator(1))\r\n \r\n plt.gca().invert_yaxis()\r\n plt.gca().get_legend().remove()\r\n \r\n for line in ax.get_lines():\r\n if (line.get_label() == borough) or (line.get_label() == top_borough) or (line.get_label() == mid_borough) or (line.get_label() == bottom_borough):\r\n line.set_linewidth(5)\r\n line.set_ms(10)\r\n else:\r\n line.set_linewidth(2)\r\n 
line.set_ms(4)\r\n line.set_alpha(0.3)\r\n \r\n bump_borough_crime_per_capita_ranking_filtered_appendix_plot_file = \"./reports/generation/images/{}_bump_borough_crime_per_capita_ranking_filtered_{}_{}_{}_appendix.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=bump_borough_crime_per_capita_ranking_filtered_appendix_plot_file, save_artefacts=True)\r\n\r\n ###\r\n ### ETHNICITY\r\n ###\r\n \r\n ethnicity_by_borough_year_df_all = report_context[\"ethnicity_by_borough_year_df_all\"]\r\n ethnicity_by_borough_year_df_reduced = report_context[\"ethnicity_by_borough_year_df_reduced\"]\r\n ethnicity_latest_data_year = report_context[\"ethnicity_latest_data_year\"]\r\n \r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - REPORT\r\n ###\r\n \r\n stacked_data = ethnicity_by_borough_year_df_reduced.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 5))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Ethnicity {}\".format(ethnicity_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n \r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=20)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout()\r\n \r\n ethnicity_horizontal_stacked_report_plot_file = \"./reports/generation/images/{}_ethnicity_horizontal_stacked_{}_{}_{}_report.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=ethnicity_horizontal_stacked_report_plot_file, save_artefacts=True)\r\n \r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - APPENDIX\r\n ###\r\n \r\n stacked_data = ethnicity_by_borough_year_df_all.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 15))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"Ethnicity {}\".format(ethnicity_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n \r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=15)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout()\r\n \r\n ethnicity_horizontal_stacked_appendix_plot_file = \"./reports/generation/images/{}_ethnicity_horizontal_stacked_{}_{}_{}_appendix.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=ethnicity_horizontal_stacked_appendix_plot_file, save_artefacts=True)\r\n \r\n\r\n ###\r\n ### GENERAL_HEALTH\r\n ###\r\n \r\n general_health_by_borough_year_df_all = report_context[\"general_health_by_borough_year_df_all\"]\r\n general_health_by_borough_year_df_reduced = report_context[\"general_health_by_borough_year_df_reduced\"]\r\n general_health_latest_data_year = report_context[\"general_health_latest_data_year\"]\r\n \r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - REPORT\r\n ###\r\n \r\n stacked_data = general_health_by_borough_year_df_reduced.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 5))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"General Health {}\".format(general_health_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n \r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=20)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout() \r\n\r\n 
general_health_horizontal_stacked_report_plot_file = \"./reports/generation/images/{}_general_health_horizontal_stacked_{}_{}_{}_report.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=general_health_horizontal_stacked_report_plot_file, save_artefacts=True)\r\n\r\n ###\r\n ### HORIZONTAL PLOT 100% BY BOROUGH - APPENDIX\r\n ###\r\n \r\n stacked_data = general_health_by_borough_year_df_all.apply(lambda x: x*100/sum(x), axis=1)\r\n \r\n ax = stacked_data.plot.barh(stacked=True, figsize=(20, 15))\r\n \r\n ax.grid(False)\r\n ax.set_title(\"General Health {}\".format(general_health_latest_data_year), fontsize=20)\r\n ax.set_ylabel(\"\")\r\n ax.legend(title=\"legend\")\r\n ax.legend(loc=\"upper right\")\r\n #\r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=15)\r\n plt.gca().invert_yaxis()\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n plt.tight_layout()\r\n\r\n general_health_horizontal_stacked_appendix_plot_file = \"./reports/generation/images/{}_general_health_horizontal_stacked_{}_{}_{}_appendix.png\".format(session_id, city, year_from, year_to)\r\n mlib.save_plot_filename(plot=plt, filename=general_health_horizontal_stacked_appendix_plot_file, save_artefacts=True)\r\n\r\n \r\n ###\r\n ### ALL THE FORMATTED TEXT AND PLOT FILES TO GO INTO THE REPORT GENERATION\r\n ###\r\n report_context[\"city\"] = city.capitalize()\r\n report_context[\"ward_name\"] = ward_name\r\n report_context[\"borough\"] = borough\r\n report_context[\"post_code\"] = post_code\r\n report_context[\"post_code_or_ward_name\"] = post_code if post_code != \"\" else ward_name\r\n\r\n report_context[\"location_field_01\"] = location_field_01\r\n report_context[\"population_field_01\"] = population_field_01\r\n report_context[\"population_mekko_plot_gender\"] = mekko_gender_borough_plot_file\r\n\r\n report_context[\"house_hold_01\"] = house_hold[0]\r\n report_context[\"house_hold_02\"] = house_hold[1]\r\n report_context[\"house_hold_03\"] = house_hold[2]\r\n report_context[\"house_hold_04\"] = house_hold[3]\r\n\r\n report_context[\"education_01\"] = education[0]\r\n report_context[\"education_02\"] = education[1]\r\n report_context[\"education_03\"] = education[2]\r\n report_context[\"education_04\"] = education[3]\r\n report_context[\"education_05\"] = education[4]\r\n report_context[\"education_06\"] = education[5]\r\n \r\n ## Earnins - Report and Appendix\r\n report_context[\"earnings_rankings_by_borough_plot_report\"] = bump_borough_earnings_ranking_filtered_report_plot_file\r\n report_context[\"earnings_rankings_by_borough_plot_appendix\"] = bump_borough_earnings_ranking_filtered_appendix_plot_file\r\n\r\n ## Crime - Report and Appendix \r\n report_context[\"crime_per_capita_ranking_by_borough_plot_report\"] = bump_borough_crime_per_capita_ranking_filtered_report_plot_file\r\n report_context[\"crime_per_capita_ranking_by_borough_plot_appendix\"] = bump_borough_crime_per_capita_ranking_filtered_appendix_plot_file\r\n \r\n report_context[\"earnings_direction\"] = earnings_direction\r\n report_context[\"earnings_flat\"] = (earnings_direction.lower() == \"flat\")\r\n report_context[\"earnings_up\"] = (earnings_direction.lower() == \"up\")\r\n report_context[\"earnings_down\"] = (earnings_direction.lower() == \"down\")\r\n report_context[\"earnings_trend_text\"] = earnings_trend_text\r\n \r\n report_context[\"crime_direction\"] = crime_direction\r\n report_context[\"crime_flat\"] = (crime_direction.lower() == \"flat\")\r\n report_context[\"crime_up\"] = 
(crime_direction.lower() == \"up\")\r\n report_context[\"crime_down\"] = (crime_direction.lower() == \"down\")\r\n report_context[\"crime_trend_text\"] = crime_trend_text\r\n \r\n ## Education report and full for appendix education_horizontal_stacked_plot_report_file\r\n report_context[\"education_horizontal_stacked_report_plot\"] = education_horizontal_stacked_report_plot_file\r\n report_context[\"education_horizontal_stacked_appendix_plot\"] = education_horizontal_stacked_appendix_plot_file\r\n \r\n ## Ethnicity report and full for appendix \r\n report_context[\"ethnicity_horizontal_stacked_report_plot\"] = ethnicity_horizontal_stacked_report_plot_file\r\n report_context[\"ethnicity_horizontal_stacked_appendix_plot\"] = ethnicity_horizontal_stacked_appendix_plot_file\r\n \r\n ## General Health \r\n report_context[\"general_health_horizontal_stacked_report_plot\"] = general_health_horizontal_stacked_report_plot_file\r\n report_context[\"general_health_horizontal_stacked_appendix_plot\"] = general_health_horizontal_stacked_appendix_plot_file\r\n \r\n","repo_name":"neal-bamford/sd-map-streamlit","sub_path":"managers/_delete/sd_report_type_general_report_manager.py","file_name":"sd_report_type_general_report_manager.py","file_ext":"py","file_size_in_byte":38834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15999966072","text":"from django.shortcuts import render\nfrom django.views import View\nfrom store.models import Mac, Ipad, Bose, Beatsbydre, Applecare, IPhone, Appleaccessories, Payment, Order, Product, \\\n Customer\n\n\nclass OrderView(View):\n def get(self, request):\n # -------- Nav link List ------\n macs = Mac.get_all_maclist()\n ipads = Ipad.get_all_ipadlist()\n iphones = IPhone.get_all_iPhonelist()\n beats = Beatsbydre.get_all_beatsbydrelist()\n applecares = Applecare.get_all_applecarelist()\n appleaccessories = Appleaccessories.get_all_appleaccessorieslist()\n bose = Bose.get_all_boselist()\n # -------- Nav link List End------\n payments = Payment.get_all_payment()\n\n userid = request.session.get('userid')\n orders = Order.get_order_userId(userid)\n\n context = {'macs': macs, 'ipads': ipads, 'iphones': iphones, 'beats': beats,\n 'applecares': applecares, 'accessories': appleaccessories, 'bose': bose,\n 'payments': payments, 'orders': orders}\n return render(request, 'order.html', context)\n\n","repo_name":"kiranlamalopchan/E-Commerce-Website","sub_path":"KiranWeb/store/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29747122475","text":"\nfrom marshmallow import Schema, fields, validates\n\nfrom backend.errors.request_error import ValidationError\nfrom ..models.datespan import DatespanSchema\n\n\nclass UserOrdersSchema(Schema):\n page = fields.Integer()\n page_size = fields.Integer()\n datespan = fields.Nested(DatespanSchema)\n\n @validates(\"page\")\n def validate_page(self, value, **kwargs):\n if value <= 0:\n raise ValidationError(\"'page' must be a natural positive number.\")\n\n @validates(\"page_size\")\n def validate_page_size(self, value, **kwargs):\n if value <= 0:\n raise ValidationError(\"'page_size' must be a natural positive number.\")\n elif value > 100:\n raise ValidationError(\"'page_size' must be a natural positive 
number.\")\n","repo_name":"willrp/willbuyer","sub_path":"backend/util/request/order/user_orders/user_orders_schema.py","file_name":"user_orders_schema.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42540011796","text":"# Python Script to run VSG on a VHDL file\n# Authors: Ross Snider, Trevor Vannoy\n# SPDX-License-Identifier: MIT\n#\n# Steps to format/check a VHDL file\n# 1. Set the VHDL file name and path (lines 18-19)\n# Note: In the path string Python treats a backslash (\\) as an escape character.\n# Use forward slashes (/), double back slashes (\\\\) or a string literal (r\"\\\").\n# 2. Set the yaml path and file name for the yaml vhdl style guide (line 22)\n# 3. Run this script in Python\n# 4. Fix VHDL style errors/warnings and rerun as necessary\n# Note: requires vsg installed: https://github.com/jeremiah-c-leary/vhdl-style-guide\n\nimport os\nfrom pathlib import Path\n\n# VHDL file to format\nvhdl_file = \"vhdl_file_name.vhd\"\nvhdl_path = Path(r\"C:/\")\n\n# VHDL Style Guide location\nyaml_path = Path(r\"C:/\")\nyaml_file = yaml_path / \"adsd_vhdl_style.yaml\"\n\n# Get current directory\ncwd = os.getcwd()\nprint(\"Current working directory: {0}\".format(cwd))\n\n# Change to directory containing VHDL file\nos.chdir(vhdl_path)\nprint(\"Changing to VHDL file directory: {0}\".format(os.getcwd()))\n\n# Modify/Format the VHDL file\ncommand_str = str(\"vsg -c \" + str(yaml_file) + \" -f \" + vhdl_file + \" --fix\")\nprint(f\"command: {command_str}\")\nos.system(command_str)\n\n# Change back to the original directory\nos.chdir(cwd)\nprint(\"Changing back to directory: {0}\".format(os.getcwd()))\n","repo_name":"Garidy/Code","sub_path":"intro/python/vsg/format_vhdl_file.py","file_name":"format_vhdl_file.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"71062801544","text":"from raptor.src.bash_support import print_colored\nfrom raptor.src.bash_support import prompt\nfrom raptor.src.bash_support import call_command\nfrom raptor.src.config import get_config\n\nimport re\n\ndef lint(files):\n \"\"\"\n lint(files: [str]) -> boolean\n\n Runs the configured linter for each file in files, and returns\n True if no linter issues are found, False otherwise.\n \"\"\"\n config = get_config()\n try:\n lint_engine = config['linter']\n except KeyError:\n print_colored('No linter configured for this project.', color='yellow')\n return True\n\n print_colored('Running Linter...', color='none')\n\n err_codes = 0\n for filename in files:\n for lint_rule in lint_engine:\n regex = lint_rule['pattern']\n if re.match(regex, filename):\n linter = lint_rule['command']\n cmd = '%s %s' % (linter, filename)\n err_codes += call_command(cmd)\n break\n \n return err_codes == 0\n\ndef prompt_lint(files):\n \"\"\"\n prompt_lint(files: [str]) -> boolean\n\n Higher level wrapper around lint() which prompts user upon success\n or failure.\n \"\"\"\n\n if not lint(files):\n return prompt('Lint raised unresolved issues. 
Continue?', False)\n    else:\n        print_colored('Lint raised no issues', color='green')\n        return True\n","repo_name":"lfscheidegger/Raptor","sub_path":"raptor/src/jobs/lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21097984139","text":"import collections\nimport fnmatch\nfrom typing import Iterable, Sequence\n\nfrom google.cloud import storage\n\n\ndef get_matching_blobs(client: storage.Client,\n                       uri_patterns: Iterable[str]) -> Sequence[storage.Blob]:\n  \"\"\"Returns a list of Blob from matching uri patterns on GCS.\n\n  Args:\n    client: An instance of `google.cloud.storage.Client`.\n    uri_patterns: Iterable of strings representing GCS paths, can contain\n      a wildcard to match multiple files.\n  \"\"\"\n  bucket_to_pattern_map = collections.defaultdict(list)\n  for pattern in uri_patterns:\n    pattern = pattern.removeprefix('gs://')\n    bucket_name, blob_name_pattern = pattern.split('/', 1)\n    bucket_to_pattern_map[bucket_name].append(blob_name_pattern)\n\n  blobs = []\n  for bucket_name in bucket_to_pattern_map:\n    bucket = storage.Bucket(client, bucket_name)\n    for blob in client.list_blobs(bucket):\n      for blob_name_pattern in bucket_to_pattern_map[bucket_name]:\n        if fnmatch.fnmatch(blob.name, blob_name_pattern):\n          blobs.append(blob)\n          break\n  return blobs\n\n\ndef get_matched_uris(client: storage.Client,\n                     uri_patterns: Iterable[str]) -> Sequence[str]:\n  \"\"\"Matches blob uris from given GCS uri patterns.\n\n  Args:\n    client: An instance of `google.cloud.storage.Client`.\n    uri_patterns: Iterable of strings representing GCS paths, can contain\n      a wildcard to match multiple files.\n\n  Returns:\n    List of strings of GCS matching blobs uris.\n  \"\"\"\n  blobs = get_matching_blobs(client, uri_patterns)\n  return [f'gs://{b.bucket.name}/{b.name}' for b in blobs]\n\n\ndef download_file(client: storage.Client,\n                  *,\n                  uri_path: str,\n                  destination_path: str) -> None:\n  \"\"\"Downloads a file from GCS on disk at a given destination path.\n\n  Args:\n    client: An instance of `google.cloud.storage.Client`.\n    uri_path: Path to the Google Cloud Storage file to download.\n    destination_path: Destination path on disk to store the downloaded content.\n\n  Raises:\n    ValueError: if the file cannot be found on GCS.\n  \"\"\"\n  uri_path = uri_path.removeprefix('gs://')\n  bucket_name, blob_name = uri_path.split('/', 1)\n  bucket = client.bucket(bucket_name)\n  source_blob = bucket.get_blob(blob_name)\n  if source_blob is None:\n    raise ValueError(f'Blob not found for uri: {uri_path}')\n  source_blob.download_to_filename(destination_path)\n","repo_name":"google/crmint","sub_path":"backend/jobs/workers/storage/storage_utils.py","file_name":"storage_utils.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"81"} +{"seq_id":"73276960265","text":"# Problem 1018: Repainting the chessboard\r\nimport sys\r\nn, m = map(int, input().split())\r\nchess_board = [list(sys.stdin.readline().rstrip()) for _ in range(n)]\r\nans = sys.maxsize\r\n# Enumerate the top-left corner of every 8*8 square on the board.\r\nfor y in range(n-7):\r\n    for x in range(m-7):\r\n        # Number of cells to repaint if the square is assumed to start with a white cell\r\n        start_white_cnt = 0\r\n        # Number of cells to repaint if the square is assumed to start with a black cell\r\n        start_black_cnt = 0\r\n        # Exhaustively scan every cell of the extracted square.\r\n        for i in range(y, y + 8):\r\n            for j in range(x, x+8):\r\n                if (i+j) % 2 == 0:\r\n                    if chess_board[i][j] != \"W\":\r\n                        start_white_cnt += 1\r\n                    else:\r\n                        start_black_cnt += 
1\r\n                else:\r\n                    if chess_board[i][j] != \"B\":\r\n                        start_white_cnt += 1\r\n                    else:\r\n                        start_black_cnt += 1\r\n\r\n        ans = min(ans, start_white_cnt, start_black_cnt)\r\n\r\nprint(ans)\r\n    \r\n    \r\n    \r\n","repo_name":"glaxyt/bojSolv","sub_path":"1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14986382318","text":"import cfscrape\nimport bs4\nimport base64\n\nbase_link = 'http://kissanime.to'\n\nname = input('Anime Name:')\n\nname_esc = name.replace(' ', '-')\nlink_anime_rel = '/anime/' + name_esc\nlink_anime_abs = base_link + link_anime_rel\n\nscraper = cfscrape.create_scraper()\nsource = scraper.get(link_anime_abs).content.decode('utf-8')\n\nsoup_s = bs4.BeautifulSoup(source, 'html.parser')\n\nfor row in reversed(soup_s('table')[0].findAll('tr')[2:]):\n    tds = row.findAll('td')\n    ep_link = base_link + tds[0].a['href']\n    ep_source = scraper.get(ep_link).content.decode('utf-8')\n    soup_e = bs4.BeautifulSoup(ep_source, 'html.parser')\n    for opt in soup_e('select', {'id': 'selectQuality'})[0].findAll('option'):\n        print('Episode: ', tds[0].a.string.strip())\n        print('Quality: ', opt.string)\n        print('Link: ', base64.b64decode(opt['value']))","repo_name":"abdullah2993/KissAnime_POC","sub_path":"kiss_anime_dl.py","file_name":"kiss_anime_dl.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21670284322","text":"import json\nimport os\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef main():\n    img_dir = os.listdir('./origin')\n    for filename in img_dir:\n        try:\n            print(filename)\n            s = filename.split('.')\n            json_file = open(\"./result/\" + s[0] + '.json', mode='r+')\n            spirit_map = json.loads(json_file.read())\n            eye_map = spirit_map['eyes']\n            mouth_map = spirit_map['mouth']\n            # print(spirit_map)\n            image = cv2.imread(\"./origin/\" + filename, cv2.IMREAD_UNCHANGED)\n            eyes = crop(image, [eye_map['position'][2], eye_map['position'][3],\n                                eye_map['position'][2] + eye_map['blockSize'][0],\n                                eye_map['position'][3] + eye_map['blockSize'][1]])\n            mouth = crop(image, [mouth_map['position'][2], mouth_map['position'][3],\n                                 mouth_map['position'][2] + mouth_map['blockSize'][0],\n                                 mouth_map['position'][3] + mouth_map['blockSize'][1]])\n            eyes_width = eye_map['blockSize'][0]\n            eyes_height = eye_map['blockSize'][1]\n            mouth_width = mouth_map['blockSize'][0]\n            mouth_height = mouth_map['blockSize'][1]\n            eye_map = spirit_map['character']['children']['eyes']\n            image[eye_map[3]:eye_map[3] + eyes_height, eye_map[2]:eye_map[2] + eyes_width] = eyes\n            mouth_map = spirit_map['character']['children']['mouth']\n            image[mouth_map[3]:mouth_map[3] + mouth_height, mouth_map[2]:mouth_map[2] + mouth_width] = mouth\n            image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)\n            cv2.imwrite('./check/' + filename, image)\n        except BaseException as e:\n            print(e)\n            continue\n\n\ndef crop(img, boundaries):\n    min_x, min_y, max_x, max_y = boundaries\n    min_x = int(min_x)\n    min_y = int(min_y)\n    max_x = int(max_x)\n    max_y = int(max_y)\n    return img[min_y:max_y, min_x:max_x].copy()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"widealpha/python-bigdata-experiment","sub_path":"自选项目/自动识别精灵图片位置/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41239165453","text":"import 
math\nimport torch\nfrom . import _functional as F\nfrom torch.optim import Optimizer\n\nfrom .utils import (make_coefficient, reduce_array_if_possible_for,\n index_array_or_return_scalar)\nfrom .partial import PartiallyFusedOptimizer\n\n\nclass Adam(Optimizer):\n r\"\"\"Implements Adam algorithm.\n\n .. math::\n \\begin{aligned}\n &\\rule{110mm}{0.4pt} \\\\\n &\\textbf{input} : \\gamma \\text{ (lr)}, \\beta_1, \\beta_2\n \\text{ (betas)},\\theta_0 \\text{ (params)},f(\\theta) \\text{ (objective)} \\\\\n &\\hspace{13mm} \\lambda \\text{ (weight decay)}, \\: amsgrad \\\\\n &\\textbf{initialize} : m_0 \\leftarrow 0 \\text{ ( first moment)},\n v_0\\leftarrow 0 \\text{ (second moment)},\\: \\widehat{v_0}^{max}\\leftarrow 0\\\\[-1.ex]\n &\\rule{110mm}{0.4pt} \\\\\n &\\textbf{for} \\: t=1 \\: \\textbf{to} \\: \\ldots \\: \\textbf{do} \\\\\n &\\hspace{5mm}g_t \\leftarrow \\nabla_{\\theta} f_t (\\theta_{t-1}) \\\\\n &\\hspace{5mm}\\textbf{if} \\: \\lambda \\neq 0 \\\\\n &\\hspace{10mm} g_t \\leftarrow g_t + \\lambda \\theta_{t-1} \\\\\n &\\hspace{5mm}m_t \\leftarrow \\beta_1 m_{t-1} + (1 - \\beta_1) g_t \\\\\n &\\hspace{5mm}v_t \\leftarrow \\beta_2 v_{t-1} + (1-\\beta_2) g^2_t \\\\\n &\\hspace{5mm}\\widehat{m_t} \\leftarrow m_t/\\big(1-\\beta_1^t \\big) \\\\\n &\\hspace{5mm}\\widehat{v_t} \\leftarrow v_t/\\big(1-\\beta_2^t \\big) \\\\\n &\\hspace{5mm}\\textbf{if} \\: amsgrad \\\\\n &\\hspace{10mm}\\widehat{v_t}^{max} \\leftarrow \\mathrm{max}(\\widehat{v_t}^{max},\n \\widehat{v_t}) \\\\\n &\\hspace{10mm}\\theta_t \\leftarrow \\theta_{t-1} - \\gamma \\widehat{m_t}/\n \\big(\\sqrt{\\widehat{v_t}^{max}} + \\epsilon \\big) \\\\\n &\\hspace{5mm}\\textbf{else} \\\\\n &\\hspace{10mm}\\theta_t \\leftarrow \\theta_{t-1} - \\gamma \\widehat{m_t}/\n \\big(\\sqrt{\\widehat{v_t}} + \\epsilon \\big) \\\\\n &\\rule{110mm}{0.4pt} \\\\[-1.ex]\n &\\bf{return} \\: \\theta_t \\\\[-1.ex]\n &\\rule{110mm}{0.4pt} \\\\[-1.ex]\n \\end{aligned}\n\n For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float or a list/tuple/np.array/torch.Tensor of floats, optional): learning rate (default: 1e-3)\n betas (Tuple[float or a list/tuple/np.array/torch.Tensor of floats, float or a list/tuple/np.array/torch.Tensor of floats], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float or a list/tuple/np.array/torch.Tensor of floats, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float or a list/tuple/np.array/torch.Tensor of floats, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n B=1):\n lr, eps, beta1, beta2, weight_decay = reduce_array_if_possible_for(\n lr, eps, betas[0], betas[1], weight_decay)\n betas = (beta1, beta2)\n lr = make_coefficient('learning rate', lr, lb=0.0, ub=float('inf'))\n eps = make_coefficient('epsilon value', eps, lb=0.0, ub=float('inf'))\n betas = make_coefficient('beta parameter at index',\n betas,\n lb=0.0,\n ub=1.0,\n is_tuple=True)\n weight_decay = make_coefficient('weight_decay value',\n weight_decay,\n lb=0.0,\n ub=float('inf'))\n # TODO(wangshangsam): amsgrad array support.\n defaults = dict(lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n amsgrad=amsgrad)\n super(Adam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(Adam, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params_with_grad = []\n grads = []\n exp_avgs = []\n exp_avg_sqs = []\n max_exp_avg_sqs = []\n state_steps = []\n beta1, beta2 = group['betas']\n\n for p in group['params']:\n if p.grad is not None:\n params_with_grad.append(p)\n if p.grad.is_sparse:\n raise RuntimeError(\n 'Adam does not support sparse gradients, please consider SparseAdam instead'\n )\n grads.append(p.grad)\n\n state = self.state[p]\n # Lazy state initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(\n p, memory_format=torch.preserve_format)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(\n p, memory_format=torch.preserve_format)\n if group['amsgrad']:\n # Maintains max of all exp. moving avg. of sq. grad. 
values\n state['max_exp_avg_sq'] = torch.zeros_like(\n p, memory_format=torch.preserve_format)\n\n exp_avgs.append(state['exp_avg'])\n exp_avg_sqs.append(state['exp_avg_sq'])\n\n if group['amsgrad']:\n max_exp_avg_sqs.append(state['max_exp_avg_sq'])\n\n # update the steps for each param group update\n state['step'] += 1\n # record the step after step update\n state_steps.append(state['step'])\n\n F.adam(params_with_grad,\n grads,\n exp_avgs,\n exp_avg_sqs,\n max_exp_avg_sqs,\n state_steps,\n amsgrad=group['amsgrad'],\n beta1=beta1,\n beta2=beta2,\n lr=group['lr'],\n weight_decay=group['weight_decay'],\n eps=group['eps'])\n return loss\n\n\nclass PartiallyFusedAdam(PartiallyFusedOptimizer):\n\n def __init__(\n self,\n fused_params,\n unfused_params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n B=1,\n ):\n fused_params = list(fused_params)\n if len(fused_params) == 0:\n fused_adam = None\n else:\n fused_adam = Adam(\n fused_params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n amsgrad=amsgrad,\n B=B,\n )\n unfused_adams = [\n torch.optim.Adam(\n params,\n lr=index_array_or_return_scalar(lr, b),\n betas=(\n index_array_or_return_scalar(betas[0], b),\n index_array_or_return_scalar(betas[1], b),\n ),\n eps=index_array_or_return_scalar(eps, b),\n weight_decay=index_array_or_return_scalar(weight_decay, b),\n amsgrad=amsgrad,\n ) for b, params in enumerate(unfused_params)\n ]\n super(PartiallyFusedAdam, self).__init__(fused_adam, unfused_adams)\n","repo_name":"UofT-EcoSystem/hfta","sub_path":"hfta/optim/adam.py","file_name":"adam.py","file_ext":"py","file_size_in_byte":8880,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"} +{"seq_id":"14709782892","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\n\nimport inputs\n\nFLAGS = None\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef main(_):\n # Import data\n # mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n x = tf.placeholder(tf.float32, [None, 784])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n # conv1 & maxpool1\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n\n # conv2 & maxpool2\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n # fc1\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # fc2\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n\n y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n cross_entropy = tf.reduce_mean(\n 
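# average softmax cross-entropy between the one-hot labels y_ and the logits from fc2\n        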
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n images, labels = inputs.inputs(\n filename=os.path.join(FLAGS.data_dir, inputs.TRAIN_FILE),\n batch_size=32, num_epochs=100)\n\n with tf.train.MonitoredTrainingSession() as sess:\n #tf.global_variables_initializer().run()\n batch_x, batch_y = sess.run([images, labels])\n print('batch_x', batch_x.shape, 'batch_y', batch_y.shape)\n for i in range(1000):\n if i % 100 == 0:\n train_accuracy = sess.run(accuracy, feed_dict={\n x: batch_x, y_: batch_y, keep_prob: 1.0})\n print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n sess.run(train_step, feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})\n\n # print(\"test accuracy %g\" % sess.run(accuracy, feed_dict={\n # x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n\nif __name__ == '__main__':\n import os\n\n data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'input_data'))\n print('data_dir:', data_dir)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default=data_dir,\n help='Directory for storing input data')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n","repo_name":"huanghao/learning-tensorflow","sub_path":"mnist/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35246902711","text":"import pygame\nimport sys\nimport random\n\nfrom typing import List, Tuple\n\nscreen_width = 480\nscreen_height = 480\ngridsize = 20\ngrid_width = screen_width // gridsize\ngrid_height = screen_height // gridsize\n\nassert (\n screen_height % 20 == 0 and screen_width % 20 == 0\n), f\"screen width or height must be multiple of gridsize {gridsize}\"\n\nGridPos = Tuple[int, int]\n\nup = (0, -1)\ndown = (0, 1)\nleft = (-1, 0)\nright = (1, 0)\n\n\nclass Snake:\n def __init__(self) -> None:\n self.reset()\n\n def head_pos(self) -> GridPos:\n return self.positions[0]\n\n def turn(self, dir: GridPos) -> None:\n if self.length > 1 and (dir[0] * -1, dir[1] * -1) == self.direction:\n return\n else:\n self.direction = dir\n\n def move(self) -> None:\n cur = self.head_pos()\n dx, dy = self.direction\n new = (\n (cur[0] + (dx * gridsize)) % screen_width,\n (cur[1] + (dy * gridsize)) % screen_height,\n )\n if len(self.positions) > 2 and new in self.positions[2:]:\n self.reset()\n else:\n self.positions.insert(0, new)\n if len(self.positions) > self.length:\n self.positions.pop()\n\n def reset(self) -> None:\n self.length = 1\n self.positions: List[GridPos] = [(screen_width // 2, screen_height // 2)]\n self.direction = random.choice((up, down, left, right))\n self.color = pygame.Color(17, 24, 47)\n self.score = 0\n\n def draw(self, surface: pygame.Surface) -> None:\n for x, y in self.positions:\n r = pygame.Rect((x, y), (gridsize, gridsize))\n pygame.draw.rect(surface, self.color, r)\n pygame.draw.rect(surface, (93, 216, 228), r, 1)\n\n def handle_keys(self) -> None:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.turn(up)\n elif event.key == pygame.K_DOWN:\n self.turn(down)\n elif event.key == pygame.K_LEFT:\n 
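# mirrors the up/down cases above; turn() drops a direct reversal once the snake is longer than one segment\n                    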
self.turn(left)\n elif event.key == pygame.K_RIGHT:\n self.turn(right)\n elif event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n\nclass Food:\n def __init__(self) -> None:\n self.position: GridPos = (0, 0)\n self.color = pygame.Color(223, 163, 49)\n self.randomize_pos()\n\n def randomize_pos(self) -> None:\n self.position = (\n random.randint(0, grid_width - 1) * gridsize,\n random.randint(0, grid_height - 1) * gridsize,\n )\n\n def draw(self, surface: pygame.Surface) -> None:\n r = pygame.Rect(self.position, (gridsize, gridsize))\n pygame.draw.rect(surface, self.color, r)\n pygame.draw.rect(surface, (93, 216, 228), r, 1)\n\n\ndef draw_grid(surface: pygame.Surface) -> None:\n for row in range(grid_height):\n for col in range(grid_width):\n color = (93, 216, 228) if (row + col) % 2 == 0 else (84, 194, 205)\n r = pygame.Rect((col * gridsize, row * gridsize), (gridsize, gridsize))\n pygame.draw.rect(surface, color, r)\n\n\ndef main() -> None:\n pygame.init()\n pygame.display.set_caption(\"Snake\")\n\n clock = pygame.time.Clock()\n screen = pygame.display.set_mode((screen_width, screen_height), 0, 32)\n\n surface = pygame.Surface(screen.get_size())\n surface = surface.convert()\n draw_grid(surface)\n\n snake = Snake()\n food = Food()\n\n myfont = pygame.font.SysFont(\"monospace\", 16)\n\n while True:\n clock.tick(10)\n snake.handle_keys()\n draw_grid(surface)\n snake.move()\n if snake.head_pos() == food.position:\n snake.length += 1\n snake.score += 1\n food.randomize_pos()\n snake.draw(surface)\n food.draw(surface)\n screen.blit(surface, (0, 0))\n text = myfont.render(f\"Score {snake.score}\", 1, (0, 0, 0))\n screen.blit(text, (5, 10))\n pygame.display.update()\n\n\nmain()","repo_name":"lordzizzy/pygame_snake","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74561617866","text":"\r\nclass Solution(object):\r\n def clock(self, S, T):\r\n count = 0\r\n startSec = self.tranToSec(S)\r\n endSec = self.tranToSec(T)\r\n\r\n \r\n print(\"startSec:{}, endSec:{}\".format(startSec, endSec))\r\n \r\n for sec in range(startSec, endSec+1): \r\n hour = self.secToTime(sec)[0]\r\n minute = self.secToTime(sec)[1]\r\n second = self.secToTime(sec)[2]\r\n #[hour,minute,second]\r\n interest = list(str(hour)) + list(str(minute)) + list(str(second))\r\n print(interest)\r\n print(set(interest))\r\n print(len(set(interest)))\r\n\r\n if len(set(interest)) <= 2:\r\n count += 1\r\n return count\r\n \r\n\r\n def tranToSec(self, string):\r\n strBuf = list(string)\r\n print(strBuf)\r\n return (int(strBuf[0])*10 + int(strBuf[1]))*60*60 + \\\r\n (int(strBuf[3])*10 + int(strBuf[4]))*60 + \\\r\n (int(strBuf[6])*10 + int(strBuf[7]))\r\n\r\n def secToTime(self, sec):\r\n time = []\r\n\r\n hour = int(sec) // (60 * 60)\r\n if hour < 10:\r\n hour = str(\"0\") + str(hour) \r\n time.append(hour)\r\n\r\n minute = (int(sec) % (60 * 60)) // 60\r\n if minute < 10:\r\n minute = str(\"0\") + str(minute) \r\n time.append(minute)\r\n\r\n second = (int(sec) % (60 * 60)) % 60\r\n if second < 10:\r\n second = str(\"0\") + str(second) \r\n time.append(second)\r\n\r\n #print(time)\r\n return time #[hour,minute,second]\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #print(Solution().bracket(\"()()()()()\"))\r\n #print(Solution().bracket(\"(())))(\")) \r\n #print(Solution().clock(\"00:00:00\",\"00:00:10\"))\r\n 
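# Expected here: within 15:15:00-15:15:12 only 15:15:11 uses at most two\r\n    # distinct digits ({1, 5}), so the call below should print 1. Similarly,\r\n    # clock(\"22:22:20\",\"22:22:29\") should return 10: every reading there\r\n    # stays within two distinct digits.\r\n    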
print(Solution().clock(\"15:15:00\",\"15:15:12\"))","repo_name":"cs1dada/codility","sub_path":"lesson3 Time Complexity/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72937450506","text":"import srr\nimport argparse\nimport numpy as np\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Model for SRR\")\n parser.add_argument(\"nteams\", default=8, type=int, nargs=\"?\")\n parser.add_argument(\"--variant\", default=\"traditional\", type=str)\n parser.add_argument(\"--seed\", default=0, type=int)\n parser.add_argument(\"--ratio\", default=0.2, type=float)\n\n args = parser.parse_args()\n\n for arg, val in args.__dict__.items():\n print(f\"{arg}: {val}\")\n\n assert args.nteams % 2 == 0, \"Odd number of teams\"\n\n models = {\n \"traditional\" : srr.traditional.Model,\n \"matching\": srr.matching.Model,\n \"permutation\": srr.matching.Model\n }\n\n assert args.variant in models.keys(), f\"Unknown variant {args.variant}.\"\n assert 0 <= args.ratio <= 1, f\"Ratio out of bounds.\"\n nteams = args.nteams\n\n costs = srr.get_random_costs(args.nteams, args.ratio, args.seed)\n\n if nteams <= 6:\n # Integer small enough to see things.\n ones = np.where(costs)\n print(ones)\n print(list(zip(*ones)))\n\n model = models[args.variant](nteams, costs)\n\n model.optimize()\n","repo_name":"JasperNL/round-robin","sub_path":"project/solve_srr.py","file_name":"solve_srr.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42874906034","text":"'''\nGiven an encoded string, return its decoded string.\n\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.\n\nYou may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.\n\nFurthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. 
For example, there won't be input like 3a or 2[4].\n\nExamples:\n\ns = \"3[a]2[bc]\", return \"aaabcbc\".\ns = \"3[a2[c]]\", return \"accaccacc\".\ns = \"2[abc]3[cd]ef\", return \"abcabccdcdcdef\".\n'''\n\nclass Solution(object):\n    def decodeString(self, s):\n        counts = []\n        result = []\n        final = ''\n        index = 0 \n\n        while (index < len(s)):\n            if s[index].isnumeric():\n                count = 0 \n                while(s[index].isnumeric()):\n                    count = 10 * count + int(s[index])\n                    index += 1\n\n                counts.append(count)\n\n            elif s[index] == '[':\n                result.append(final)\n                final = ''\n                index += 1\n\n            elif s[index] == ']':\n                temp = []\n                temp.append(result.pop())\n                count = counts.pop()\n                for i in range(count):\n                    temp.append(final)\n\n                final = ''.join(temp)\n                index += 1\n\n            else:\n                final += s[index]\n                index += 1\n\n        return final\n    ","repo_name":"prashantchanne12/Leetcode","sub_path":"decode string.py","file_name":"decode string.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"30812486213","text":"import os\nimport numpy as np\nimport pandas as pd\nimport math\n\nimport tensorflow as tf\n# import tf.keras as K\n\nfilepath = os.path.abspath(__file__)\n\nclass PX4Generator(tf.keras.utils.Sequence):\n\n    def __init__(self,data, batch_size = 48, timesteps = 400):\n\n        self.batch_size = batch_size\n        self.timesteps = timesteps\n        self.dir = os.path.dirname(filepath)\n        self.dataset_path = os.path.join(self.dir,'datasets')\n        self.data = data\n        self.num_cols = self.data.shape[1]\n        self.len = math.floor((self.data.shape[0]-self.timesteps)/(self.batch_size))\n\n\n    def __len__(self):\n        \"\"\"Number of batch in the Sequence.\n\n        Returns:\n            The number of batches in the Sequence.\n        \"\"\"\n        if ((self.data.shape[0]-self.timesteps) % (self.batch_size) == 0):\n            return self.len\n\n        return self.len + 1\n\n    def group_data(self, start_idx, end_idx):\n        \"\"\"Slices the data between the given row indices.\n\n        Arguments:\n            start_idx: Start index (inclusive)\n            end_idx: End index (exclusive)\n\n        Returns:\n            A slice of the data array\n        \"\"\"\n\n        return self.data[start_idx:end_idx]\n\n\n    def __getitem__(self, batch_idx):\n        \"\"\"Gets batch at position `batch_idx`.\n\n        Arguments:\n            batch_idx: position of the batch in the Sequence.\n\n        Returns:\n            A batch\n        \"\"\"\n\n        x = np.zeros((self.batch_size,self.timesteps,self.num_cols))\n        y = np.zeros((self.batch_size,self.num_cols))\n\n        idxi = batch_idx * self.batch_size\n        # debug only\n        print(\"batch_idx: \",batch_idx)\n        # print(\"batch_size: \",self.batch_size)\n        # print(\"timesteps: \",self.timesteps)\n        # print(\"idxi: \", idxi)\n        \n        if(batch_idx == (self.__len__() - 1)):\n            last_batch_size = self.data.shape[0] - self.timesteps - idxi\n            x = np.zeros((last_batch_size,self.timesteps,self.num_cols))\n            y = np.zeros((last_batch_size,self.num_cols))\n            # debug only\n            print(\"last_batch_size: \",last_batch_size)\n            for i in range(last_batch_size):\n                x[i,:,:] = self.group_data(idxi,idxi + self.timesteps)\n                y[i,:] = self.data[idxi + self.timesteps]\n                idxi +=1\n\n            return x , y\n\n        for i in range(self.batch_size):\n            x[i,:,:] = self.group_data(idxi,idxi + self.timesteps)\n            y[i,:] = self.data[idxi + self.timesteps]\n            idxi +=1\n\n        return x , y\n","repo_name":"enakilci/TUHH-master-thesis","sub_path":"code/architecture/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18170500177","text":"import 
ast\nimport html\n\nfrom django.contrib.auth import get_user_model\n\nfrom api.utils import variant_check\nfrom constance import config\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_200_OK, HTTP_404_NOT_FOUND\nfrom rest_framework.views import APIView\n\nfrom api.views.utils import get_distributed_download_url\nfrom core.models import Build, Device\n\n\nUser = get_user_model()\n\n\nclass V1GeneralDeviceAll(APIView):\n \"\"\"\n Returns a list of all devices in shipper, with their active status and variants\n \"\"\"\n\n permission_classes = [AllowAny]\n\n # noinspection PyMethodMayBeStatic\n def get(self, request):\n return_json = {}\n for device in Device.objects.all():\n variants = ast.literal_eval(config.SHIPPER_UPLOAD_VARIANTS)\n has_variants = []\n\n for variant in variants:\n if device.has_enabled_hashed_builds_of_variant(variant=variant):\n has_variants.append(variant)\n\n return_json[device.codename] = {\n \"status\": device.status,\n \"variants\": has_variants,\n }\n\n return Response(return_json, status=HTTP_200_OK)\n\n\nclass V1GeneralMaintainerAll(APIView):\n \"\"\"\n Returns a list of all maintainers' information registered with shipper\n \"\"\"\n\n permission_classes = [AllowAny]\n\n # noinspection PyMethodMayBeStatic\n def get(self, request):\n return_json = {}\n for user in User.objects.all():\n return_json[user.username] = {\n \"active\": user.is_active,\n \"name\": user.get_full_name(),\n \"bio\": user.bio,\n \"profile_picture\": user.profile_picture,\n \"contact_url\": user.contact_url,\n \"devices\": [device.codename for device in user.devices.all()],\n }\n\n return Response(return_json, status=HTTP_200_OK)\n\n\nclass V1GeneralMaintainerActive(APIView):\n \"\"\"\n Returns a list of **active** maintainers' information registered with shipper\n \"\"\"\n\n permission_classes = [AllowAny]\n\n # noinspection PyMethodMayBeStatic\n def get(self, request):\n return_json = {}\n for user in User.objects.filter(is_active=True):\n return_json[user.username] = {\n \"name\": user.get_full_name(),\n \"bio\": user.bio,\n \"profile_picture\": user.profile_picture,\n \"contact_url\": user.contact_url,\n \"devices\": [device.codename for device in user.devices.all()],\n }\n\n return Response(return_json, status=HTTP_200_OK)\n\n\nclass V1GeneralBuildLatest(APIView):\n \"\"\"\n Returns the latest build information for a given device and variant\n \"\"\"\n\n permission_classes = [AllowAny]\n\n # noinspection PyMethodMayBeStatic\n def get(self, request, codename, variant):\n try:\n device = get_object_or_404(Device, codename=codename)\n except Http404:\n return Response(\n {\"message\": \"The specified device does not exist!\"},\n status=HTTP_404_NOT_FOUND,\n )\n\n ret = variant_check(variant)\n if ret:\n return ret\n\n try:\n build = device.get_latest_enabled_hashed_build_of_variant(variant=variant)\n except Build.DoesNotExist:\n return Response(\n {\"message\": \"No builds exist for the specified variant yet!\"},\n status=HTTP_404_NOT_FOUND,\n )\n\n return Response(\n {\n \"datetime\": int(build.build_date.strftime(\"%s\")),\n \"filename\": \"{}.zip\".format(build.file_name),\n \"sha256\": build.sha256sum,\n \"size\": build.size,\n \"version\": build.version,\n \"variant\": html.escape(variant),\n \"mirror_url\": get_distributed_download_url(request, build),\n },\n status=HTTP_200_OK,\n 
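# datetime is a Unix timestamp; the variant string is HTML-escaped above\n        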
)\n","repo_name":"shipperstack/shipper","sub_path":"server/api/views/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"40643989675","text":"\"\"\"References:\n\nSimonyan, Karen, and Andrew Zisserman. \"Very deep convolutional networks for\nlarge-scale image recognition.\" arXiv preprint arXiv:1409.1556 (2014).\n\"\"\"\nimport find_mxnet\nimport mxnet as mx\n\ndef get_symbol(num_classes=1000, dataset='imagenet'):\n ## define alexnet\n data = mx.symbol.Variable(name=\"data\")\n # group 1\n #change the learning rate to 0 or very small one for pretrain\n conv1_1 = mx.symbol.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name=\"conv1_1\", attr={'lr_mult': '0.00'})\n relu1_1 = mx.symbol.Activation(data=conv1_1, act_type=\"relu\", name=\"relu1_1\")\n pool1 = mx.symbol.Pooling(\n data=relu1_1, pool_type=\"max\", kernel=(2, 2), stride=(2,2), name=\"pool1\")\n # group 2\n #change the learning rate to 0 or very small one for pretrain\n conv2_1 = mx.symbol.Convolution(\n data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name=\"conv2_1\", attr={'lr_mult': '0.00'})\n relu2_1 = mx.symbol.Activation(data=conv2_1, act_type=\"relu\", name=\"relu2_1\")\n pool2 = mx.symbol.Pooling(\n data=relu2_1, pool_type=\"max\", kernel=(2, 2), stride=(2,2), name=\"pool2\")\n # group 3\n conv3_1 = mx.symbol.Convolution(\n data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name=\"conv3_1\")\n relu3_1 = mx.symbol.Activation(data=conv3_1, act_type=\"relu\", name=\"relu3_1\")\n conv3_2 = mx.symbol.Convolution(\n data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name=\"conv3_2\")\n relu3_2 = mx.symbol.Activation(data=conv3_2, act_type=\"relu\", name=\"relu3_2\")\n pool3 = mx.symbol.Pooling(\n data=relu3_2, pool_type=\"max\", kernel=(2, 2), stride=(2,2), name=\"pool3\")\n # group 4\n conv4_1 = mx.symbol.Convolution(\n data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name=\"conv4_1\")\n relu4_1 = mx.symbol.Activation(data=conv4_1, act_type=\"relu\", name=\"relu4_1\")\n conv4_2 = mx.symbol.Convolution(\n data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name=\"conv4_2\")\n relu4_2 = mx.symbol.Activation(data=conv4_2, act_type=\"relu\", name=\"relu4_2\")\n pool4 = mx.symbol.Pooling(\n data=relu4_2, pool_type=\"max\", kernel=(2, 2), stride=(2,2), name=\"pool4\")\n # group 5\n conv5_1 = mx.symbol.Convolution(\n data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name=\"conv5_1\")\n relu5_1 = mx.symbol.Activation(data=conv5_1, act_type=\"relu\", name=\"relu5_1\")\n conv5_2 = mx.symbol.Convolution(\n data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name=\"conv5_2\")\n relu5_2 = mx.symbol.Activation(data=conv5_2, act_type=\"relu\", name=\"conv1_2\")\n # pool5 = mx.symbol.Pooling(\n # data=relu5_2, pool_type=\"max\", kernel=(2, 2), stride=(2,2), name=\"pool5\")\n # # group 6\n # flatten = mx.symbol.Flatten(data=pool5, name=\"flatten\")\n # fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name=\"fc6\")\n # relu6 = mx.symbol.Activation(data=fc6, act_type=\"relu\", name=\"relu6\")\n # drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name=\"drop6\")\n # # group 7\n # fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name=\"fc7\")\n # relu7 = mx.symbol.Activation(data=fc7, act_type=\"relu\", name=\"relu7\")\n # drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name=\"drop7\")\n # # output\n # fc8 = mx.symbol.FullyConnected(data=drop7, 
num_hidden=num_classes, name=\"fc8\")\n    # softmax = mx.symbol.SoftmaxOutput(data=fc8, name='softmax')\n    # return softmax\n    avg = mx.symbol.Pooling(data=relu5_2, kernel=(10, 10), stride=(1, 1), name=\"global_pool\", pool_type='avg')\n    # linear classifier\n    flatten = mx.symbol.Flatten(data=avg, name='flatten')\n    #fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')\n    # rename fc1 so each dataset gets its own classifier layer\n    fc1 = mx.symbol.FullyConnected(name='fc1' if dataset == 'imagenet' else 'fc1_%s' % dataset, data=flatten, num_hidden=num_classes)\n\n    softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')\n    return softmax\n","repo_name":"phunterlau/kaggle_statefarm","sub_path":"vgg/symbol_vgg.py","file_name":"symbol_vgg.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"81"} +{"seq_id":"26243324848","text":"from sklearn.neighbors import KNeighborsClassifier\nimport time\nfrom ast import literal_eval\nimport numpy as np\nfrom utils.functions import flatten_df_arr, split_set_randomly\nfrom constants import MIN_K_IMPROVEMENT\nimport utils.validate_models as validate_models\nfrom sklearn.metrics import confusion_matrix\nimport pandas as pd\n\n\nclass KNeighbours():\n    final_k = 0\n    data = None\n    max_k = 0\n    batches = 0\n\n    def __init__(self, data, max_k=10, final_k=0, batches=10):\n        self.data = data\n        self.max_k = max_k\n        self.final_k = final_k\n        self.batches = batches\n\n    def train_k(self):\n        start_time = time.time()\n        data_folds = split_set_randomly(self.data, self.batches)\n        best_k = 0\n        best_accuracy = 0\n        best_score = 0\n        for k in range(1, self.max_k + 1):\n            print(f'Testing k={k}')\n            knn = KNeighborsClassifier(k)\n            k_score = 0\n            k_accuracy = 0\n            for index, test_fold in enumerate(data_folds):\n                training_data = data_folds.copy()\n                training_data.pop(index)\n                training_data = flatten_df_arr(training_data)\n                training_features = training_data['pitches'].apply(literal_eval)\n                training_labels = training_data['key']\n                test_features = test_fold['pitches'].apply(literal_eval)\n                test_labels = test_fold['key']\n                knn.fit(training_features.tolist(), training_labels.tolist())\n                predictions = knn.predict(test_features.tolist())\n                score = validate_models.count_evaluation_score(predictions, test_labels.tolist())\n                accuracy = knn.score(test_features.tolist(), test_labels.tolist())\n                k_score = k_score + score\n                k_accuracy = k_accuracy + accuracy\n            k_score = k_score / len(data_folds)\n            k_accuracy = k_accuracy / len(data_folds)\n            print(f'Mirex score for k={k} was {k_score}')\n            print(f'Accuracy for k={k} was {k_accuracy}')\n            if k_accuracy - best_accuracy > MIN_K_IMPROVEMENT:\n                best_accuracy = k_accuracy\n                best_score = k_score\n                best_k = k\n                print(f'New best parameter k found. Accuracy: {best_accuracy}. Mirex score: {best_score}. Best value of k: {k}')\n        end_time = time.time()\n        print(f'Search for the best hyperparameter k finished. Best k={best_k}. The process took {end_time - start_time} seconds.')\n\n    def validate_knn(self, test_data):\n        knn = KNeighborsClassifier(self.final_k)\n        training_features = self.data['pitches'].apply(literal_eval)\n        training_labels = self.data['key']\n        test_features = test_data['pitches'].apply(literal_eval)\n        test_labels = test_data['key']\n        knn.fit(training_features.tolist(), training_labels.tolist())\n        predictions = knn.predict(test_features.tolist())\n        evaluation_score = validate_models.count_evaluation_score(predictions, test_labels.tolist())\n        accuracy = knn.score(test_features.tolist(), test_labels.tolist())\n        print(f'Accuracy on the test set: {round(accuracy * 100, 2)}%')\n        print(f'Mirex score on the test set: {round(evaluation_score * 100, 2)}%')\n        matrix = pd.DataFrame(confusion_matrix(test_labels, predictions, labels=np.arange(0, 24))) # Matrix\n        print(matrix)\n","repo_name":"KrzysztofMoszczynski/automatic-key-detection","sub_path":"utils/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74929999943","text":"from gate_control.button_sense import *\n\ndef epoch_test():\n    print('testing','epoch_test')\n    t1 = epoch()\n    t2 = epoch(state=1)\n    t3 = epoch(mock=1)\n    assert(t1['state']==0 and t1['mock'] == False)\n    assert(t2['state']==1 and t2['mock'] == False)\n    assert(t3['state']==0 and t3['mock'] == True)\n    print('passed')\n    return 1\n\n\ndef filter_history_test():\n    print('testing','filter_history_test')\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch()})\n        sleep(SENSORS['PING'])\n    current = filter_history(hist,'current')\n    previous = filter_history(hist,'previous')\n    last_30 = filter_history(hist,'last_30')\n\n    assert(len(last_30)<=30)\n    assert(all([ts()-x <= 1 for x in current]))\n    assert(all([not(ts()-x <= 1) for x in previous]))\n    return 1\n\ndef get_current_activations_test():\n    print('testing','get_current_activations_test')\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch(state=1)})\n        sleep(SENSORS['PING'])\n    assert(any(get_current_activations(hist)))\n\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch(state=0)})\n        sleep(SENSORS['PING'])\n    \n    assert(len(get_current_activations(hist))==0)\n    print('passed')\n    return 1\n\ndef get_all_activations_test():\n    print('testing','get_all_activations_test')\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch(state=1)})\n        sleep(SENSORS['PING'])\n    assert(len(get_all_activations(hist))==30)\n\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch(state=0)})\n        sleep(SENSORS['PING'])\n    \n    assert(len(get_all_activations(hist))==0)\n    print('passed')\n    return 1\n\ndef action_triage_test():\n    print('testing','action_triage_test')\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch()})\n        sleep(SENSORS['PING'])\n    current = get_current_activations(hist)\n    last_30 = get_all_activations(hist)\n    assert(action_triage(current=current,last_30=last_30) == None)\n\n    hist = {}\n    for x in range(45):\n        hist.update({ts():epoch(state=1)})\n        sleep(SENSORS['PING'])\n    current = get_current_activations(hist)\n    last_30 = get_all_activations(hist)\n    
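# 45 consecutive active epochs saturate both the current window and the\n    # 30-epoch window, so the triage should escalate straight to 'ebrake'\n    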
assert(action_triage(current=current,last_30=last_30) == 'activation')\n \n print('passed')\n return 1\n\ndef activation_test():\n hist = {}\n for x in range(30):\n hist.update({ts():epoch()})\n sleep(SENSORS['PING'])\n sleep(2)\n for x in range(11):\n hist.update({ts():epoch(state=1)})\n sleep(SENSORS['PING'])\n current = get_current_activations(hist)\n last_30 = get_all_activations(hist)\n assert(action_triage(current=current,last_30=last_30) == 'activation')\n \n\n\ndef run_all_tests():\n #assert(epoch_test()==1)\n #assert(filter_history_test()==1)\n #assert(get_current_activations_test()==1)\n #assert(get_all_activations_test()==1)\n #assert(action_triage_test()==1)\n activation_test()\n\nif __name__ == '__main__':\n run_all_tests()","repo_name":"mark-styx/gate-controller","sub_path":"gate_control/tests/button_sense.py","file_name":"button_sense.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37794561355","text":"from typing import Optional\n\nfrom gnomad.resources.resource_utils import (\n DataException,\n MatrixTableResource,\n TableResource,\n VersionedMatrixTableResource,\n VersionedTableResource,\n)\n\nfrom gnomad_qc.v4.resources.basics import qc_temp_prefix\nfrom gnomad_qc.v4.resources.constants import (\n CURRENT_RELEASE,\n RELEASES,\n)\n\n\ndef annotation_hists_path(release_version: str = CURRENT_RELEASE) -> str:\n \"\"\"\n Return path to file containing ANNOTATIONS_HISTS dictionary.\n Dictionary contains histogram values for each metric.\n For example, \"InbreedingCoeff\": [-0.25, 0.25, 50].\n\n :return: Path to file with annotation histograms\n \"\"\"\n return f\"gs://gnomad/release/{release_version}/json/annotation_hists.json\"\n\n\ndef qual_hists_json_path(release_version: str = CURRENT_RELEASE) -> str:\n \"\"\"\n Fetch filepath for qual histograms JSON.\n\n :param release_version: Release version. Defaults to CURRENT RELEASE\n :return: File path for histogram JSON\n \"\"\"\n return f\"gs://gnomad/release/{release_version}/json/gnomad.exomes.v{release_version}.json\"\n\n\ndef release_ht_path(\n data_type: str = \"exomes\",\n release_version: str = CURRENT_RELEASE,\n public: bool = True,\n) -> str:\n \"\"\"\n Fetch filepath for release (variant-only) Hail Tables.\n\n :param data_type: 'exomes' or 'genomes'\n :param release_version: Release version\n :param public: Determines whether release sites Table is read from public or private bucket. Defaults to private\n :return: File path for desired Hail Table\n \"\"\"\n if public:\n return f\"gs://gnomad-public-requester-pays/release/{release_version}/ht/{data_type}/gnomad.{data_type}.v{release_version}.sites.ht\"\n else:\n return f\"gs://gnomad/release/{release_version}/ht/{data_type}/gnomad.{data_type}.v{release_version}.sites.ht\"\n\n\ndef release_sites(public: bool = False) -> VersionedTableResource:\n \"\"\"\n Retrieve versioned resource for sites-only release Table.\n\n :param public: Determines whether release sites Table is read from public or private bucket. 
Defaults to private\n :return: Sites-only release Table\n \"\"\"\n return VersionedTableResource(\n default_version=CURRENT_RELEASE,\n versions={\n release: TableResource(\n path=release_ht_path(release_version=release, public=public)\n )\n for release in RELEASES\n },\n )\n\n\ndef release_vcf_path(\n release_version: Optional[str] = None, contig: Optional[str] = None,\n) -> str:\n \"\"\"\n Fetch bucket for release (sites-only) VCFs.\n\n :param release_version: Release version. When no release_version is supplied CURRENT_RELEASE is used.\n :param contig: String containing the name of the desired reference contig. Defaults to the full (all contigs) sites VCF path\n sites VCF path\n :return: Filepath for the desired VCF\n \"\"\"\n if release_version is None:\n release_version = CURRENT_RELEASE\n\n if contig:\n return f\"gs://gnomad/release/{release_version}/vcf/exomes/gnomad.exomes.v{release_version}.sites.{contig}.vcf.bgz\"\n else:\n # if contig is None, return path to sharded vcf bucket\n # NOTE: need to add .bgz or else hail will not bgzip shards\n return f\"gs://gnomad/release/{release_version}/vcf/exomes/gnomad.exomes.v{release_version}.sites.vcf.bgz\"\n\n\ndef release_header_path(release_version: Optional[str] = None) -> str:\n \"\"\"\n Fetch path to pickle file containing VCF header dictionary.\n\n :param release_version: Release version. When no release_version is supplied CURRENT_RELEASE is used\n :return: Filepath for header dictionary pickle\n \"\"\"\n if release_version is None:\n release_version = CURRENT_RELEASE\n\n return f\"gs://gnomad/release/{release_version}/vcf/exomes/gnomad.exomes.v{release_version}_header_dict.pickle\"\n\n\ndef append_to_vcf_header_path(\n subset: str, release_version: str = CURRENT_RELEASE\n) -> str:\n \"\"\"\n Fetch path to TSV file containing extra fields to append to VCF header.\n\n Extra fields are VEP and dbSNP versions.\n\n :param subset: One of the possible release subsets\n :param release_version: Release version. 
Defaults to CURRENT RELEASE\n    :return: Filepath for extra fields TSV file\n    \"\"\"\n    return f\"gs://gnomad/release/{release_version}/vcf/exomes/extra_fields_for_header{f'_{subset}' if subset else ''}.tsv\"\n","repo_name":"GeneticResources/gnomad_qc","sub_path":"gnomad_qc/v4/resources/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"37436172748","text":"import tundra as tundra\nimport hashlib\nimport hmac\nimport urllib2\nimport json\nimport sys\n\nsys.path.append('/usr/lib/python2.7/dist-packages/')\nimport pygst\npygst.require(\"0.10\")\nimport gst\n\nclass ServiceFusionPlayer:\n    def __init__(self):\n        tundra.LogInfo(\"** Starting ServiceFusionPlayer **\");\n        self.g_handler = GroovesharkHandler()\n        self.player = GStreamPlayer(self.PlayerCallback)\n\n        self.state = \"STOPPED\"\n        self.current_stream = {}\n        self.time_played = 0\n\n        assert self.g_handler.IsInitialized()\n        assert tundra.Scene().connect(\"SceneAdded(QString)\", self.SceneAdded)\n        assert tundra.Frame().connect(\"Updated(float)\", self.FrameUpdated)\n\n        # Store previous attribute states because there's no way of checking which attribute actually changed\n        self.prev_song_name = \"\"\n        self.prev_state = \"\"\n\n    def PlayerCallback(self, message):\n        if(message == \"FINISHED\"):\n            self.g_handler.MarkStreamFinished(self.current_stream[\"StreamKey\"])\n\n        entity = self.scene.GetEntityByNameRaw(\"radio_1\")\n        component = entity.dynamiccomponent\n\n        if(not component.GetAttribute(\"state\")):\n            component.CreateAttribute(\"string\", \"state\")\n\n        if(component.GetAttribute(\"state\") != message):\n            component.SetAttribute(\"state\", message)\n        self.state = message\n\n    def SetState(self, state):\n        print(\"ServiceFusionPlayer: Setting state to: {0}\".format(state))\n        if(state == \"PLAYING\"):\n            self.player.Play()\n        elif(state == \"STOPPED\"):\n            self.player.Stop()\n        elif(state == \"PAUSED\"):\n            self.player.Pause()\n\n    def SceneAdded(self, name):\n        self.scene = tundra.Scene().GetSceneRaw(name)\n        assert self.scene.connect(\"AttributeChanged(IComponent*, IAttribute*, AttributeChange::Type)\", self.OnAttributeChanged)\n\n    def FrameUpdated(self, frametime):\n        if(len(self.current_stream) > 0 and self.state == \"PLAYING\"):\n            self.g_handler.AddStreamTime(self.current_stream[\"StreamKey\"], frametime)\n\n    def OnAttributeChanged(self, component, attribute, changeType):\n        entity = component.ParentEntity()\n        if(entity.name != \"radio_1\"):\n            return\n\n        if(component.typeName != \"EC_DynamicComponent\"):\n            return\n\n        state = component.GetAttribute(\"state\")\n        if(state != self.prev_state):\n            self.prev_state = state\n            if(state and state != \"\" and state != self.state):\n                self.SetState(state)\n            return\n\n        song_name = component.GetAttribute(\"song\")\n        if(song_name != self.prev_song_name):\n            self.prev_song_name = song_name\n            tundra.LogInfo(\"ServiceFusionPlayer: Searching for song: {0}\".format(song_name))\n\n            song_id = self.g_handler.SearchSong(song_name, 1)[0][\"SongID\"]\n            if(song_id != 0):\n                print(\"ServiceFusionPlayer: Song found! 
Acquiring stream for first result..\")\n self.current_stream = self.g_handler.GetStreamServer(song_id)\n if(len(self.current_stream[\"url\"]) > 0):\n print(\"ServiceFusionPlayer: ..Success!\")\n self.player.SetStreamURI(self.current_stream[\"url\"])\n else:\n tundra.LogInfo(\"ServiceFusionPlayer: Couldn't acquire stream url!\")\n else:\n tundra.LogInfo(\"ServiceFusionPlayer: No song found with query \\\"{0}\\\"\".format(song_name))\n\n# Handles Grooveshark session, searching, stream fetching and stream handling.\nclass GroovesharkHandler:\n def __init__(self):\n self.key = \"\"\n self.secret = \"\"\n\n self.api_url = \"https://api.grooveshark.com/ws3.php?sig=\"\n self.session_id = \"\"\n self.active_streams = {}\n\n assert self.key != \"\"\n assert self.secret != \"\"\n\n assert self.StartSession()\n\n self.country = self.GetCountry()\n\n self.initialized = True\n\n def IsInitialized(self):\n return self.initialized\n\n def Signature(self, data):\n sig = hmac.new(self.secret, data)\n return sig.hexdigest()\n\n def Request(self, method, params={}):\n data = {}\n data[\"method\"] = method\n data[\"parameters\"] = params\n data[\"header\"] = {}\n data[\"header\"][\"wsKey\"] = self.key\n if(method != \"startSession\"):\n data[\"header\"][\"sessionID\"] = self.session_id\n\n payload = json.dumps(data)\n sig = self.Signature(payload)\n\n #tundra.LogInfo(\"{0}\".format(payload))\n\n req = urllib2.Request(self.api_url+sig, payload)\n response = urllib2.urlopen(req).read()\n\n #tundra.LogInfo(\"{0}\".format(response))\n\n return json.loads(response)\n\n def StartSession(self):\n response = self.Request(\"startSession\")\n if response[\"result\"][\"success\"] == True:\n self.session_id = response[\"result\"][\"sessionID\"]\n tundra.LogInfo(\"GroovesharkHandler: Session started succesfully!\")\n return True\n else:\n return False\n\n def GetCountry(self):\n response = self.Request(\"getCountry\")\n return response[\"result\"]\n\n def SearchSong(self, song_name, limit):\n method = \"getSongSearchResults\"\n params = {}\n params[\"query\"] = song_name\n params[\"country\"] = self.country\n params[\"limit\"] = limit\n\n response = self.Request(method, params)\n\n if(len(response[\"result\"][\"songs\"]) > 0):\n return response[\"result\"][\"songs\"]\n else:\n return []\n\n def GetStreamServer(self, song_id, low_bitrate = False):\n method = \"getStreamKeyStreamServer\"\n params = {}\n params[\"songID\"] = song_id\n params[\"country\"] = self.country\n params[\"lowBitrate\"] = low_bitrate\n\n response = self.Request(method, params)\n\n result = response[\"result\"]\n if(len(result) > 0):\n self.active_streams[result[\"StreamKey\"]] = {}\n self.active_streams[result[\"StreamKey\"]][\"SongID\"] = song_id\n self.active_streams[result[\"StreamKey\"]][\"ServerID\"] = result[\"StreamServerID\"]\n self.active_streams[result[\"StreamKey\"]][\"TimeElapsed\"] = 0\n self.active_streams[result[\"StreamKey\"]][\"Acked\"] = False\n return response[\"result\"]\n else:\n return []\n\n def AddStreamTime(self, stream_key, time_elapsed):\n if(self.active_streams[stream_key][\"Acked\"] == True):\n return\n\n self.active_streams[stream_key][\"TimeElapsed\"] = self.active_streams[stream_key][\"TimeElapsed\"] + time_elapsed\n if(self.active_streams[stream_key][\"TimeElapsed\"] / 30 >= 1):\n self.__MarkStreamOver30s(stream_key, self.active_streams[stream_key])\n\n def MarkStreamFinished(self, stream_key):\n print(\"GroovesharkHandler: Marking stream finished\")\n method = \"markSongComplete\"\n params = {}\n params[\"songID\"] = 
self.active_streams[\"stream_key\"][\"SongID\"]\n params[\"streamKey\"] = stream_key\n params[\"streamServerID\"] = self.active_streams[\"stream_key\"][\"ServerID\"]\n\n response = self.Request(method, params)\n\n del self.active_streams[\"stream_key\"]\n\n def __MarkStreamOver30s(self, key, stream):\n print(\"GroovesharkHandler: Marking stream duration >30s\")\n method = \"markStreamKeyOver30Secs\"\n params = {}\n params[\"streamKey\"] = key\n params[\"streamServerID\"] = stream[\"ServerID\"]\n\n response = self.Request(method, params)\n stream[\"Acked\"] = True\n\n\n# Simple GStreamer player for now\nclass GStreamPlayer:\n def __init__(self, callback):\n self.callback = callback\n self.player = gst.element_factory_make(\"playbin\", \"player\")\n self.uri = \"\"\n\n bus = self.player.get_bus()\n bus.add_signal_watch()\n bus.connect(\"message\", self.OnMessage)\n\n def OnMessage(self, bus, message):\n if(message.type == gst.MESSAGE_EOS):\n self.player.set_state(gst.STATE_NULL)\n self.uri = \"\"\n self.callback(\"FINISHED\")\n elif(message.type == gst.MESSAGE_ERROR):\n self.player_set_state(gst.STATE_NULL)\n self.uri = \"\"\n self.callback(\"ERROR\")\n\n def SetStreamURI(self, uri):\n self.uri = uri\n self.player.set_property(\"uri\", self.uri)\n\n def Play(self):\n if(self.uri != \"\"):\n self.player.set_state(gst.STATE_PLAYING)\n self.callback(\"PLAYING\")\n else:\n self.Stop()\n\n def Pause(self):\n if(self.uri != \"\"):\n self.player.set_state(gst.STATE_PAUSED)\n self.callback(\"PAUSED\")\n\n def Stop(self):\n self.player.set_state(gst.STATE_NULL)\n self.uri = \"\"\n self.callback(\"STOPPED\")\n\nif __name__ == \"__main__\":\n r = ServiceFusionPlayer()\n","repo_name":"Chiru/ChiruAddons","sub_path":"Additional-ServiceFusionScenes/grooveshark.py","file_name":"grooveshark.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"20191793075","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n#Create a Python class that enables data loading from CSV/XLSX files and provides functionalities\nclass DataAnalyzer:\n def __init__(self, file_path):\n self.file_path = file_path\n self.data = None\n \n#a. Load the CSV/XLSX file.\n def load_data(self):\n if self.file_path.endswith('.csv'):\n self.data = pd.read_csv(self.file_path)\n elif self.file_path.endswith('.xlsx'):\n self.data = pd.read_excel(self.file_path)\n else:\n raise ValueError(\"Unsupported file format\")\n \n#b. Print summaries of all numeric variables in the dataset.\n def summarize_numeric_variables(self):\n if self.data is None:\n raise ValueError(\"Data not loaded yet\")\n \n numeric_columns = self.data.select_dtypes(include=['number']).columns\n numeric_summary = self.data[numeric_columns].describe()\n print(numeric_summary)\n \n#c. Generate a bar graph for a specified categorical variable (variable name provided as input).\n def generate_bar_graph(self, categorical_variable):\n if self.data is None:\n raise ValueError(\"Data not loaded yet\")\n \n if categorical_variable not in self.data.columns:\n raise ValueError(\"Categorical variable not found in dataset\")\n \n category_counts = self.data[categorical_variable].value_counts()\n category_counts.plot(kind='bar')\n plt.title(f'Bar Graph for {categorical_variable}')\n plt.xlabel(categorical_variable)\n plt.ylabel('Count')\n plt.show()\n \n#d. 
Plot scatter plots for two specified numeric variables (input provided)\n def plot_scatter(self, x_variable, y_variable):\n if self.data is None:\n raise ValueError(\"Data not loaded yet\")\n \n if x_variable not in self.data.columns or y_variable not in self.data.columns:\n raise ValueError(\"One or both of the specified variables not found in dataset\")\n \n plt.scatter(self.data[x_variable], self.data[y_variable])\n plt.title(f'Scatter Plot: {x_variable} vs {y_variable}')\n plt.xlabel(x_variable)\n plt.ylabel(y_variable)\n plt.show()\n\n\n# Example\ndata_analyzer = DataAnalyzer('BostonHousing.csv')\ndata_analyzer.load_data()\n\ndata_analyzer.summarize_numeric_variables()\n\ndata_analyzer.generate_bar_graph('chas')\n\ndata_analyzer.plot_scatter('rm', 'age')","repo_name":"daniya692/DataBricks_Training","sub_path":"Assignments/Daniya_Assignment_2.4 OOPS/Daniya_Assignment 2.4 OOPS/Daniya_assignment_2.4_OOPS.py","file_name":"Daniya_assignment_2.4_OOPS.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39675312517","text":"import sys\ninput = sys.stdin.readline\n\ndef input_list_numbers():\n return(list(map(int, input().split())))\n\nt = int(input())\n\nentradas = []\n\nfor i in range(t):\n entradas.append(input())\n\nfor entrada in entradas:\n r = entrada[:2]\n for i in range(2, len(entrada) - 1, 2):\n r = r + entrada[i + 1]\n print(r)","repo_name":"luis-herasme/Algoritmos-CodeForces","sub_path":"ShortSubstrings.py","file_name":"ShortSubstrings.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71084240905","text":"import configparser\nimport pybitflyer\n\nclass FlyerControl():\n #Constructor\n def __init__(self):\n #configparser instance\n config_ini = configparser.ConfigParser()\n config_ini.read('setting.ini', encoding='utf-8')\n\n #Read the API key and secret key from the settings file\n api = config_ini['bitflyer']['api']\n secret = config_ini['bitflyer']['secret']\n\n #Create the bitflyer object\n self.API = pybitflyer.API(api, secret)\n\n #Return the current bitcoin price\n def BtcPrice(self):\n return self.API.ticker(product_code='BTC_JPY')['ltp']\n\n #Get the current trading commission\n def BtcCommission(self):\n return self.API.gettradingcommission(product_code='BTC_JPY')['commission_rate']\n\n #Get the JPY balance\n def Balance(self):\n result = self.API.getbalance()[0]['amount']\n return int(result)\n \n #Get the BTC balance\n def BtcBalance(self):\n result = self.API.getbalance()[1]['amount']\n return result\n\n #BTC market buy\n def BuyBtc(self, amt):\n result = self.API.sendchildorder(product_code='BTC_JPY',\\\n child_order_type='MARKET',\\\n side='BUY',\\\n size=amt,\\\n minute_to_expire=10,\\\n time_in_force='GTC')\n return result\n \n #BTC market sell\n def SellBtc(self, amt):\n result = self.API.sendchildorder(product_code='BTC_JPY',\\\n child_order_type='MARKET',\\\n side='SELL',\\\n size=amt,\\\n minute_to_expire=10,\\\n time_in_force='GTC')\n return result\n","repo_name":"UtsumiYoji/AutoBitCoinTrade","sub_path":"FlyerControl.py","file_name":"FlyerControl.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4545357141","text":"#!usr/bin/python3\n\ndef solve():\n # here we are just going to create a super long string...\n i = 0\n length = 0\n string = ''\n while length <= 30 + 10**6:\n string += str(i)\n i += 1\n length += len(str(i))\n\n prod = 1\n print(len(string))\n for i in 
range(7):\n print(string[10**i])\n prod *= int(string[10**i])\n return prod\n\nprint(solve())\n","repo_name":"dwillist/ProjectEuler","sub_path":"26-50/Euler40.py","file_name":"Euler40.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27565198829","text":"n = int(input())\na = list(map(int, input().split()))\na.sort()\na.reverse()\nans = []\nflag = False\nfor i in range(len(a) - 1):\n if flag == 1:\n flag = -1\n continue\n if a[i] == a[i+1]:\n ans.append(a[i])\n if flag == -1:\n break\n flag = 1\nif len(ans) == 2:\n print(ans[0] * ans[1])\nelse:\n print(0)","repo_name":"Yuta123456/AtCoder","sub_path":"python/AtCoder Beginner Contest 071/Make a Rectangle.py","file_name":"Make a Rectangle.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5478910309","text":"from typing import Union\n\nimport numpy as np\n\nfrom RiskLabAI.data.structures.abstract_bars import AbstractBars\n\n\nclass StandardBars(AbstractBars):\n \"\"\"\n Concrete class that contains the properties which are shared between the various types of standard bars (dollar, volume, tick).\n \"\"\"\n\n def __init__(\n self,\n bar_type: str,\n threshold: float = 50000,\n ):\n \"\"\"\n StandardBars constructor function\n :param bar_type: type of bar. e.g. dollar_standard_bars, tick_standard_bars etc.\n :param threshold: threshold used in the sampling process\n \"\"\"\n\n AbstractBars.__init__(self, bar_type)\n self.threshold = threshold\n\n def construct_bars_from_data(self, data: Union[list, tuple, np.ndarray]) -> list:\n \"\"\"\n The function is used to construct bars from input tick data.\n :param data: tabular data that contains date_time, price, and volume columns\n :return: constructed bars\n \"\"\"\n\n bars_list = []\n for tick_data in data:\n self.tick_counter += 1\n\n date_time, price, volume = tuple(tick_data)\n tick_rule = self._tick_rule(price)\n self.update_base_fields(price, tick_rule, volume)\n\n # is construction condition met to construct next bar or not\n threshold = self.threshold\n is_construction_condition_met = self._bar_construction_condition(threshold)\n if is_construction_condition_met:\n next_bar = self._construct_next_bar(\n date_time,\n self.tick_counter,\n price,\n self.high_price,\n self.low_price,\n threshold,\n )\n\n bars_list.append(next_bar)\n self._reset_cached_fields()\n\n return bars_list\n\n def _bar_construction_condition(self, threshold) -> bool:\n \"\"\"\n Compute whether the next bar should be sampled given the current and previous tick data.\n :return: whether the next bar should form from the current and previous tick data.\n \"\"\"\n\n return self.base_statistics[self.bar_type] >= threshold\n","repo_name":"RiskLabAI/RiskLabAI.py","sub_path":"RiskLabAI/data/structures/standard_bars.py","file_name":"standard_bars.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"62"} +{"seq_id":"37662812680","text":"#!/usr/bin/env python\n\nfrom CoordinateCalculator import CoordinateCalculator\nimport time\n\nattitude_reader_port = \"/dev/ttyUSB0\"\nanchors = []\nanchors.append({\"x\": -.92, \"y\": 0, \"z\": 0, \"serial_port\": \"/dev/ttyUSB1\", \"bias\": -.5})\nanchors.append({\"x\": 0, \"y\": .72, \"z\": 0, \"serial_port\": \"/dev/ttyUSB2\", \"bias\": -.0})\nanchors.append({\"x\": .92, \"y\": 0, \"z\": 0, 
\"serial_port\": \"/dev/ttyUSB3\", \"bias\": -.0})\n#anchors.append({\"x\": 0, \"y\": -.45, \"z\": .78, \"serial_port\": \"/dev/ttyUSB4\"})\n\ncoordinate_calculator = CoordinateCalculator(attitude_reader_port, anchors)\ntime.sleep(5)\n\nwhile True:\n try:\n location = coordinate_calculator.get_location()\n print(\"Derived locations: \")\n print(str(location))\n except Exception as e:\n print(\"Unable to derive locations: \" + str(e))\n time.sleep(.5)\n","repo_name":"BombD123/CoordinateCalculator","sub_path":"test_coordinates.py","file_name":"test_coordinates.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"33933313285","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 20 15:39:02 2021\n\n@author: micaelavieira\n\"\"\"\n\n\"\"\"\nSimple script to show that in the hdf5 file created by ioHub all information\nis present\"\"\"\n\n\nimport h5py\n\niohub_output = 'events.hdf5'\n\ndef printall(name, obj):\n if name in ['data_collection/events/mouse/MouseInputEvent', 'data_collection/events/experiment/MessageEvent']:\n attributes = dict(obj.attrs)\n print(attributes)\n\nwith h5py.File(iohub_output,'r') as hf:\n hf.visititems(printall)\n\nwith h5py.File(iohub_output, 'r') as f:\n data_collection = f['data_collection']\n events = data_collection['events']\n experiment = events['experiment']\n MessageEvent = experiment['MessageEvent'][()]\n mouse = events['mouse']\n MouseInputEvent = mouse['MouseInputEvent'][()]\n \n \n\nprint(MessageEvent)\n#print(MouseInputEvent)","repo_name":"micaela-vieira/EyeTracking-experiment","sub_path":"check_output_content.py","file_name":"check_output_content.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2948061402","text":"import trie\nimport time\n\n\"\"\"Change this path to use another word list.\nThe word list should be of format:\n word1\n word2\n ...\n wordN\n\"\"\"\ndict_file = \"../words/english-words-95.txt\"\n\ndef build_trie():\n t = trie.Trie()\n with open(dict_file, \"r\") as f:\n for l in f:\n t.insert(l.strip())\n return t\n\ndef flatten_results(lst):\n return (\"\".join(x) for x in lst)\n\ndef main():\n print(\"Building word list, please wait...\")\n t = None\n try:\n t = build_trie()\n except:\n print(\"Could not build word list.\")\n print(\"Completed.\")\n print()\n\n try:\n while True:\n text = input(\"find> \")\n \n ssec = time.clock()\n results = flatten_results(t.find_by_prefix(text))\n esec = time.clock()\n\n length = 0\n for word in results:\n print(word)\n length += 1\n\n print(\"Found {1} words. Query time about {0}s.\".format(esec - ssec, length))\n except (KeyboardInterrupt, EOFError):\n print(\"Good bye man.\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"skurmedel/trie","sub_path":"python/dict_example.py","file_name":"dict_example.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"71776381636","text":"import os\ndata_root_dir='data'# path to a folder used to store the input and output of this repo\ndataset='cifar10'# [cifar10,??]\nimage_side_length=32 # 32 for cifar10\nchannes=3# 3 for rgb images\nnum_classes=6 # number of classes learned during training. 
\n# In case of cifar 10, 6 classes are used for training, 4 classes are outliers\n\n#----------don't change below-----------#\nsaved_dhr_model=os.path.join(data_root_dir,'saved_dhr_model')\nsaved_feature_dir=os.path.join(data_root_dir,'saved_features')\nsaved_MAV_dir=os.path.join(data_root_dir,'saved_MAVs')\nsaved_distance_scores=os.path.join(data_root_dir,'saved_distance_scores')\nsaved_weibull_model=os.path.join(data_root_dir,'saved_weibull_model')\nsaved_openset_scores=os.path.join(data_root_dir,'saved_openset_scores')\n\nper_stage_settings={\n \"convert_cifar10_input_2_fit_torch\":{\n \"input_data_dir\":os.path.join(data_root_dir,'input_data_cifar10','train'),\n \"trainLabels_csv_path\":os.path.join(data_root_dir,'input_data_cifar10',\"trainLabels.csv\"),\n \"torch_training_data_dir\":os.path.join(data_root_dir,'cifar10','train'),\n \"torch_validation_data_dir\":os.path.join(data_root_dir,'cifar10','val'),\n \"torch_testing_data_dir\":os.path.join(data_root_dir,'cifar10','test'),\n },\n \"train_dhr\":{\n \"lr\":0.0001,\n \"epochs\":5,\n 'batch_size':60,\n \"save_path\":os.path.join(saved_dhr_model,dataset),\n \"dataset_dir\":os.path.join(data_root_dir,dataset),\n \"num_classes\":num_classes,\n \"means\":[0.4914, 0.4822, 0.4465],#\"channelwise means for normalization\"\n \"stds\":[0.2023, 0.1994, 0.2010],#\"channelwise std for normalization\"\n \"momentum\":0.9,\n \"weight_decay\":0.0005,\n \"image_side_length\":image_side_length,\n \"channes\":channes,\n \"load_and_continue\":True\n },\n \"get_model_features\":{\n \"dataset_dir\":os.path.join(data_root_dir,dataset),\n \"num_classes\":num_classes,\n \"means\":[0.4914, 0.4822, 0.4465],#\"channelwise means for normalization\"\n \"stds\":[0.2023, 0.1994, 0.2010],#\"channelwise std for normalization\"\n \"save_path\":os.path.join(saved_feature_dir,dataset),\n \"load_path\":os.path.join(saved_dhr_model,dataset,\"best_val_acc.pth\"),\n \"image_side_length\":image_side_length,\n \"channes\":channes\n },\n \"MAV_Compute\":{\n \"save_path\":os.path.join(saved_MAV_dir,dataset),\n \"feature_dir\":os.path.join(saved_feature_dir,dataset),\n \"dataset_dir\":os.path.join(data_root_dir,dataset),\n \"num_classes\":num_classes,\n },\n \"compute_distances\":{\n \"MAV_path\":os.path.join(saved_MAV_dir,dataset),\n \"save_path\":os.path.join(saved_distance_scores,dataset),\n \"feature_dir\":os.path.join(saved_feature_dir,dataset),\n \"dataset_dir\":os.path.join(data_root_dir,dataset),\n },\n \"weibull_fitting\":{\n \"MAV_path\":os.path.join(saved_MAV_dir,dataset),\n \"distance_scores_path\":os.path.join(saved_distance_scores,dataset),\n \"weibull_tail_size\":35,\n \"save_path\":os.path.join(saved_weibull_model,dataset),\n \"distance_type\":\"eucos\"\n },\n \"compute_openmax\":{\n \"weibull_save_path\":os.path.join(saved_weibull_model,dataset),\n \"alpha_rank\":num_classes,# alpha_rank <= num_classes\n \"feature_dir\":os.path.join(saved_feature_dir,dataset),\n \"num_classes\":num_classes,\n \"dataset_dir\":os.path.join(data_root_dir,dataset),\n \"save_path\":os.path.join(saved_openset_scores,dataset)\n },\n \"analyze_results\":{\n \"saved_openset_scores\":os.path.join(saved_openset_scores,dataset)\n }\n}\n\n","repo_name":"elite3312/CROSR-PyTorch-CIFAR10","sub_path":"configs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"62"} +{"seq_id":"29421034655","text":"import pytest\nfrom binary_tree import BinaryTree, Node\nfrom tree_breadth_first 
import breadth_first\nfrom binary_search_tree import BinarySearchTree\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_exists():\n assert breadth_first\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_rootless_tree():\n tree = BinaryTree()\n expected = []\n actual = breadth_first(tree)\n assert actual == expected\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_single_node():\n tree = BinaryTree()\n tree.root = Node(\"apples\")\n expected = [\"apples\"]\n actual = breadth_first(tree)\n assert actual == expected\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_two_nodes():\n tree = BinaryTree()\n tree.root = Node(\"apples\")\n tree.root.right = Node(\"bananas\")\n expected = [\"apples\", \"bananas\"]\n actual = breadth_first(tree)\n assert actual == expected\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_four_nodes():\n tree = BinaryTree()\n tree.root = Node(\"apples\")\n tree.root.left = Node(\"bananas\")\n tree.root.right = Node(\"cucumbers\")\n tree.root.right.right = Node(\"dates\")\n expected = [\"apples\", \"bananas\", \"cucumbers\", \"dates\"]\n actual = breadth_first(tree)\n assert actual == expected\n\n\n# @pytest.mark.skip(\"TODO\")\ndef test_example_from_reading():\n \"\"\"\n We build these out by hand because the example has some gaps\n i.e. it is not added to left-to-right\n\n 2\n 7 5\n 2 6 9\n 5 11 4\n\n result = [2,7,5,2,6,9,5,11,4]\n \"\"\"\n tree = BinaryTree()\n\n level_0 = Node(2)\n level_1_first = Node(7)\n level_1_second = Node(5)\n\n level_2_first = Node(2)\n level_2_second = Node(6)\n level_2_third = Node(9)\n\n level_3_first = Node(5)\n level_3_second = Node(11)\n level_3_third = Node(4)\n\n tree.root = level_0\n level_0.left = level_1_first\n level_0.right = level_1_second\n level_1_first.left = level_2_first\n level_1_first.right = level_2_second\n level_1_second.right = level_2_third\n\n level_2_second.left = level_3_first\n level_2_second.right = level_3_second\n\n level_2_third.right = level_3_third\n\n expected = [2, 7, 5, 2, 6, 9, 5, 11, 4]\n actual = breadth_first(tree)\n\n assert actual == expected\n\n\ndef test_tree_letters():\n b = BinaryTree()\n b.root = Node(\"a\")\n b.root.left = Node(\"b\")\n b.root.right = Node(\"c\")\n b.root.left.left = Node(\"d\")\n b.root.left.right = Node(\"e\")\n b.root.right.left = Node(\"f\")\n b.root.right.right = Node(\"g\")\n expected = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"]\n actual = breadth_first(b)\n assert actual == expected\n\n\ndef test_bst():\n b = BinarySearchTree()\n b.add(9)\n b.add(7)\n b.add(5)\n b.add(6)\n b.add(10)\n b.add(18)\n b.add(14)\n b.add(11)\n b.add(2)\n b.add(20)\n expected = [9, 7, 10, 5, 18, 2, 6, 14, 20, 11]\n actual = breadth_first(b)\n assert actual == expected\n\n","repo_name":"KajeTheCat/data-structures-and-algorithms","sub_path":"python/code_challenges/tree/test_tree_breadth_first.py","file_name":"test_tree_breadth_first.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"12405060280","text":"import os\nimport sys\nimport pathlib\nimport argparse\nimport configparser\nimport numpy as np\nfrom collections import OrderedDict\nfrom pathlib import Path\n\nfrom mosaic import log\nfrom mosaic import util\n\nLOGS_HOME = Path.home()/'logs'\nCONFIG_FILE_NAME = os.path.join(str(pathlib.Path.home()), 'mosaic.conf')\n\nSHIFT_H_FILE_NAME = os.path.join(str(pathlib.Path.home()), 'shifts_h.txt')\nSHIFT_V_FILE_NAME = os.path.join(str(pathlib.Path.home()), 'shifts_v.txt')\n\nSECTIONS = 
OrderedDict()\n\nSECTIONS['general'] = {\n 'config': {\n 'default': CONFIG_FILE_NAME,\n 'type': str,\n 'help': \"File name of configuration\",\n 'metavar': 'FILE'},\n 'logs-home': {\n 'default': LOGS_HOME,\n 'type': str,\n 'help': \"Log file directory\",\n 'metavar': 'FILE'},\n 'verbose': {\n 'default': True,\n 'help': 'Verbose output',\n 'action': 'store_true'},\n }\n\nSECTIONS['file-io'] = {\n 'folder-name': {\n 'default': '.',\n 'type': Path,\n 'help': \"Name of the last used directory containing multiple hdf files\",\n 'metavar': 'PATH'},\n 'tmp-file-name': {\n 'default': '/tile/tmp.h5',\n 'type': str,\n 'help': \"Default output file name\",\n 'metavar': 'FILE'},\n 'tile-file-name': {\n 'default': 'tile.h5',\n 'type': str,\n 'help': \"Default stitched file name\",\n 'metavar': 'FILE'},\n 'file-format': {\n 'default': 'dx',\n 'type': str,\n 'help': \"see https://dxchange.readthedocs.io/en/latest/source/demo.html\",\n 'choices': ['dx', 'aps2bm', 'aps7bm', 'aps32id']},\n 'binning': {\n 'type': util.positive_int,\n 'default': 0,\n 'help': \"Reconstruction binning factor as power(2, choice)\",\n 'choices': [0, 1, 2, 3]},\n 'sample-x': { \n 'type': str,\n 'default': '/measurement/instrument/sample_motor_stack/setup/x',\n 'help': \"Location in the hdf tomography layout where to find the tile x position (mm)\"}, \n 'sample-y': { \n 'type': str,\n 'default': '/measurement/instrument/sample_motor_stack/setup/y',\n 'help': \"Location in the hdf tomography layout where to find the tile y position (mm)\"}, \n 'resolution': { \n 'type': str,\n 'default': '/measurement/instrument/detection_system/objective/resolution',\n 'help': \"Location in the hdf tomography layout where to find the image resolution (um)\"}, \n 'full_file_name': { \n 'type': str,\n 'default': '/measurement/sample/file/full_name',\n 'help': \"Location in the hdf tomography layout where to find the full file name\"},\n 'step-x': {\n 'default': 0,\n 'type': float,\n 'help': 'When greater than 0, it is used to manually override the sample x step size stored in the hdf file'}, \n 'chunk-size': { \n 'type': int,\n 'default': 64,\n 'help': \"Number of projections for simultaneous processing\",}, \n }\n\nSECTIONS['stitch'] = {\n 'test': {\n 'default': False,\n 'help': 'if set one projection called mosaic_test will be stitched and placed in the raw data folder',\n 'action': 'store_true'},\n 'nprojection': { \n 'type': float,\n 'default': 0.5,\n 'help': \"Projection used for the stitching test\",},\n }\n\nSECTIONS['shift'] = { \n 'threshold': {\n 'default': 0.5,\n 'type': float,\n 'help': 'Threshold for selecting matching features (0,1)'},\n 'nprojection': {\n 'type': float,\n 'default': 0.5,\n 'help': \"Projection number (0,1)\"}, \n }\n\nSECTIONS['extract'] = {\n 'nprojection': {\n 'type': float,\n 'default': 0.5,\n 'help': \"Projection number (0,1)\"}, \n }\n\nMOSAIC_PARAMS = ('file-io', )\nSTITCH_PARAMS = ('file-io', 'stitch')\nSHIFT_PARAMS = ('file-io', 'shift')\nEXTRACT_PARAMS = ('file-io', 'extract')\n\nNICE_NAMES = ('General', 'File IO', 'Stitch')\n\ndef get_config_name():\n \"\"\"Get the command line --config option.\"\"\"\n name = CONFIG_FILE_NAME\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--config'):\n if arg == '--config':\n return sys.argv[i + 1]\n else:\n name = sys.argv[i].split('--config')[1]\n if name[0] == '=':\n name = name[1:]\n return name\n\n return name\n\n\ndef parse_known_args(parser, subparser=False):\n \"\"\"\n Parse arguments from file and then override them with the ones specified on the\n command 
line. Use *parser* for parsing and, if *subparser* is True, take into\n account that there is a value on the command line specifying the subparser.\n \"\"\"\n if len(sys.argv) > 1:\n subparser_value = [sys.argv[1]] if subparser else []\n config_values = config_to_list(config_name=get_config_name())\n values = subparser_value + config_values + sys.argv[1:]\n #print(subparser_value, config_values, values)\n else:\n values = \"\"\n\n return parser.parse_known_args(values)[0]\n\n\ndef config_to_list(config_name=CONFIG_FILE_NAME):\n \"\"\"\n Read arguments from config file and convert them to a list of keys and\n values as sys.argv does when they are specified on the command line.\n *config_name* is the file name of the config file.\n \"\"\"\n result = []\n config = configparser.ConfigParser()\n\n if not config.read([config_name]):\n return []\n\n for section in SECTIONS:\n for name, opts in ((n, o) for n, o in SECTIONS[section].items() if config.has_option(section, n)):\n value = config.get(section, name)\n\n if value != '' and value != 'None':\n action = opts.get('action', None)\n\n if action == 'store_true' and value == 'True':\n # Only the key is on the command line for this action\n result.append('--{}'.format(name))\n\n if not action == 'store_true':\n if opts.get('nargs', None) == '+':\n result.append('--{}'.format(name))\n result.extend((v.strip() for v in value.split(',')))\n else:\n result.append('--{}={}'.format(name, value))\n\n return result\n\n\nclass Params(object):\n def __init__(self, sections=()):\n self.sections = sections + ('general', )\n\n def add_parser_args(self, parser):\n for section in self.sections:\n for name in sorted(SECTIONS[section]):\n opts = SECTIONS[section][name]\n parser.add_argument('--{}'.format(name), **opts)\n\n def add_arguments(self, parser):\n self.add_parser_args(parser)\n return parser\n\n def get_defaults(self):\n parser = argparse.ArgumentParser()\n self.add_arguments(parser)\n\n return parser.parse_args('')\n\n\ndef write(config_file, args=None, sections=None):\n \"\"\"\n Write *config_file* with values from *args* if they are specified,\n otherwise use the defaults. 
If *sections* are specified, write values from\n *args* only to those sections, use the defaults on the remaining ones.\n \"\"\"\n config = configparser.ConfigParser()\n\n for section in SECTIONS:\n config.add_section(section)\n for name, opts in SECTIONS[section].items():\n if args and sections and section in sections and hasattr(args, name.replace('-', '_')):\n value = getattr(args, name.replace('-', '_'))\n if isinstance(value, list):\n # print(type(value), value)\n value = ', '.join(value)\n else:\n value = opts['default'] if opts['default'] is not None else ''\n\n prefix = '# ' if value == '' else ''\n\n if name != 'config':\n config.set(section, prefix + name, str(value))\n with open(config_file, 'w') as f:\n config.write(f)\n\n\ndef log_values(args):\n \"\"\"Log all values set in the args namespace.\n\n Arguments are grouped according to their section and logged alphabetically\n using the DEBUG log level thus --verbose is required.\n \"\"\"\n args = args.__dict__\n\n for section, name in zip(SECTIONS, NICE_NAMES):\n entries = sorted((k for k in args.keys() if k in SECTIONS[section]))\n if entries:\n log.info(name)\n for entry in entries:\n value = args[entry] if args[entry] is not None else \"-\"\n log.info(\" {:<16} {}\".format(entry, value))\n\n\ndef show_config(args):\n \"\"\"Log all values set in the args namespace.\n\n Arguments are grouped according to their section and logged alphabetically\n using the DEBUG log level thus --verbose is required.\n \"\"\"\n args = args.__dict__\n\n log.warning('mosaic status start')\n for section, name in zip(SECTIONS, NICE_NAMES):\n entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section]))\n if entries:\n for entry in entries:\n value = args[entry] if args[entry] != None else \"-\"\n log.info(\" {:<16} {}\".format(entry, value))\n\n log.warning('mosaic status end')\n \n","repo_name":"nikitinvv/mosaic","sub_path":"mosaic/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"20511924203","text":"def insertion_sort(L):\n for i in range(1, len(L)): # 5 steps - len/range 1 step, for 3 steps\n current_val = L[i] # 2 steps - selection 1 step, assignment 1 step\n j = i # 1 steps - assignment 1 step\n while (j > 0 and L[j - 1] > current_val): # 11 steps - comparison 2 step, subtraction 1 step, selection 1 step, while 3 steps\n L[j] = L[j - 1] # 4 steps - subtraction 1 step, selection 1 step\n j = j - 1 # 2 steps\n L[j] = current_val # 2 steps\n\n # worst case analysis\n # line 5-7 may happen up to n times: (11 + 4 + 2) * n = 17n\n # line 2-4 + line 5-7 + line 8 may happen up to n times: (5 + 2 + 1 +17n + 2) * n = 17n^2 + 10n\n # T(n) = 17n^2 + 10n\n\n\nif (__name__ == \"__main__\"):\n L = [5, 7, 1, 9, 3, 2, 0, 4, 6, 8]\n insertion_sort(L)\n print(L)\n","repo_name":"x-meowzilla/CSCA08-A48","sub_path":"CSCA48/10_Complexity_Analysis/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"62"} +{"seq_id":"19203782580","text":"#!/bin/python3\nimport numpy as np\nimport argparse\n\n###########\n## INPUT ##\n###########\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"create data of degree n\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--n\",type=int,required=True,help=\"create polynomial of highest degree n 
(x^1+...+x^n)\")\n parser.add_argument(\"--l\",type=int,required=False,help=\"number of data points\",default=600)\n \n args = parser.parse_args()\n return args\n\n##########\n## MAIN ##\n##########\n\nargs = get_args()\nN = args.n\nprint(N)\nLENGTH = args.l\n\nx = np.linspace(-5,5,LENGTH)\ncoeffs = np.random.uniform(-20,20,N+1) # Define a random polynomial of highest degree n\ny = np.poly1d(coeffs)(x)+np.random.uniform(-20,20,LENGTH) # Get polynomial points and add uniform noise\n\nOUT = open('n'+str(N)+'.txt','w') # Print the data\nfor i in range(len(x)):\n OUT.write('%f\\t%f\\n' % (x[i],y[i]))\n","repo_name":"benjaminwsebastian/OVERFITTING_LEAST_SQUARES","sub_path":"scripts/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6239098934","text":"import json\r\n\r\ndef read_result(filePath = \"./result/result.txt\"):\r\n with open(filePath, 'r') as f:\r\n dict = json.loads(f.read().strip())\r\n print('识别成功,内容为:' + dict['result'][0])\r\n music = str(dict['result'][0]).split('播放')[1]\r\n return music\r\n\r\ndef main():\r\n read_result()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ECherr/python","sub_path":"json_parse.py","file_name":"json_parse.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"72743475717","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import defaultdict\n\nnums = list(map(int, input().split(\" \")))\ngroupA = defaultdict(list)\ngroupB = []\n\nfor i in range(nums[0]):\n word = input()\n groupA[word].append(str(i+1))\nfor _ in range(nums[1]):\n groupB.append(input())\n# print(groupA)\n\n\nfor word in groupB:\n\n if word in groupA:\n print(\" \".join(groupA[word]))\n else:\n print(-1)\n","repo_name":"Yabsera-Haile/A2SV","sub_path":"Week 2/DefaultDict.py","file_name":"DefaultDict.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"29814261548","text":"import logging\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\nfrom dataclasses import dataclass\n\nimport torch\nfrom fairseq.data import (\n ConcatDataset,\n Dictionary,\n FairseqDataset,\n ResamplingDataset\n)\nfrom fairseq.data.audio.data_cfg import S2TDataConfig\nfrom fairseq.data.audio.speech_to_text_dataset import (\n SpeechToTextDatasetItem,\n SpeechToTextDataset,\n SpeechToTextDatasetCreator\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass SpeechToTextDatasetItemWithDomain(SpeechToTextDatasetItem):\n src_lang_id: Optional[torch.Tensor] = None\n tgt_lang_id: Optional[torch.Tensor] = None\n domain_id: Optional[torch.Tensor] = None\n\n\nclass SpeechToTextDatasetWithDomain(SpeechToTextDataset):\n\n def __init__(\n self,\n split: str,\n is_train_split: bool,\n cfg: S2TDataConfig,\n audio_paths: List[str],\n n_frames: List[int],\n src_texts: Optional[List[str]] = None,\n tgt_texts: Optional[List[str]] = None,\n speakers: Optional[List[str]] = None,\n src_langs: Optional[List[str]] = None,\n tgt_langs: Optional[List[str]] = None,\n ids: Optional[List[str]] = None,\n tgt_dict: Optional[Dictionary] = None,\n pre_tokenizer=None,\n bpe_tokenizer=None,\n n_frames_per_step=1,\n speaker_to_id=None,\n src_lang_ids: Optional[List[int]] = None,\n tgt_lang_ids: Optional[List[int]] = 
None,\n domain_ids: Optional[List[int]] = None\n ):\n super().__init__(\n split, is_train_split, cfg, audio_paths, n_frames,\n src_texts, tgt_texts, speakers, src_langs, tgt_langs,\n ids, tgt_dict, pre_tokenizer, bpe_tokenizer,\n n_frames_per_step, speaker_to_id\n )\n assert src_lang_ids is None or len(src_lang_ids) == self.n_samples\n assert tgt_lang_ids is None or len(tgt_lang_ids) == self.n_samples\n assert domain_ids is None or len(domain_ids) == self.n_samples\n\n self.src_lang_ids = src_lang_ids\n self.tgt_lang_ids = tgt_lang_ids\n self.domain_ids = domain_ids\n\n def __getitem__(self, index: int) -> SpeechToTextDatasetItemWithDomain:\n item = super().__getitem__(index)\n src_lang_id = self.src_lang_ids[index]\n tgt_lang_id = self.tgt_lang_ids[index]\n domain_id = self.domain_ids[index]\n return SpeechToTextDatasetItemWithDomain(\n index=item.index, source=item.source,\n target=item.target, speaker_id=item.speaker_id,\n src_lang_id=src_lang_id,\n tgt_lang_id=tgt_lang_id,\n domain_id=domain_id\n )\n\n def collater(\n self, samples: List[SpeechToTextDatasetItem], return_order: bool = False\n ) -> Dict:\n if len(samples) == 0:\n return {}\n out = super().collater(samples, return_order=True)\n order = out[\"order\"]\n src_lang_ids = torch.tensor([x.src_lang_id for x in samples], dtype=torch.long).index_select(0, order)\n tgt_lang_ids = torch.tensor([x.tgt_lang_id for x in samples], dtype=torch.long).index_select(0, order)\n domain_ids = torch.tensor([x.domain_id for x in samples], dtype=torch.long).index_select(0, order)\n\n out[\"src_lang_ids\"] = src_lang_ids\n out[\"tgt_lang_ids\"] = tgt_lang_ids\n out[\"domain_ids\"] = domain_ids\n if not return_order:\n del out[\"order\"]\n return out\n\n\nclass SpeechToTextDatasetCreatorWithDomain(SpeechToTextDatasetCreator):\n KEY_SRC_LANG_ID, KEY_TGT_LANG_ID = \"src_lang_id\", \"tgt_lang_id\"\n KEY_DOMAIN_ID = \"domain_id\"\n # default values\n DEFAULT_SRC_LANG_ID, DEFAULT_TGT_LANG_ID, DEFAULT_DOMAIN_ID = 0, 0, 0\n\n @classmethod\n def _from_list(\n cls,\n split_name: str,\n is_train_split,\n samples: List[Dict],\n cfg: S2TDataConfig,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n n_frames_per_step,\n speaker_to_id\n ) -> SpeechToTextDatasetWithDomain:\n audio_root = Path(cfg.audio_root)\n ids = [s[cls.KEY_ID] for s in samples]\n audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]\n n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]\n tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]\n src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]\n speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]\n src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]\n tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]\n src_lang_ids = [s.get(cls.KEY_SRC_LANG_ID, cls.DEFAULT_SRC_LANG_ID) for s in samples]\n tgt_lang_ids = [s.get(cls.KEY_TGT_LANG_ID, cls.DEFAULT_TGT_LANG_ID) for s in samples]\n domain_ids = [s.get(cls.KEY_DOMAIN_ID, cls.DEFAULT_DOMAIN_ID) for s in samples]\n return SpeechToTextDatasetWithDomain(\n split_name,\n is_train_split,\n cfg,\n audio_paths,\n n_frames,\n src_texts=src_texts,\n tgt_texts=tgt_texts,\n speakers=speakers,\n src_langs=src_langs,\n tgt_langs=tgt_langs,\n ids=ids,\n tgt_dict=tgt_dict,\n pre_tokenizer=pre_tokenizer,\n bpe_tokenizer=bpe_tokenizer,\n n_frames_per_step=n_frames_per_step,\n speaker_to_id=speaker_to_id,\n src_lang_ids=src_lang_ids,\n tgt_lang_ids=tgt_lang_ids,\n domain_ids=domain_ids\n )\n\n @classmethod\n 
def _load_samples_from_tsv(\n cls,\n root: str,\n split: str,\n src_lang_map,\n tgt_lang_map,\n domain_map\n ):\n # metadata from split\n _, src_lang, tgt_lang, domain = split.split(\"_\")\n src_lang_id = src_lang_map[src_lang]\n tgt_lang_id = tgt_lang_map[tgt_lang]\n domain_id = domain_map[domain]\n\n samples = SpeechToTextDatasetCreator._load_samples_from_tsv(root, split)\n for s in samples:\n s.update({\n cls.KEY_SRC_LANG_ID: src_lang_id,\n cls.KEY_TGT_LANG_ID: tgt_lang_id,\n cls.KEY_DOMAIN_ID: domain_id\n })\n return samples\n\n @classmethod\n def _from_tsv(\n cls,\n root: str,\n cfg: S2TDataConfig,\n split: str,\n tgt_dict,\n is_train_split: bool,\n pre_tokenizer,\n bpe_tokenizer,\n n_frames_per_step,\n speaker_to_id,\n src_lang_map: Dict[str, int],\n tgt_lang_map: Dict[str, int],\n domain_map: Dict[str, int]\n ) -> SpeechToTextDatasetItemWithDomain:\n samples = cls._load_samples_from_tsv(\n root, split, src_lang_map,\n tgt_lang_map, domain_map\n )\n return cls._from_list(\n split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer,\n bpe_tokenizer, n_frames_per_step, speaker_to_id\n )\n\n @classmethod\n def from_tsv(\n cls,\n root: str,\n cfg: S2TDataConfig,\n splits: str,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n is_train_split: bool,\n epoch: int,\n seed: int,\n src_lang_map: Dict[str, int],\n tgt_lang_map: Dict[str, int],\n domain_map: Dict[str, int],\n n_frames_per_step: int = 1,\n speaker_to_id=None\n ) -> SpeechToTextDatasetWithDomain:\n datasets = [\n cls._from_tsv(\n root, cfg, split, tgt_dict, is_train_split, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, src_lang_map, tgt_lang_map, domain_map\n )\n for split in splits.split(\",\")\n ]\n\n if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0:\n # temperature-based sampling\n size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)\n datasets = [\n ResamplingDataset(\n d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)\n )\n for r, d in zip(size_ratios, datasets)\n ]\n\n return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]\n","repo_name":"facebookresearch/fairseq","sub_path":"examples/attention_head_selection/src/data/speech_to_text_dataset_with_domain.py","file_name":"speech_to_text_dataset_with_domain.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":28050,"dataset":"github-code","pt":"62"} +{"seq_id":"20602859581","text":"\"\"\"empty message\n\nRevision ID: 8afb811f475c\nRevises:\nCreate Date: 2020-05-30 01:46:15.300787\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8afb811f475c'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('movies',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(), nullable=True),\n sa.Column('release_date', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('actors',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=True),\n sa.Column('age', sa.Integer(), nullable=True),\n sa.Column('gender', sa.String(), nullable=True),\n sa.Column('movie_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['movie_id'], ['movies.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('actors')\n op.drop_table('movies')\n # ### end Alembic commands ###\n","repo_name":"kemaltulum/udacity-fsnd-capstone","sub_path":"migrations/versions/8afb811f475c_.py","file_name":"8afb811f475c_.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"62"} +{"seq_id":"695791555","text":"from pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import decode, from_json, col, explode\r\nfrom pyspark.sql.types import StructType, StructField, FloatType, TimestampType, StringType\r\n\r\n\r\ndef main():\r\n # Open spark session\r\n spark = SparkSession.builder.master('local').getOrCreate()\r\n spark.sparkContext.setLogLevel('ERROR')\r\n\r\n schema = StructType([\r\n StructField('ts', StringType(), True),\r\n StructField('symbol', StringType(), True),\r\n StructField('price', FloatType(), True)\r\n ])\r\n\r\n input_stream = spark\\\r\n .read\\\r\n .format('kafka')\\\r\n .option('kafka.bootstrap.servers', 'kafka:9092')\\\r\n .option('kafka.group.id', 'group1')\\\r\n .option('subscribe', 'prices')\\\r\n .option('startingOffsets', 'earliest')\\\r\n .option('endingOffsets', 'latest')\\\r\n .load()\\\r\n .select(from_json(col(\"value\").cast(\"string\"), schema).alias(\"parsed_value\"))\r\n\r\n df = input_stream.select(\"parsed_value.*\")\r\n\r\n df.printSchema()\r\n\r\n # Summary stats\r\n df_summary = df.describe(['price'])\r\n df_summary.show()\r\n\r\n # Summary table into dictionary\r\n stats = dict([(row.summary, float(row.price))\r\n for row in df_summary.collect()])\r\n\r\n # Filter by 3*price std and show dataframe\r\n upper_bound = stats['mean'] + 3*stats['stddev']\r\n lower_bound = stats['mean'] - 3*stats['stddev']\r\n df.filter((df.price > upper_bound) | (\r\n df.price < lower_bound)).show(truncate=False)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"charliehpearce/kafka-spark","sub_path":"consumer/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21594468897","text":"#import libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n#import the necessary modules\r\nfrom helper import Model\r\nfrom helper import Auxiliary\r\n\r\n# Modelling\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n#Evaluating\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import r2_score\r\n\r\n\r\ndf = Model.get_csv('BPI_Challenge_2012.csv')\r\n#df_train, df_test = Auxiliary.train_test_split(Auxiliary.preprocess_data(Model.get_csv('BPI_Challenge_2012.csv')))\r\n#Preprocess data and then split it into 
train and test sets\r\ndf_train, df_test = Auxiliary.train_test_split(Auxiliary.preprocess_data(df))\r\n\r\nprint(df_train.columns)\r\n\r\n#only select needed columns\r\ndf_train = df_train[['org:resource', 'case:concept:name', 'concept:name', 'month', 'day', 'Week Day', 'Next Time', 'lifecycle:transition', \r\n 'A_SUBMITTED', 'A_PARTLYSUBMITTED', 'A_PREACCEPTED',\r\n 'W_Completeren aanvraag', 'A_ACCEPTED', 'O_SELECTED', 'A_FINALIZED',\r\n 'O_CREATED', 'O_SENT', 'W_Nabellen offertes', 'O_SENT_BACK',\r\n 'W_Valideren aanvraag', 'A_REGISTERED', 'A_APPROVED', 'O_ACCEPTED',\r\n 'A_ACTIVATED', 'O_CANCELLED', 'W_Wijzigen contractgegevens',\r\n 'A_DECLINED', 'A_CANCELLED', 'W_Afhandelen leads', 'O_DECLINED',\r\n 'W_Nabellen incomplete dossiers', 'W_Beoordelen fraude']]\r\ndf_test = df_test[['org:resource', 'case:concept:name', 'concept:name', 'month', 'day', 'Week Day', 'Next Time', 'lifecycle:transition', \r\n 'A_SUBMITTED', 'A_PARTLYSUBMITTED', 'A_PREACCEPTED',\r\n 'W_Completeren aanvraag', 'A_ACCEPTED', 'O_SELECTED', 'A_FINALIZED',\r\n 'O_CREATED', 'O_SENT', 'W_Nabellen offertes', 'O_SENT_BACK',\r\n 'W_Valideren aanvraag', 'A_REGISTERED', 'A_APPROVED', 'O_ACCEPTED',\r\n 'A_ACTIVATED', 'O_CANCELLED', 'W_Wijzigen contractgegevens',\r\n 'A_DECLINED', 'A_CANCELLED', 'W_Afhandelen leads', 'O_DECLINED',\r\n 'W_Nabellen incomplete dossiers', 'W_Beoordelen fraude']]\r\n\r\n#remove entries where there is NaN\r\ndf_train = df_train.dropna()\r\ndf_test = df_test.dropna()\r\ndf_train = df_train.replace(-1, 0)\r\ndf_test = df_test.replace(-1, 0)\r\n\r\n#q__train_low = df_train[\"Next Time\"].quantile(0.03)\r\nq__train_hi = df_train[\"Next Time\"].quantile(0.97)\r\n\r\n#df_train = df_train[(df_train[\"Next Time\"] < q__train_hi) & (df_train[\"Next Time\"] > q__train_low)]\r\ndf_train = df_train[df_train[\"Next Time\"] < q__train_hi]\r\n#q_test_low = df_test[\"Next Time\"].quantile(0.01)\r\nq_test_hi = df_test[\"Next Time\"].quantile(0.97)\r\n#df_test = df_test[(df_test[\"Next Time\"] < q_test_hi) & (df_test[\"Next Time\"] > q_test_low)]\r\ndf_test = df_test[df_test[\"Next Time\"] < q_test_hi]\r\n\r\n#split the data into training and test sets and drop some data\r\n#x_train = df_train[['org:resource', 'lifecycle:transition','concept:name','case:AMOUNT_REQ','month', 'day']]\r\nX_train = df_train[['org:resource', 'day', 'month', 'Week Day', 'lifecycle:transition', \r\n 'A_SUBMITTED', 'A_PARTLYSUBMITTED', 'A_PREACCEPTED',\r\n 'W_Completeren aanvraag', 'A_ACCEPTED', 'O_SELECTED', 'A_FINALIZED',\r\n 'O_CREATED', 'O_SENT', 'W_Nabellen offertes', 'O_SENT_BACK',\r\n 'W_Valideren aanvraag', 'A_REGISTERED', 'A_APPROVED', 'O_ACCEPTED',\r\n 'A_ACTIVATED', 'O_CANCELLED', 'W_Wijzigen contractgegevens',\r\n 'A_DECLINED', 'A_CANCELLED', 'W_Afhandelen leads', 'O_DECLINED',\r\n 'W_Nabellen incomplete dossiers', 'W_Beoordelen fraude']]\r\ny_train = df_train[['Next Time']]\r\n#x_test = df_test[['org:resource', 'lifecycle:transition','concept:name','case:AMOUNT_REQ','month', 'day']]\r\nX_test = df_test[['org:resource', 'day', 'month', 'Week Day', 'lifecycle:transition', \r\n 'A_SUBMITTED', 'A_PARTLYSUBMITTED', 'A_PREACCEPTED',\r\n 'W_Completeren aanvraag', 'A_ACCEPTED', 'O_SELECTED', 'A_FINALIZED',\r\n 'O_CREATED', 'O_SENT', 'W_Nabellen offertes', 'O_SENT_BACK',\r\n 'W_Valideren aanvraag', 'A_REGISTERED', 'A_APPROVED', 'O_ACCEPTED',\r\n 'A_ACTIVATED', 'O_CANCELLED', 'W_Wijzigen contractgegevens',\r\n 'A_DECLINED', 'A_CANCELLED', 'W_Afhandelen leads', 'O_DECLINED',\r\n 'W_Nabellen incomplete dossiers', 'W_Beoordelen 
fraude']]\r\ny_test = df_test[['Next Time']]\r\n\r\n#normalize y values\r\n\r\nsplit_location = y_train.shape[0]\r\ny_df = pd.concat([y_train, y_test])\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler = MinMaxScaler()\r\n\r\nscaler.fit(y_df)\r\n\r\ny_df = scaler.transform(y_df)\r\n\r\n#Resplit as numpy arrays\r\n#y_train = y_df[0:split_location]\r\n#y_test = y_df[split_location:]\r\n\r\nX_train = X_train.values\r\nX_test = X_test.values\r\n\r\ntemp_array = []\r\nreal_x_train = []\r\nreal_x_test = []\r\nfor index,value in enumerate(X_train):\r\n temp_array = []\r\n temp_array = value[0]\r\n temp_array = np.append(temp_array, value[1])\r\n temp_array = np.append(temp_array, value[2])\r\n real_x_train.append(temp_array)\r\nfor index,value in enumerate(X_test):\r\n temp_array = []\r\n temp_array = value[0]\r\n temp_array = np.append(temp_array, value[1])\r\n temp_array = np.append(temp_array, value[2])\r\n real_x_test.append(temp_array)\r\n\r\nx_train = np.array(real_x_train)\r\nx_test = np.array(real_x_test)\r\n\r\n\r\n#create an instance of a Linear Regression model and then fit this to our training data\r\nreg = LinearRegression()\r\nreg.fit(X_train, y_train)\r\n\r\n#predict the data\r\ny_pred_train = reg.predict(X_train)\r\ny_pred = reg.predict(X_test)\r\n\r\n#compute the accuracy of the predictor\r\nr2score_train = r2_score(y_train, y_pred_train)\r\nr2score = r2_score(y_test, y_pred)\r\nprint(y_pred.size)\r\nprint(\"\\nR2-Score:\", r2score, \"\\nR2-score train:\", r2score_train)\r\n\r\n# Export predicted data vs Ground truth\r\ncompare_result = pd.DataFrame()\r\ncompare_result['Next time'] = y_test\r\ncompare_result['Predicted Event'] = y_pred\r\n\r\ncompare_result.to_csv('Linear_Regression_Prediction_Ground_Truth.csv', index = False)\r\n","repo_name":"DiscordTurtle/ProcessMining","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34058653911","text":"# -*- coding: utf-8 -*- \r\n# @Time : 2022/11/2 16:22\r\n# @Author:One77\r\n# @FileName: Q3.py\r\n# @Software: PyCharm\r\n\r\n'''\r\n3 Write a program that reads the grade records of 10 students saved in a file (student ID, name, Python course score);\r\nthen sort and print them by score from high to low\r\n'''\r\n\r\nwith open(\"data\\\\student.txt\", \"r\", encoding='utf-8') as f_read:\r\n read = f_read.read()\r\n\r\nmylist = read.split()\r\n\r\nmydict = {}\r\nlistall = []\r\nlistsmall = []\r\n\r\nfor i in range(4, 32, 3):\r\n listsmall = [mylist[i - 1], mylist[i], mylist[i + 1]]\r\n list2 = listsmall\r\n listall.append(list2)\r\n\r\nlistall.sort(key=lambda x: x[2],reverse=True)\r\n\r\nwith open(\"data\\\\stu_write.txt\", \"w\", encoding='utf-8') as f_write:\r\n f_write.write(\"Name ID Score\\n\")\r\n for i in range(10):\r\n str = ' '.join(listall[i])\r\n print(str)\r\n f_write.write(str + '\\n')\r\n","repo_name":"WANQIQI77/Python_learning","sub_path":"homework/homework3/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"539061484","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWebEngineWidgets import *\nimport sys\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setWindowTitle('Open external web page example')\n self.setGeometry(122, 30, 1355, 730)\n self.browser = QWebEngineView()\n #Load an external web page\n self.browser.setHtml('''
<html>\n <head>\n <title>Hello PyQt5</title>\n </head>\n <body>\n <h1>Hello PyQt5</h1>\n <h1>Hello PyQt5</h1>\n <h1>Hello PyQt5</h1>\n <h1>Hello PyQt5</h1>\n </body>\n </html>
    \n \n \n '''\n )\n self.setCentralWidget(self.browser)\n \nif __name__ == '__main__':\n app = QApplication(sys.argv)\n win = MainWindow()\n win.show()\n sys.exit(app.exec_())\n \n","repo_name":"yergen/PyQt5","sub_path":"chapter05/qt05_webview03.py","file_name":"qt05_webview03.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"73198031237","text":"from gooey import Gooey, GooeyParser\nimport sys\nimport os\nfrom subprocess import Popen, PIPE\n\nREGRESSOR = [\"Linear\", \"Ridge\", \"Lasso\"]\nCLASSIFIER = [\"Decision_Tree\", \"Random_Forest\", \"SVM\"]\nCLUSTERING = [\"K_Means\", \"Label_Propagation\"]\nIMAGE = [\"Image_Classification\",\"Mask_RCNN\"]\nNLP = [\"Bert\", \"Sentiment_Analysis\", \"Word2Vec\"]\n\n\n\n@Gooey(program_name=\"PoAI\",image_dir='image',\n menu = [{'name': 'File','items' : [{\n 'type': 'AboutDialog',\n 'menuTitle': 'About',\n 'name': 'PoAI',\n 'description': 'POSTECH AI',\n 'copyright': '2020',\n 'website': 'https://github.com/chan8616/PoAI',\n 'developer': '영현, 찬양, 현지'},\n {'type': 'Link',\n 'menuTitle': 'Visit Our Site',\n 'url': 'http://piai.postech.ac.kr/'}\n ]\n },{\n 'name' : 'Help',\n 'items' : [{\n 'type' : 'Link',\n 'menuTitle' : 'GUI information',\n 'url' : 'https://github.com/chriskiehl/Gooey'\n }]\n }]\n )\n\ndef main():\n desc = \"Choose your model\"\n main_parser = GooeyParser(description=desc)\n model_sel_parser = main_parser.add_argument_group(\"Model Select\", gooey_options={'show_border': True, 'columns': 1})\n\n model_kind = model_sel_parser.add_mutually_exclusive_group()\n model_kind.add_argument('--Regression',\n choices=REGRESSOR,\n dest = \"Regression Model\")\n\n model_kind.add_argument('--Classification',\n choices=CLASSIFIER,\n dest=\"Classification Model\")\n\n model_kind.add_argument('--Clustering',\n choices=CLUSTERING,\n dest=\"Clustering Model\")\n\n model_kind.add_argument('--Image',\n choices=IMAGE,\n dest=\"Image Processing Model\")\n\n model_kind.add_argument('--Nlp',\n choices=NLP,\n dest=\"Natural Language Processing Model\")\n\n args = main_parser.parse_args()\n for val in vars(args).values():\n if val is not None:\n model_name = val\n break;\n\n print(\"[Start]\\t{}\".format(model_name))\n PYTHON_PATH = sys.executable\n process = Popen([PYTHON_PATH, os.path.join('Model', model_name, 'run.py')], stdout=PIPE, stderr=PIPE)\n output, error = process.communicate()\n\n # print(output)\n # print(error)\n\n print(\"[End]\\t{}\".format(model_name))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chan8616/PoAI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"18303323836","text":"from discord.ext import commands\nfrom discord import Embed, Activity, ActivityType, Status, Streaming, Game\nfrom botmodules import serverfiles, apis\n\n#\n\nextensionfolder = \"botcmds\"\nextensions = ['basic','support','moderation','games','help','channels','music','owneronly','converters','embedgenerator']\nsudo_ids = [285832847409807360]\nsudo_seperator = \"--sudo\"\nall_prefixes = [\"/\",\"!\",\"$\",\".\",\"-\",\">\",\"?\"]\n\n# Own functions\n\ndef get_prefix(client, message):\n if message.guild:\n prefixes = ['/']\n else:\n prefixes = all_prefixes\n return commands.when_mentioned_or(*prefixes)(client, message)\n\n# Own classes\n\nclass MyContext(commands.Context):\n def __init__(self, *args, 
**kwargs):\n super().__init__(*args, **kwargs)\n\n self.apis = apis\n\n if self.guild is not None:\n self.data = serverfiles.Server.getServer(self.guild.id)\n\n ## manipulate ctx for --sudo arg\n if int(self.author.id) in sudo_ids:\n if sudo_seperator in self.message.content:\n try:\n msg = self.message.content\n newmsg = msg.split(sudo_seperator)[0]\n newmember = msg.split(sudo_seperator)[1]\n self.message.content = newmsg\n userid = int(newmember.strip().lstrip(\"<@\").lstrip(\"!\").lstrip(\"&\").rstrip(\">\") if \"<@\" in newmember and \">\" in newmember else newmember)\n member = self.guild.get_member(userid)\n self.author = member\n self.message.author = member\n except (ValueError, ) as e:\n print(\"[SUDO] - Not a valid member: \"+newmember+\" - Error: \"+str(e))\n\n\n def getargs(self, raiserrorwhenmissing=False):\n msg = self.message.content.split(\" \")\n calledbymention = bool(self.prefix in all_prefixes)\n length = len(self.args)+len(self.kwargs)-int(calledbymention)\n txt = (\" \".join(msg[length::])) if len(msg) > length else \"\"\n newmessage = txt.split(sudo_seperator)[0].strip()\n if not newmessage and raiserrorwhenmissing:\n raise commands.BadArgument(message=\"You forgot an important argument!\")\n return newmessage\n\n async def sendEmbed(self, *args, message:str=\"\", **kwargs):\n return await self.send(message, embed=self.getEmbed(*args, **kwargs))\n\n def getEmbed(self, title:str, description:str=\"\", color:int=0x000000, fields:list=[], inline=True, thumbnailurl:str=None, authorurl:str=\"\", authorname:str=None, footertext:str=\"Requested by USER\", footerurl:str=\"AVATARURL\", timestamp=False):\n EMBED = Embed(title=title, description=description, color=color)\n EMBED.set_footer(text=footertext.replace(\"USER\", str(self.author.name+\"#\"+self.author.discriminator)), icon_url=footerurl.replace(\"AVATARURL\", str(self.author.avatar_url)))\n if timestamp:\n EMBED.timestamp = datetime.utcnow() if timestamp is True else timestamp\n for field in fields:\n EMBED.add_field(name=field[0], value=field[1], inline=bool(field[2] if len(field) > 2 else inline))\n if thumbnailurl:\n EMBED.set_thumbnail(url=thumbnailurl.strip())\n if authorname:\n if authorurl and (\"https://\" in authorurl or \"http://\" in authorurl):\n EMBED.set_author(name=authorname, url=authorurl.strip())\n else:\n EMBED.set_author(name=authorname)\n return EMBED\n\n async def tick(self, value):\n emoji = '\\N{WHITE HEAVY CHECK MARK}' if value else '\\N{CROSS MARK}'\n try:\n await self.message.add_reaction(emoji)\n except discord.HTTPException:\n pass\n\n\nclass MyBot(commands.Bot):\n async def get_context(self, message, *, cls=MyContext):\n return await super().get_context(message, cls=cls)\n\n\n# create Bot\n\nbot = MyBot(\n command_prefix=get_prefix,\n description='This is a description!',\n case_insensitive=True,\n activity=Activity(type=ActivityType.listening, name=\"/help\"),\n status=Status.idle\n)\n\n# Events\n\nfrom botevents.on_voice_state_update import setup as setup_on_voice_state_update\nfrom botevents.on_command_error import setup as setup_on_command_error\n\nsetup_on_voice_state_update(bot)\nsetup_on_command_error(bot)\n\n@bot.event\nasync def on_ready():\n print(f\"[Bot] - Logged in as '{bot.user.name}' - '{bot.user.id}'\")\n bot.remove_command('help')\n for extension in extensions:\n try:\n bot.load_extension(extensionfolder+\".\"+extension)\n except commands.errors.ExtensionAlreadyLoaded:\n pass\n return\n\n@bot.event\nasync def on_command(ctx):\n #print(f\"[Command] - 
'{ctx.message.content}' von '{ctx.author.name}#{str(ctx.author.discriminator)}'\")\n if ctx.guild is not None:\n try:\n await ctx.message.delete()\n except:\n pass\n\n# Hidden commands\n\n@bot.command(aliases=[\".\"])\nasync def destroy(ctx):\n pass\n\n\n# Start\n\ndef run(TOKEN):\n bot.run(TOKEN,bot=True,reconnect=True)\n\n\nimport sys, os\n\nif __name__ == \"__main__\":\n if 'DISCORD_RAFAELSBOT' in os.environ:\n run(os.environ.get('DISCORD_RAFAELSBOT'))\n\n elif len(sys.argv) > 1:\n run(sys.argv[1])\n else:\n print(\"[Bot] - No TOKEN found! Enter it manually...\")\n run(input(\"TOKEN: \"))\n","repo_name":"rafaelurben/python-discordbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"34467253990","text":"import heapq\ndef solution(operations):\n answer = []\n q = []\n\n mini = 1e9\n maxi = -1e9\n \n for i in operations:\n if i[0]== (\"I\"): \n q.append(int(i[2:]))\n elif i == (\"D -1\") and q:\n heapq.heappop(q)\n else:\n if q:\n temp_q = [] \n for j in q:\n temp_q.append(-j)\n heapq.heapify(temp_q)\n temp = -(heapq.heappop(temp_q))\n for k in range(len(q)):\n if q[k] == temp:\n del q[k]\n break\n heapq.heapify(q)\n if len(q) == 0:\n answer = [0,0]\n else:\n mini = min(heapq.heappop(q), mini)\n q2 = []\n for j in q:\n q2.append(-j)\n heapq.heapify(q2)\n maxi = max(-(heapq.heappop(q2)), maxi)\n answer = [maxi, mini]\n return answer\n","repo_name":"soodal5629/codingTestProblemSolve","sub_path":"이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"251328240","text":"from asyncio import sleep, run\n\nfrom app.telegram import Telegram\nfrom app import Config, log\n\n\nasync def main() -> None:\n config = Config()\n client = Telegram.client(\n string_session=config.telegram.string_session,\n api_id=config.telegram.api_id,\n api_hash=config.telegram.api_hash,\n logger=logger,\n )\n while True:\n await client.send_message(\n chat_id=config.spam.chat_id,\n message=config.spam.message,\n )\n await sleep(config.spam.timer)\n\n\nif __name__ == \"__main__\":\n logger = log()\n try:\n run(main())\n except (KeyboardInterrupt, SystemExit):\n logger.error(\"Program stopped by user\")\n","repo_name":"ani4a/Telegram-Timed-Messages","sub_path":"app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1733701323","text":"# Validate Subsequence\n# Difficulty: Easy\n# link: https://www.algoexpert.io/questions/Validate%20Subsequence\n\n# Given two non-empty arrays of integers, write a function that determines whether the second array is a subsequence\n# of the first one.\n# A subsequence of an array is a set of numbers that aren't necessarily adjacent in the array but that they are in the\n# same order as they appear in the array. For instance, the numbers [1, 3, 4] form a subsequence of the array [1,2,3,4],\n# and so do the numbers [2, 44]. 
Note that a single number in an array and the array itself are both valid subsequences\n# of the array.\n\n# Sample input:\n# array = [5, 1, 22, 25, 6, -1, 8, 10]\n# sequence = [1, 6, -1, 10]\n\n# Sample output\n# true\n\n# Time: O(N), iterate through the main array\n# Space: O(1), not storing anything but the 2 pointers\n# Solution: set 2 pointers, one on the main array and one on the sequence, starting from the beginning, then iterate\n# through the array and the sequence to find matched and ordered items from the sequence\n\n# Solution 1\ndef isValidSubsequence(array, sequence):\n arrIdx = 0\n seqIdx = 0\n while arrIdx < len(array) and seqIdx < len(sequence): # while both pointers are still within the length\n if array[arrIdx] == sequence[seqIdx]: # when 2 pointers are matched\n seqIdx += 1 # move the seqIdx pointer to the next\n arrIdx += 1 # move the arrIdx pointer to the next\n return seqIdx == len(sequence) # return True if it reaches to the end of the sequence\n\n\n# Solution 2\ndef isValidSubsequence(array, sequence):\n seqIdx = 0\n for value in array: # for every value in the (main) array\n if seqIdx == len(sequence): # break if it reaches to the end of the sequence\n break\n if sequence[seqIdx] == value: # if value in array matches the value in seqIdx\n seqIdx += 1 # move seqIdx to the right\n return seqIdx == len(sequence) # return true if it reaches to the end of the sequence\n","repo_name":"henrylin2008/Coding_Problems","sub_path":"Arrays/isValidSubsequence.py","file_name":"isValidSubsequence.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"35903084691","text":"\n\nsomeList = [1, 2, '3', 4, 'name', 10, 33, 'Python']\nsortedList = []\n\n\ndef filter_list(arr):\n for item in arr:\n if type(item) is int:\n sortedList.append(item)\n else:\n continue\n print(str(arr) + \" == \" + str(sortedList))\n\n\nfilter_list(someList)\n","repo_name":"Nikolssa/python_Tasks","sub_path":"list_filtering.py","file_name":"list_filtering.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"19181372119","text":"from collections import deque\n\n#8*8 Grid\nN = 8\n\n#Make all grids as not visited\nvisited = [[(-1,-1) for i in range(0,N)] for j in range(0,N)]\n\n#Get all possible movements for the knight\ndef getPositions(current):\n\t#all possible movments for the knight \n dx = [2, 2, -2, -2, 1, 1, -1, -1] \n dy = [1, -1, 1, -1, 2, -2, 2, -2] \n positions = []\n for i in range(8):\n \tnewpostion = (current[0]+dx[i],current[1]+dy[i])\n \t#Check if new position lies inside grid:\n \tif newpostion[0]>=0 and newpostion[0]< N and newpostion[1]>=0 and newpostion[1]%s   \" % (link[1], link[0])\n\t\t\n\treturn ret\n\n\ndef _make_time_range(param, link):\n\tjs_chart_list = ''\n\n\tif 'start_date' in param:\n\t\tscript = jscript(\"$('#start_date').val('%s');\" % (param['start_date']))\n\t\tjs_chart_list += script.render()\n\n\tif 'end_date' in param:\n\t\tscript = jscript(\"$('#end_date').val('%s');\" % (param['end_date']))\n\t\tjs_chart_list += script.render()\n\n\n\trealtime = jquery_radio('realtime')\n\trealtime.push_item('on')\n\trealtime.push_item('off')\n\taction = \"\"\"\n\t\tvar s = $(this).attr('id');\n\t\tvar diff = 0;\n\t\tif (s == 'on') {\n\t\t\twindow.location.href=%s + '&auto_update=5&diff=5';\n\t\t}\n\t\telse if (s == 'off') {\n\t\t\twindow.location.href=%s;\n\t\t}\n\n\t\"\"\" % (link, 
link)\n\n\trealtime.set_action(action)\n\n\n\t# radio button\n\trange_radio = jquery_radio('range_radio')\n\trange_radio.push_item('5m')\n\trange_radio.push_item('30m')\n\trange_radio.push_item('1h')\n\trange_radio.push_item('3h')\n\trange_radio.push_item('6h')\n\trange_radio.push_item('12h')\n\trange_radio.push_item('1d')\n\trange_radio.push_item('2d')\n\trange_radio.push_item('1w')\n\trange_radio.push_item('1M')\n\trange_radio.push_item('3M')\n\trange_radio.push_item('6M')\n\trange_radio.push_item('1Y')\n\taction = \"\"\"\n\t\tvar s = $(this).attr('id');\n\t\tvar diff = 0;\n\t\tif (s == '5m') {\n\t\t\tdiff = 5*60;\n\t\t}\n\t\telse if (s == '30m') {\n\t\t\tdiff = 30*60;\n\t\t}\n\t\telse if (s == '1h') {\n\t\t\tdiff = 1*3600;\n\t\t}\n\t\telse if (s == '3h') {\n\t\t\tdiff = 3*3600;\n\t\t}\n\t\telse if (s == '6h') {\n\t\t\tdiff = 6*3600;\n\t\t}\n\t\telse if (s == '12h') {\n\t\t\tdiff = 12*3600;\n\t\t}\n\t\telse if (s == '1d') {\n\t\t\tdiff = 1*3600*24;\n\t\t}\n\t\telse if (s == '2d') {\n\t\t\tdiff = 2*3600*24;\n\t\t}\n\t\telse if (s == '1w') {\n\t\t\tdiff = 1*3600*24*7;\n\t\t}\n\t\telse if (s == '1M') {\n\t\t\tdiff = 1*3600*24*30;\n\t\t}\n\t\telse if (s == '3M') {\n\t\t\tdiff = 3*3600*24*30;\n\t\t}\n\t\telse if (s == '6M') {\n\t\t\tdiff = 6*3600*24*30;\n\t\t}\n\t\telse if (s == '1Y') {\n\t\t\tdiff = 1*3600*24*365;\n\t\t}\n\n\t\tvar ed = new Date($('#end_date').val());\n\t\tvar offset = ed.getTimezoneOffset() * 60;\n\t\ted.setSeconds(ed.getSeconds() - offset);\n\t\tvar end_ts = ed.getTime()/1000;\n\n\t\tvar start_ts = end_ts - diff;\n\t\tvar sd = new Date(start_ts*1000);\n\n\t\tvar sd_str = sd.toISOString();\n\t\tvar date = sd_str.substring(0, 10)\n\t\tvar time = sd_str.substring(11, 16)\n\t\tsd_str = date + \" \" + time\n\n\t\t$('#start_date').val(sd_str);\n\n\t\twindow.location.href=%s + '&start_date=' + $('#start_date').val() + '&end_date=' + $('#end_date').val();\n\t\"\"\" % (link)\n\n\trange_radio.set_action(action)\n\n\n\t\n\n\t# input & rendering\n\tdate_template = \"\"\"\n\t\t
time range\n\t\t\t<input type=\"text\" id=\"start_date\" value=\"%s\" onchange=\"window.location.href=%s + '&start_date=' + $('#start_date').val() + '&end_date=' + $('#end_date').val();\">\n\t\t\t ~ \n\t\t\t<input type=\"text\" id=\"end_date\" value=\"%s\" onchange=\"window.location.href=%s + '&start_date=' + $('#start_date').val() + '&end_date=' + $('#end_date').val();\">\n\t\t\t     \n\t\t\tset start before\n\t\t\t%s\n\t\t\t     \n\t\t\trealtime\n\t\t\t%s\n\t\t\t%s\n\t\t
    \n\n\t\t\n\t\"\"\"\n\n\tend_date = datetime.datetime.now()\n\tstart_date = end_date - datetime.timedelta(0, 60*30)\n\tif 'auto_update' in param:\n\t\ttry:\n\t\t\tauto_update_time = int(param['auto_update']) * 1000\n\t\texcept:\n\t\t\tauto_update_time = 5000\n\n\t\tend_date += datetime.timedelta(0, 60)\n\n\t\tauto_update = \"\"\"\n\t\t \n\t\t\"\"\"\n\t\tif 'diff' in param:\n\t\t\tauto_update = auto_update %(param['diff'], link, auto_update_time // 1000, param['diff'], auto_update_time)\n\t\telse:\n\t\t\tauto_update = auto_update %('30', link, auto_update_time // 1000, '30', auto_update_time)\n\t\tjs_chart_list += date_template % (start_date.strftime(\"%Y-%m-%d %H:%M\"), link, end_date.strftime(\"%Y-%m-%d %H:%M\"), link, range_radio.render(), realtime.render(), auto_update) # set initial time\n\telse:\n\t\tjs_chart_list += date_template % (start_date.strftime(\"%Y-%m-%d %H:%M\"), link, end_date.strftime(\"%Y-%m-%d %H:%M\"), link, range_radio.render(), realtime.render(), '') # set initial time\n\n\treturn js_chart_list\n\n\ndef _make_static_chart_list(param, url, levels, level_items):\n\t## list rendering\n\tjs_chart_list = ''\n\t#print(level_items)\n\n\ttype = None # chart type\n\tif 'type' in param:\n\t\ttype = param['type']\n\n\tac_levels = []\n\tfor i in range(0, len(levels)):\n\t\tac_tmp = jquery_autocomplete(levels[i])\n\t\tac_levels.append(ac_tmp)\n\n\t# make link\n\tif type is not None:\n\t\tlink = \"'/%s?type=%s'\" % (url, type)\n\telse:\n\t\tlink = \"'/%s?'\" % (url)\n\n\tfor i in range(0, len(levels)):\n\t\tlink = \"%s + '&%s=' + %s\" % (link, levels[i], ac_levels[i].val())\n\t\t\n\n\tfor i in range(0, len(levels)):\n\t\tkey_list = level_items[i]\n\n\t\tactions = ac_levels[i].val('ui.item.label') + ';'\n\t\tactions += \"window.location.href=%s;\" % (link)\n\t\tac_levels[i].set(key_list, actions)\n\n\n\t# set script\n\tfor i in range(0, len(levels)):\n\t\tjs_chart_list += '%s %s   ' % (levels[i], ac_levels[i].render())\n\t\t\n\n\tfor i in range(0, len(levels)):\n\t\tif levels[i] in param:\n\t\t\tscript = jscript(ac_levels[i].val_str(param[levels[i]]) + ';')\n\t\t\tjs_chart_list += script.render()\n\n\n\tjs_chart_list = '
    %s
    ' % js_chart_list\n\tjs_chart_list += _make_time_range(param, link)\n\n\treturn js_chart_list\n\n\n\n\ndef _make_dynamic_chart_list(param, url, levels, chart_map):\n\t## list rendering\n\tjs_chart_list = ''\n\t#print(chart_map)\n\n\ttype = None # chart type\n\tif 'type' in param:\n\t\ttype = param['type']\n\n\tac_levels = []\n\tfor i in range(0, len(levels)):\n\t\tac_tmp = jquery_autocomplete(levels[i])\n\t\tac_levels.append(ac_tmp)\n\n\tlink = ''\n\n\n\tif (len(levels) == 1): # only 1 level\n\t\tac_curr = ac_levels[0]\n\t\tif type is not None:\n\t\t\tactions = \"window.location.href='/%s?type=%s&%s=' + ui.item.label;\" % (url, type, levels[0])\n\t\telse:\n\t\t\tactions = \"window.location.href='/%s?%s=' + ui.item.label\" % (url, levels[0])\n\n\t\tkey_list = list(chart_map.keys())\n\t\tkey_list.sort()\n\t\tac_curr.set(key_list, actions)\n\n\telse: \t\t\t# multi level\n\t\tfor i in range(0, len(levels)):\n\t\t\tac_curr = ac_levels[i]\n\t\t\t\n\t\t\tactions = ac_curr.val('ui.item.label') + ';'\n\n\n\n\t\t\tif i < len(levels)-1:\t\t\t# non-leaf level\n\t\t\t\tac_child = ac_levels[i+1]\n\n\t\t\t\tif i == 0:\n\t\t\t\t\tif type is not None:\n\t\t\t\t\t\tlink = \"'/%s?type=%s&%s=' + %s\" % (url, type, levels[i], ac_curr.val())\n\t\t\t\t\telse:\n\t\t\t\t\t\tlink = \"'/%s?%s=' + %s\" % (url, levels[i], ac_curr.val())\n\n\t\t\t\t\tkey_list = list(chart_map.keys())\n\t\t\t\t\tkey_list.sort()\n\t\t\t\telse:\n\t\t\t\t\tlink = \"%s + '&%s=' + %s\" % (link, levels[i], ac_curr.val())\n\t\t\t\t\tkey_list = []\n\n\t\t\t\tactions += ac_child.source(link) + ';'\n\t\t\t\tactions += ac_child.val_str('') + ';'\n\n\t\t\t\tac_curr.set(key_list, actions)\n\t\t\telse:\t\t\t\t\t# leaf level\n\t\t\t\tactions = \"window.location.href=%s + '&%s=' + ui.item.label;\" % (link, levels[i])\n\t\t\t\tlink = \"%s + '&%s=' + %s\" % (link, levels[i], ac_curr.val()) # used later\n\t\t\t\tac_curr.set([], actions)\n\n\t# set script\n\tfor i in range(0, len(levels)):\n\t\tjs_chart_list += '%s %s   ' % (levels[i], ac_levels[i].render())\n\t\t\n\n\tfor i in range(0, len(levels)):\n\t\tif levels[i] in param:\n\t\t\tscript = jscript(ac_levels[i].val_str(param[levels[i]]) + ';')\n\t\t\tjs_chart_list += script.render()\n\n\tjs_chart_list = '
    %s
    ' % js_chart_list\n\tif url != 'query':\n\t\tjs_chart_list += _make_time_range(param, link)\n\n\treturn js_chart_list\n\n\ndef _get_ts(param):\n\tend_ts = int(time.time())\n\tstart_ts = end_ts - 60*30\n\n\tif 'start_date' in param and param['start_date'] != '':\n\t\tstart_date = datetime.datetime.strptime(param['start_date'], '%Y-%m-%d %H:%M')\n\t\tstart_ts = int(start_date.timestamp())\n\n\tif 'end_date' in param and param['end_date'] != '':\n\t\tend_date = datetime.datetime.strptime(param['end_date'], '%Y-%m-%d %H:%M')\n\t\tend_ts = int(end_date.timestamp())\n\n\treturn start_ts, end_ts\n\n\ndef system_page(request):\n\t## list rendering\n\tprint('####### system page request ########')\n\tprint(request.GET)\n\n\tlevels = [ 'server', 'item' ]\n\tentity_list = []\n\titem_list = [ 'brief', 'cpu', 'memory', 'swap', 'disk', 'net', 'resource' ]\n\n\tentity_list = common.core.get_entity_list()\n\t#print(entity_list)\n\t\n\tentity_list.sort()\n\tjs_chart_list = _make_static_chart_list(request.GET, 'system', levels, [ entity_list, item_list ])\n\n\t# chart data\n\tjs_chart_data = ''\n\tstart_ts, end_ts = _get_ts(request.GET)\n\n\tif 'server' in request.GET and request.GET['server'] != '' and 'item' in request.GET and request.GET['item'] != '':\n\t\tloader = common.core.system_view(request.GET['server'], request.GET['item'])\n\t\tchart_data_list = loader.load(start_ts, end_ts)\n\n\t\tjs_chart_data = ''\n\t\tfor chart_data in chart_data_list:\n\t\t\tjs_chart_data += chart_data.render()\n\t\t\n\tif 'ajax' in request.GET:\n\t\treturn HttpResponse(json.dumps({'reponse': 'success', 'chart_data': js_chart_data}), content_type='application/json')\n\n\t## make view\n\tvariables = {\n\t\t'main_link': _make_main_link(),\n\t\t'chart_list': js_chart_list,\n\t\t'chart_data': js_chart_data\n\t}\n\t\n\treturn render(request, 'system_page.html', variables)\n\n\ndef expr_page(request):\n\tlevels, chart_map = common.core.get_chart_list(request.GET) # for init (preload cloud map)\n\n\tprint('####### expr page request ########')\n\n\tif request.method == 'POST':\n\t\tparam = request.POST\n\telse:\n\t\tparam = request.GET\n\tprint(param)\n\n\t\n\texpr = ''\n\texpr_form = chart_expr_form(data=param)\n\tif expr_form.is_valid():\n\t\texpr = expr_form.cleaned_data['expr']\n\n\tprint('## expr: %s' % expr)\n\t## make view\n\n\t## eval expression\n\tjs_chart_data = ''\n\tif expr != '':\n\t\ttry:\n\t\t\tx_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n\t\t\tif x_forwarded_for:\n\t\t\t\tip = x_forwarded_for.split(',')[0]\n\t\t\telse:\n\t\t\t\tip = request.META.get('REMOTE_ADDR')\n\n\t\t\tsyslog('[hubblemon-expr_page:%s] %s' % (ip, query))\n\t\t\tloader = eval(expr)\n\t\t\t#print(loader)\n\n\t\t\t# allow list or tuple in expr_text\n\t\t\tif isinstance(loader, list) or isinstance(loader, tuple):\n\t\t\t\tloaders = loader\n\t\t\telse:\n\t\t\t\tloaders = [loader]\n\t\t\n\t\t\t## chart rendering\n\t\t\tstart_ts, end_ts = _get_ts(param)\n\n\t\t\tprint(loaders)\n\t\t\tfor loader in loaders:\n\t\t\t\tprint(loader)\n\t\t\t\tif hasattr(loader, 'load'):\n\t\t\t\t\tchart_data_list = loader.load(start_ts, end_ts)\n\t\t\t\t\tfor chart_data in chart_data_list:\n\t\t\t\t\t\tjs_chart_data += chart_data.render()\n\t\t\t\telse:\n\t\t\t\t\tjs_chart_data += str(loader)\n\t\t\t\t\n\t\texcept Exception as e:\n\t\t\tjs_chart_data = '''\n\t\t\t\t

<br>evaluation error<br>\n\t\t\t\t<br>source: %s<br>\n\t\t\t\t<br>exception: %s<br>

    \n\t\t\t''' % (expr, str(e))\n\n\t## set time range\n\tstart_date = ''\n\tend_date = ''\n\tif 'start_date' in param:\n\t\tstart_date = param['start_date']\n\tif 'end_date' in param:\n\t\tend_date = param['end_date']\n\tdate_range = '''\n\t\t\t''' % (start_date, end_date)\n\tjs_chart_list = _make_time_range(param, \"'/expr?expr=%s'\" % urllib.parse.quote(expr))\n\n\tif 'ajax' in param:\n\t\treturn HttpResponse(json.dumps({'reponse': 'success', 'chart_data': js_chart_data}), content_type='application/json')\n\t## make view\n\tvariables = {\n\t\t'main_link': _make_main_link(),\n\t\t'expr_form': expr_form,\n\t\t'date_range': date_range,\n\t\t'chart_list': js_chart_list,\n\t\t'chart_data': js_chart_data\n\t}\n\t\n\treturn render(request, 'expr_page.html', variables)\n\n\ndef chart_page(request):\n\tprint('####### chart page request ########')\n\n\tparam = request.GET\n\tprint(param)\n\n\t## list rendering\n\tlevels, chart_map = common.core.get_chart_list(param)\n\tif (len(levels) == 0):\n\t\tvariables = {\n\t\t\t'main_link': _make_main_link(),\n\t\t\t'chart_list': '',\n\t\t\t'chart_data': ''\n\t\t}\n\n\t\treturn render(request, 'chart_page.html', variables)\n\t\n\tjs_chart_list = _make_dynamic_chart_list(param, 'chart', levels, chart_map)\n\tprint(levels)\n\t#print(chart_map)\n\n\t# case 1. not selected anyone\n\tif levels[0] not in param:\n\t\t#print('## return chart map')\n\t\tvariables = {\n\t\t\t'main_link': _make_main_link(),\n\t\t\t'chart_list': js_chart_list\n\t\t}\n\n\t\treturn render(request, 'chart_page.html', variables)\n\n\t# case 2. partialy selected\n\tif levels[-1] not in param:\n\t\tret = chart_map\n\t\tfor level in levels:\n\t\t\tif level in param:\n\t\t\t\tif isinstance(ret, dict) and param[level] in ret:\n\t\t\t\t\tret = ret[param[level]]\n\t\t\t\telse:\n\t\t\t\t\tret = []\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t#print('## return json: ' + json.dumps(ret))\n\t\treturn HttpResponse(json.dumps(ret))\n\n\n\t# case 3. 
select all (make data)\n\tloader = common.core.get_chart_data(param)\n\t#print(loader)\n\n\tif loader == None:\n\t\tvariables = {\n\t\t\t'main_link': _make_main_link(),\n\t\t\t'chart_data': 'Unknown chart id'\n\t\t}\n\t\treturn render(request, 'chart_page.html', variables)\n\n\t## chart rendering\n\tstart_ts, end_ts = _get_ts(request.GET)\n\tchart_data_list = loader.load(start_ts, end_ts)\n\n\tjs_chart_data = ''\n\tfor chart_data in chart_data_list:\n\t\tjs_chart_data += chart_data.render()\n\n\t## make view\n\tif 'ajax' in param:\n\t\treturn HttpResponse(json.dumps({'reponse': 'success', 'chart_data': js_chart_data}), content_type='application/json')\n\t\n\tvariables = {\n\t\t'main_link': _make_main_link(),\n\t\t'chart_list': js_chart_list,\n\t\t'chart_data': js_chart_data\n\t}\n\t\n\treturn render(request, 'chart_page.html', variables)\n\n\ndef query_page(request):\n\tprint('####### query page request ########')\n\n\tif request.method == 'POST':\n\t\tparam = request.POST\n\telse:\n\t\tparam = request.GET\n\tprint(param)\n\n\tauth_fields = ''\n\tquery_data = ''\n\tquery = ''\n\n\tif 'query_type' not in param: # initial default value\n\t\tparam = param.copy()\n\t\tparam['query_type'] = 'query'\n\n\tform = query_form(data=param)\n\tif form.is_valid():\n\t\tquery = form.cleaned_data['query']\n\tprint('## query: %s' % query)\n\n\tfields = common.core.auth_fields(param)\n\tfor field in fields:\n\t\tform.fields[field.label] = field\n\n\t\tauth_fields += field.label\n\t\tauth_fields += field.widget.render(field.label, '')\n\t\tauth_fields += '  '\n\t\t\n\n\t## list rendering\n\tlevels, query_map = common.core.get_chart_list(param)\n\tif (len(levels) == 0):\n\t\tvariables = {\n\t\t\t'main_link': _make_main_link(),\n\t\t\t'auth_fields': auth_fields,\n\t\t\t'query_form': form,\n\t\t\t'query_list': '',\n\t\t\t'query_data': ''\n\t\t}\n\n\t\treturn render(request, 'query_page.html', variables)\n\t\n\tjs_query_list = _make_dynamic_chart_list(param, 'query', levels, query_map)\n\t#print(levels)\n\n\t# add hidden fields for form rendering\n\tif 'type' in param:\n\t\tform.fields['type'] = forms.CharField(initial=param['type'], widget=forms.widgets.HiddenInput())\n\tfor level in levels:\n\t\tif level in param:\n\t\t\tform.fields[level] = forms.CharField(initial=param[level], widget=forms.widgets.HiddenInput())\n\n\t# partialy selected\n\tif levels[0] in param and levels[-1] not in param:\n\t\tret = query_map\n\t\tfor level in levels:\n\t\t\tif level in param:\n\t\t\t\tif isinstance(ret, dict) and param[level] in ret:\n\t\t\t\t\tret = ret[param[level]]\n\t\t\t\telse:\n\t\t\t\t\tret = []\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t#print('## return json: ' + json.dumps(ret))\n\t\treturn HttpResponse(json.dumps(ret))\n\n\t# execute query\n\tif request.method == 'POST':\t\n\t\tx_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n\t\tif x_forwarded_for:\n\t\t\tip = x_forwarded_for.split(',')[0]\n\t\telse:\n\t\t\tip = request.META.get('REMOTE_ADDR')\n\n\t\tquery_data = common.core.query(param, ip)\n\t\n\tvariables = {\n\t\t'main_link': _main_main_link(),\n\t\t'auth_fields': auth_fields,\n\t\t'query_form': form,\n\t\t'query_list': js_query_list,\n\t\t'query_data': query_data\n\t}\n\n\treturn render(request, 'query_page.html', variables)\n\n\n\n\ndef addon_page(request):\n\tprint('####### addon_page request ########')\n\n\tif request.method == 'POST':\n\t\tparam = request.POST\n\telse:\n\t\tparam = request.GET\n\n\tprint(param)\n\taddon_page_data = common.core.get_addon_page(param)\n\n\tvariables = 
{\n\t\t'main_link': _make_main_link(),\n\t\t'addon_page_data': addon_page_data\n\t}\n\treturn render(request, 'addon_page.html', variables)\n\n\n\n\n\n","repo_name":"naver/hubblemon","sub_path":"chart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17747,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"62"} +{"seq_id":"23205985941","text":"from Insurance.pipeline.batch_prediction import start_batch_prediciton\nfrom Insurance.pipeline.training_pipeline import start_training_pipeline\n\n# file_path = r\"C:\\Users\\aswan\\Documents\\Insurance premium prediction\\Insurance-premium-prediction-project\\medicall.csv\"\n\nif __name__ == \"__main__\":\n try:\n output = start_training_pipeline()\n print(output)\n # output = start_batch_prediciton(input_file_path=file_path)\n except Exception as e:\n print(e)\n","repo_name":"AswanthAnu/Insurance-premium-prediction-project","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"15594843443","text":"\r\nimport os\r\nfrom os import path\r\nimport pandas as pd\r\n#from config.parameter import data_from,base_url,file_name,extension_url, online\r\n#from utils import get_html,read_html\r\nfrom selenium import webdriver\r\nimport codecs\r\nimport time\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nbase_url = \"https://www.nasdaq.com\"\r\ndef read_html(file_name):\r\n with open(file_name, \"rb\") as file:\r\n return file.read()\r\n\r\ndef get_html(base_url,file_name):\r\n #set chromedriver.exe path\r\n driver = webdriver.Chrome('driver/chromedriver.exe')\r\n driver.implicitly_wait(0.5)\r\n #maximize browser\r\n driver.maximize_window()\r\n #launch URL\r\n driver.get(base_url)\r\n time.sleep(5)\r\n # obtain page source\r\n h = driver.page_source\r\n #time.sleep(5)\r\n #open file in write mode with encoding\r\n f = codecs.open(file_name, \"w\", \"utf−8\")\r\n #write page source content to file\r\n f.write(h)\r\n #close browser\r\n driver.quit()\r\n\r\n return read_html(file_name)\r\n\r\ndata = pd.read_csv(\"data/csv/nasdaq_screener.csv\")\r\ndata = data.sort_values(by=['Market Cap'], ascending=False).reset_index()\r\n\r\ndf = data[:100].copy()\r\n\r\n#postfixes = [\"dividend-history\",\"news-headlines\",\"pre-market\",\"press-releases\"\r\n# ,\"after-hours\", \"financials\",\"earnings\"]\r\n# for postfix in postfixes:\r\nfor i in range(len(df[\"Symbol\"])):\r\n try:\r\n path1 = \"data/html/company/\" #+ df[\"Symbol\"][i] + \"/\" + postfix\r\n os.makedirs(path1, exist_ok=True)\r\n url = base_url + \"/market-activity/stocks/\" + df[\"Symbol\"][i].lower()# + \"/\" + postfix\r\n if not path.exists(path1 + df[\"Symbol\"][i] + \".html\"):\r\n data = get_html(url, path1 + df[\"Symbol\"][i] + \".html\")\r\n print(\"Directory {} created successfully\".format(df[\"Symbol\"][i] ))#+ \"/\" + postfix))\r\n\r\n except OSError as error:\r\n print(\"Directory '%s' can not be created\")\r\n\r\n\r\n","repo_name":"KarryHarsh/DCPP_webscrapping","sub_path":"download_html.py","file_name":"download_html.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"8507699669","text":"import instapy as ip\n\nclass NumbaColor2Gray(ip.color_2):\n # 4.1:\n def grayscale_filter(self, image):\n \"\"\"\n Method for converting a image to a gray image using numba\n\n args:\n image 
(integer 3D array): The image that is to be converted\n\n returns:\n image (integer 3D array): The grayscale image\n \"\"\"\n # Make the grayscale image\n image = self.make_grayscale_filter(image) \n\n # Return the grayscale image\n return image\n\n\n # 4.1:\n @staticmethod\n @ip.numba_jit\n def make_grayscale_filter(image):\n \"\"\"\n The actual method that utilizes numba to create a gray image from an image\n\n args:\n image (integer 3D array): The image that is to be converted\n\n returns:\n image (integer 3D array): The grayscale image\n \"\"\"\n height = image.shape[0] # Read the height of the image\n width = image.shape[1] # Read the width of the image\n\n # Make the grayscale_image\n for i in range(height):\n for j in range(width):\n # Summarize the weight of the blue, green and red channel, respectively (OpenCV uses BGR, while many other image handling libraries uses RGB)\n weighted_average = int(image[i][j][0] * 0.07 + image[i][j][1] * 0.72 + image[i][j][2] * 0.21) \n # Apply the weight to the pixel in grayscale_image\n for k in range(3):\n image[i][j][k] = weighted_average\n\n return image\n\n\n # 4.1:\n def report_grayscale_filter(self, input_filename, output_directory, *report_files):\n \"\"\"\n Method for automatically writing and saving a report of the grayscale_filter-function on a given image with the numba-implementation\n\n args:\n input_filename (str): The filename and -path to the image that was used for the filter-function\n output_directory (str): The folder where the report should be saved\n *report_files (tuple): The filenames and -paths to the other reports that this method is to compare runtimes with\n \"\"\"\n # Get report\n report = self.get_report(\"grayscale\", __file__, input_filename, *report_files)\n\n # Check if report was written without errors\n if type(report) == Exception:\n raise report \n\n # Write report to file\n self.save_report(report, \"numba_report_color2gray.txt\", output_directory)\n","repo_name":"martintoft1/IN3110","sub_path":"assignment4/instapy/instapy/color2gray/numba_color2gray.py","file_name":"numba_color2gray.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39182329122","text":"import string\nimport unittest\n\nfrom e2e_test_framework.definitions import common_definitions\nimport e2e_test_framework.models.utils as model_utils\n\n\nclass UtilsTest(unittest.TestCase):\n def test_partial_template_substitute(self):\n template = string.Template(\"${name}-${batch_size}\")\n\n result = model_utils.partial_template_substitute(template, name=\"xyz\")\n\n self.assertEqual(result.substitute(batch_size=10), \"xyz-10\")\n\n def test_generate_batch_models(self):\n models = model_utils.generate_batch_models(\n id_template=string.Template(\"1234-${batch_size}\"),\n name_template=string.Template(\"model-batch-${batch_size}\"),\n tags=[\"abc\"],\n source_url_template=string.Template(\n \"https://example.com/x/${batch_size}.mlir\"\n ),\n source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,\n entry_function=\"forward\",\n input_type_templates=[\n string.Template(\"${batch_size}x128\"),\n string.Template(\"${batch_size}x256\"),\n ],\n batch_sizes=[1, 4],\n )\n\n self.assertEqual(\n models,\n {\n 1: common_definitions.Model(\n id=\"1234-1\",\n name=\"model-batch-1\",\n tags=[\"abc\", \"batch-1\"],\n source_url=\"https://example.com/x/1.mlir\",\n source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,\n 
entry_function=\"forward\",\n input_types=[\"1x128\", \"1x256\"],\n ),\n 4: common_definitions.Model(\n id=\"1234-4\",\n name=\"model-batch-4\",\n tags=[\"abc\", \"batch-4\"],\n source_url=\"https://example.com/x/4.mlir\",\n source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,\n entry_function=\"forward\",\n input_types=[\"4x128\", \"4x256\"],\n ),\n },\n )\n\n def test_generate_batch_models_missing_substitution(self):\n id_template_with_unknown = string.Template(\"1234-${unknown}-${batch_size}\")\n\n self.assertRaises(\n KeyError,\n lambda: model_utils.generate_batch_models(\n id_template=id_template_with_unknown,\n name_template=string.Template(\"model-batch-${batch_size}\"),\n tags=[\"abc\"],\n source_url_template=string.Template(\n \"https://example.com/x/${batch_size}.mlir\"\n ),\n source_type=common_definitions.ModelSourceType.EXPORTED_STABLEHLO_MLIR,\n entry_function=\"forward\",\n input_type_templates=[\n string.Template(\"${batch_size}x128\"),\n ],\n batch_sizes=[1, 4],\n ),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"openxla/iree","sub_path":"build_tools/python/e2e_test_framework/models/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":2127,"dataset":"github-code","pt":"62"} +{"seq_id":"31977992542","text":"import sys\n\nWIDTH = 8\nDEPTH = 65536\n\n# Esta función transforma un string de caracteres ASCII a un archivo .mif donde cada dirección tiene un caracter\ndef text_to_mif( text: str):\n with open(\"ascii.mif\", 'w') as file:\n file.write(\"WIDTH={};\\n\".format(WIDTH))\n file.write(\"DEPTH={};\\n\\n\".format(DEPTH))\n file.write(\"ADDRESS_RADIX=UNS;\\n\")\n file.write(\"DATA_RADIX=HEX;\\n\\n\")\n\n address = 2;\n file.write(\"CONTENT BEGIN\\n\")\n for char in text:\n if(address < DEPTH):\n file.write(\"\\t{0}\\t: {1:0{2}X};\\n\".format(address, ord(char), WIDTH//4))\n address += 1\n else:\n break\n\n if(address < DEPTH-1):\n file.write(\"\\t[{0}..{1}]\\t: {2:0{3}X};\\n\".format(address, DEPTH-1, 0, WIDTH//4))\n\n file.write(\"END;\")\n return\n\nif (__name__ == \"__main__\"):\n text = \"\"\n if(len(sys.argv) > 1):\n text = sys.argv[1]\n text_to_mif(\"Lorem\" + text)\n else:\n text_to_mif(\"Lorem\" + \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Lorem Cras hendrerit blandit est eget Lorem ornare.Lorem Nunc porta ante quis ante aliquet, eu tempor velit aliquam. Lorem Pellentesque posuere pharetra aliquam. Proin quis nulla at dui porta congue. Pellentesque sit amet maximus neque, et blandit diam. Donec non mi nec nibh dignissim rutrum. Vestibulum pretium lorem libero, ac tempor ex porttitor in. Nullam lacinia sagittis quam nec viverra. Nulla facilisi. Sed nec dictum sem. Pellentesque ullamcorper nisi nibh, id iaculis eros cursus Lorem vitae Lorem. 
\")\n","repo_name":"astuaTEC/Proyecto-Taller-Diseno-Digital","sub_path":"scripts/text_to_mif.py","file_name":"text_to_mif.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"11750007707","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render, redirect\nimport requests\nfrom .models import ShopifyProduct, ShopifyBlog, ShopifyArticle\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.http import JsonResponse\nimport json\n\n\ndef install_app(request):\n shop = request.GET.get(\"shop\", 'junior-state.myshopify.com')\n scope = 'read_products write_products read_content write_content write_themes read_themes'\n redirect_uri = request.build_absolute_uri('shopify/oauth/callback/')\n authorization_url = f'https://{shop}/admin/oauth/authorize?client_id={settings.SHOPIFY_API_KEY}&scope={scope}&redirect_uri={redirect_uri}'\n return redirect(authorization_url)\n\n\ndef oauth_callback(request):\n code = request.GET.get('code')\n shop = request.GET.get('shop')\n\n token_url = f'https://{shop}/admin/oauth/access_token'\n data = {\n 'client_id': settings.SHOPIFY_API_KEY,\n 'client_secret': settings.SHOPIFY_API_SECRET,\n 'code': code,\n }\n response = requests.post(token_url, json=data)\n\n if response.status_code == 200:\n access_token = response.json().get('access_token')\n request.session['shopify_access_token'] = access_token\n messages.success(request, 'Authentication successful.')\n return redirect('get-blogs')\n else:\n messages.error(request, 'Authentication failed.')\n return redirect('install')\n\n\ndef fetch_shopify_data(request):\n # Replace with your Shopify store's URL\n shop_url = 'junior-state.myshopify.com'\n\n # Replace with your Shopify access token\n access_token = request.session.get('shopify_access_token')\n\n # Fetch blogs from Shopify\n response = requests.get(f'https://{shop_url}/admin/api/2023-01/blogs.json',\n headers={'X-Shopify-Access-Token': access_token})\n blogs_data = response.json().get('blogs', [])\n\n for blog_data in blogs_data:\n # Create or update the blog in the database\n blog, created = ShopifyBlog.objects.get_or_create(title=blog_data.get('title', ''))\n\n # Fetch articles for the blog\n response = requests.get(f'https://{shop_url}/admin/api/2023-01/blogs/{blog_data[\"id\"]}/articles.json',\n headers={'X-Shopify-Access-Token': access_token})\n articles_data = response.json().get('articles', [])\n\n for article_data in articles_data:\n # Create or update articles for the blog\n ShopifyArticle.objects.update_or_create(\n title=article_data.get('title', ''),\n body_html=article_data.get('body_html', ''),\n blog=blog\n )\n\n # Fetch all blogs and articles from the database\n blogs = ShopifyBlog.objects.all()\n\n return render(request, 'blog_list.html', {'blogs': blogs})\n\n\ndef serialize_shopify_data(request):\n # Fetch all Shopify blogs\n blogs = ShopifyBlog.objects.all()\n\n # Create a list to store serialized data\n serialized_data = []\n\n for blog in blogs:\n serialized_blog = {\n 'title': blog.title,\n 'articles': []\n }\n\n articles = ShopifyArticle.objects.filter(blog=blog)\n\n for article in articles:\n serialized_article = {\n 'title': article.title,\n 'body_html': article.body_html\n }\n serialized_blog['articles'].append(serialized_article)\n\n serialized_data.append(serialized_blog)\n\n return 
JsonResponse({'blogs': serialized_data})\n","repo_name":"MoriartyJam/app-blog_list","sub_path":"shopifyapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"38490611667","text":"import requests, json\nimport datetime\n\nnow = datetime.datetime.now()\nnowTime = now.strftime('%H:%M')\ndays = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\nday = days[datetime.datetime.today().weekday()]\nnowDate = now.strftime('%Y-%m-%d')\ntomorrow = (datetime.datetime.today() + datetime.timedelta( days=1)).strftime('%Y-%m-%d')\n##########################################\n\n# 토큰 및 DB id 불러오기\nf = open(\"mydata\", 'r')\nrule_table_id = f.readline().rstrip()\ntarget_table_id = f.readline().rstrip()\ntoken = 'Bearer '+f.readline().rstrip()\nf.close()\n\n# version dependency & rule table attribute\nNotion_Version = '2021-05-13'\nURL = 'https://api.notion.com/v1/'\nget_headers = {'Authorization' : token,'Notion-Version' : Notion_Version}\npost_headers = {'Authorization' : token,'Notion-Version' : Notion_Version, 'Content-Type': 'application/json'}\n\naction_attribute_name = '행동'\ncycle_attribute_name = '주기'\npair_attribute_name = 'Pair'\nactive_time_attribute_name = 'Active Time'\nrelation_attribute_name = 'target'\nactive_attribute_name = 'active'\nInTime_attribute_name = 'In Time'\nAuto_attribute_name = 'Auto'\n\nequals_type = ['title', 'rich_text', 'url', 'email', 'phone', 'number','checkbox','select','date', 'created_time', 'last_edited_time']\ncontains_type= ['multi_select','People','relation']\n###################################################################################################################\n\n# 타겟쪽 relation 중 rule과 연결된 속성 id 추출\ntarget_relation_attribute_id = ''\nres = requests.get( URL +'databases/'+target_table_id, headers=get_headers)\nif res.status_code == 200:\n target_properties_dict = res.json()['properties']\n for key in target_properties_dict.keys():\n if target_properties_dict[key]['type'] == 'relation':\n if target_properties_dict[key]['relation']['database_id'].replace('-','') == rule_table_id :\n target_relation_attribute_id = target_properties_dict[key]['id']\n\n\ndef get_page(page_id):\n res = requests.get(URL + 'pages/' + page_id, headers=get_headers)\n if res.status_code == 200:\n\n page_properties = res.json()['properties']\n for page_property_key in page_properties.keys():\n if page_properties[page_property_key]['id'] == target_relation_attribute_id:\n del page_properties[page_property_key]\n break\n\n del page_properties[Auto_attribute_name]\n\n\n\n return page_properties\n\ndef rule_active_check(rule_dict):\n if not rule_dict[active_attribute_name]['checkbox']:\n return False\n if rule_dict[active_time_attribute_name]['rich_text'][0]['plain_text'][:3] != nowTime[:3]: #__:_x 비교 1분 단위 무시.\n return False\n rule_days = []\n for ruleDay_dict in rule_dict[cycle_attribute_name]['multi_select']:\n rule_days.append(ruleDay_dict['name'])\n if not day in rule_days and not 'Everyday' in rule_days:\n return False\n return True\n\ndef find_pair_target_id_list(rule_list,pair_num):\n result_list = []\n for rule_page in rule_list:\n rule_dict = rule_page['properties']\n if(len(rule_dict[pair_attribute_name]['rich_text'])!= 0):\n if rule_dict[pair_attribute_name]['rich_text'][0]['plain_text'] == '>' + pair_num:\n for create_target_dict in rule_dict[relation_attribute_name]['relation']:\n 
result_list.append(create_target_dict['id'])\n return result_list\n# rule 적용 start\ndata = {} ## 정렬추가하기\nres = requests.post(URL +'databases/'+rule_table_id+'/query',headers = post_headers, data=json.dumps(data))\nif res.status_code == 200:\n rule_list = res.json()['results']\n for rule_page in rule_list:\n rule_dict = rule_page['properties']\n if not rule_active_check(rule_dict):\n continue\n if rule_dict[action_attribute_name]['select']['name'] == 'Create':\n for create_target_dict in rule_dict[relation_attribute_name]['relation']:\n page_properties = get_page(create_target_dict['id'])\n result_data = nowDate if len(rule_dict[active_time_attribute_name]['rich_text'][0]['plain_text']) == 5 else tomorrow\n page_properties[InTime_attribute_name][\"date\"][\"start\"] = result_data + page_properties[InTime_attribute_name][\"date\"][\"start\"][10:]\n page_properties[InTime_attribute_name][\"date\"][\"end\"] = result_data + page_properties[InTime_attribute_name][\"date\"][\"start\"][10:]\n data = {\"parent\":{\"database_id\":target_table_id},\"properties\":page_properties}\n res = requests.post(URL + 'pages', headers=post_headers,data=json.dumps(data))\n if res.status_code == 200:\n print(\"created - \" + res.json()['id'])\n else:\n print(\"fail created\\n\" + page_properties)\n\n else:\n print('error: rule-action')\n","repo_name":"ewooooo/Project_Notion_Auto_To-do","sub_path":"Notion_Auto_To-do.py","file_name":"Notion_Auto_To-do.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"19345682696","text":"from pyfiglet import Figlet\nfrom bs4 import BeautifulSoup\nfrom requests import Response\nfrom concurrent.futures import ProcessPoolExecutor\nimport requests\nimport datetime\nimport urllib.parse\nimport os\n\nHEADERS = {\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,\"\n \"application/signed-exchange;v=b3;q=0.9\",\n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/93.0.4577.63 Safari/537.36\"\n}\n\nLIST_WORDS = ['SQL',\n 'Sql',\n 'DBError',\n 'Error']\n\n\ndef get_file_lines(filename: str):\n try:\n with open(filename, encoding=\"utf-8\") as f:\n symbols = f.read().split(\"\\n\")\n symbols = set(symbols)\n symbols = list(symbols)\n return symbols\n except FileNotFoundError:\n print(f\"\\n\\033[31m\\033[1m[ERROR]\\033[0m Please check if file \\033[31m\\033[4m{filename}\\033[0m exists\\n\")\n exit()\n\n\ndef check_site(site: str):\n tested_urls = []\n try:\n res = requests.get(url=site, headers=HEADERS).text\n soup = BeautifulSoup(res, \"lxml\")\n forms = soup.find_all(\"form\")\n\n for form in forms:\n action = form.get('action')\n value = form.find('input').get('name')\n if action and value:\n if 'http' in action:\n tested_urls.append(f'{action}?{value}=1')\n else:\n if site[-1] == '/':\n site = site[0:-1]\n tested_urls.append(f'{site}{action}?{value}=1')\n else:\n tested_urls.append(f'{site}{action}?{value}=1')\n return tested_urls\n except:\n return False\n print(f\"\\n\\033[31m\\033[1m[ERROR]\\033[0m Please check your urls \\033[31m\\033[4m{site}\\033[0m\\n\")\n\n\ndef error_in_body(response: Response) -> bool:\n for word in LIST_WORDS:\n if word in response.text:\n return True\n return False\n\n\ndef finish(site: str):\n counter = 0\n symbols = get_file_lines('payloads.txt')\n tested_urls = check_site(site)\n\n if not tested_urls:\n print(\n 
f\"\\033[31m\\033[1m[ERROR]\\033[0m \\033[34m\\033[4mURL\\033[0m \\033[31m\\033[4m{site}\\033[0m \\033[33m\\033[1mHASN'T \"\n f\"\\033[34m\\033[1m
    \\033[0m \\033[33m\\033[1mTAG\\033[0m\")\n else:\n for tested_url in tested_urls:\n for symbol in symbols:\n symbol = urllib.parse.quote_plus(symbol)\n url = f'{tested_url}{symbol}'\n res = requests.get(url)\n\n cur_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(\n f\"\\033[33m\\033[1m[{cur_time} - INFO]\\033[0m: \\033[34m\\033[4m{url}\\033[0m \\033[33m\\033[1mIS CHECKING....ЁЯдФ\\033[0m\")\n\n if error_in_body(res):\n cur_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(\n f\"\\033[32m\\033[1m[{cur_time} - GOOD]\\033[0m \\033[33m\\033[1mSQL-INJECTION IS POSSIBLE\\033[0m \"\n f\"\\033[34m\\033[4m{url}\\033[0m тЭЧтЭЧтЭЧ\")\n counter += 1\n with open('inj_sites.txt', 'a+', encoding='utf-8') as file:\n file.write(f'{url}\\n')\n else:\n cur_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(\n f\"\\033[31m\\033[1m[{cur_time} - BAD]\\033[0m \\033[33m\\033[1m NOT INJECTION \\033[0m \"\n f\"\\033[34m\\033[4m{url}\\033[0m\")\n\n\ndef main():\n sites = get_file_lines('sites.txt')\n if len(sites) <= 20:\n with ProcessPoolExecutor(max_workers=len(sites)) as ex:\n ex.map(finish, sites)\n\n if os.path.exists('inj_sites.txt'):\n counter = len(get_file_lines('inj_sites.txt'))\n cur_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(\n f\"\\n\\033[33m\\033[1m[GOOD WORK]\\033[0m \\033[34m\\033[4m{counter}\\033[0m URLS \\033[33m\\033[1mMAYBE HAS SQL-VULNERABILITY\\033[0m тЭЧтЭЧтЭЧ\")\n else:\n cur_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n print(\n f\"\\n\\033[31m\\033[1m[BAD WORK]\\033[0m \\033[34m\\033[4m0 URLS\\033[0m \\033[33m\\033[1mMAYBE HAS SQL-VULNERABILITY!\\033[0m\")\n else:\n print(\n f\"\\n\\033[31m\\033[1m[ERROR]\\033[0m \\033[33m\\033[1mFile\\033[0m \\033[31m\\033[4msites.txt\\033[0m \\033[33m\\033[1mmust have 20 sites NOT MORE!\\033[0m\")\n exit()\n\n\nif __name__ == \"__main__\":\n preview_text = Figlet(font='doom', width=200)\n text = preview_text.renderText('SQL - Injections Checker v.2.0')\n print(f'\\033[35m\\033[1m{text}\\033[0m')\n print(\"\\033[35m\\033[1m-\\033[0m\" * 140)\n\n try:\n main()\n except KeyboardInterrupt:\n print(\"\\n\\n\\033[36m\\033[1m[INFO]\\033[0m PROGRAM STOPPED BY USER\\n\")\n","repo_name":"andriitk/SQL-Injection-Checker-v.2","sub_path":"checker_2.py","file_name":"checker_2.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"21298593180","text":"# coding: utf-8\n\n\nimport pymysql.cursors\nimport json\nclass OperationMysql:\n\tdef __init__(self):\n\t\tself.conn = pymysql.connect(\n host='172.16.4.7',\n port=3306,\n user='root',\n passwd='affuli123',\n db='basic_server',\n charset='utf8')\n\t\tself.cur = self.conn.cursor()\n\n\t#查询一条数据\n\tdef search_one(self,sql):\n\t\tself.cur.execute(sql)\n\t\tresult = self.cur.fetchone()\n\t\t# result = json.dumps(result)\n\t\treturn result\n\nif __name__ == '__main__':\n op_mysql = OperationMysql()\n res = op_mysql.search_one(\"SELECT * FROM send_sms_record WHERE accept_mobile = '13510000004' ORDER BY id DESC \")[3][7:13]\n print(res)\n # code = res[3][7:13]\n # print(code)\n\n","repo_name":"kellen-fang/Interface_automations","sub_path":"common/interface_mysql.py","file_name":"interface_mysql.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"36730291808","text":"from __future__ import print_function\n\nfrom message_iterators import MessageIterator\nfrom 
collections import defaultdict\nimport math\n\nclass Twncb():\n\t\n\tdef __init__(self, messageIterator):\n\t\tself.messageIterator = messageIterator\n\t\tself.prior = {}\n\t\tself.language_model = defaultdict(dict)\n\t\tself.V = 0\n\t\tself.correct = 0\n\t\t\n\tdef train(self):\n\t\tN = 0\n\t\tallDsInClass = defaultdict(int)\n\t\tallClasses = set()\n\t\tmultinomialmodel = defaultdict(dict)\n\t\tdocFrequency = defaultdict(int)\n\t\t#Calculate df[word]\n\t\tfor message in self.messageIterator:\n\t\t\tif message.isTest(self.messageIterator.num_msgs):\n\t\t\t\tcontinue\n\t\t\tN += 1\n\t\t\tfor word,count in message.body.items():\n\t\t\t\tdocFrequency[word] += 1\n\t\t#Perform transforms\n\t\tfor message in self.messageIterator:\n\t\t\tif message.isTest(self.messageIterator.num_msgs):\n\t\t\t\tcontinue\n\t\t\tdocClass = message.newsgroupnum\n\t\t\tallClasses.add(docClass)\n\t\t\tself.prior[docClass] = self.prior.get(docClass,0) + 1\n\t\t\td = {}\n\t\t\tfor word,count in message.body.items():\n\t\t\t\td[word] = math.log(count + 1) * math.log( float(N) / docFrequency[word])\n\t\t\tnormalizeDenominator = math.sqrt( sum( [x**2 for x in d.values()] ) )\n\t\t\tfor word,count in message.body.items():\n\t\t\t\tmultinomialmodel[word][docClass] = multinomialmodel[word].get(docClass,0) + float(d[word]) / normalizeDenominator\n\t\t\t\tallDsInClass[docClass] += multinomialmodel[word][docClass]\n\t\tself.V = len(multinomialmodel.keys())\n\t\t#NC complement: count of all d's in classes other than c\n\t\tNCComplement = defaultdict(int)\n\t\tfor eachClass in allClasses:\n\t\t\tfor otherClass in allClasses - set([eachClass]):\n\t\t\t\tNCComplement[eachClass] += allDsInClass[otherClass]\t\t\n\t\tweightsSum = defaultdict(float)\n\t\t#Get conditional probabilities : P(t|c) = d[ij] of term t not in c / #d[kj] of all terms k in all docs j not in c\n\t\tfor word in multinomialmodel.keys():\n\t\t\tfor eachClass in allClasses:\n\t\t\t\tNCicomplement = 0\n\t\t\t\t#NCiComplement hold d's of the word in docs of classes other than c\n\t\t\t\tfor otherClass in allClasses - set([eachClass]):\n\t\t\t\t\tNCicomplement += multinomialmodel[word].get(otherClass,0)\n\t\t\t\tself.language_model[word][eachClass] = math.log( (NCicomplement + 1.0) / (NCComplement[eachClass] + self.V) )\n\t\t\t\tweightsSum[eachClass] += abs(self.language_model[word][eachClass])\n\t\t#Weight normalize each word's conditional probability\n\t\tfor word in multinomialmodel.keys():\n\t\t\tfor eachClass in allClasses:\n\t\t\t\tself.language_model[word][eachClass] = self.language_model[word][eachClass] / float(weightsSum[eachClass])\n\t\t#Divide raw counts of #docs of each class by total # of docs\n\t\tfor each in self.prior.keys():\n\t\t\tself.prior[each] = float(self.prior[each]) / N\n\t\n\t\n\tdef test(self):\n\t\tcount = 0\n\t\tpreviousClass = 0\n\t\tcorrect = 0\n\t\tfor msg in self.messageIterator:\n\t\t\tcount += 1\n\t\t\tif count > 20 and msg.newsgroupnum == previousClass:\n\t\t\t\tcontinue\n\t\t\telif count > 20 and msg.newsgroupnum != previousClass:\n\t\t\t\tprint()\n\t\t\t\tcount = 1\n\t\t\tpreviousClass = msg.newsgroupnum\n\t\t\tscoreVector = []\n\t\t\tfor eachClass in self.prior.keys():\n\t\t\t\tscore = 0\n\t\t\t\t#score += math.log(1.0/20)\n\t\t\t\tfor word, wordCount in msg.body.items():\n\t\t\t\t\t\tscore += self.language_model[word].get(eachClass,0) * wordCount\n\t\t\t\tscoreVector.append(score)\n\t\t\t\t#print(score,end='\\t')\n\t\t\twinner = min(scoreVector)\n\t\t\twinnerClass = 
scoreVector.index(winner)\n\t\t\tprint(winnerClass,end='\\t')\n\t\t\tif winnerClass == msg.newsgroupnum:\n\t\t\t\tcorrect += 1\n\n\tdef test_marked(self):\n\t\tself.t = 0\n\t\tfor msg in self.messageIterator:\n\t\t\tif not msg.isTest(self.messageIterator.num_msgs):\n\t\t\t\tcontinue\n\t\t\tscoreVector = []\n\t\t\tfor eachClass in self.prior.keys():\n\t\t\t\tscore = 0\n\t\t\t\t#score += math.log(1.0/20)\n\t\t\t\tfor word, wordCount in msg.body.items():\n\t\t\t\t\t\tscore += self.language_model[word].get(eachClass,0) * wordCount\n\t\t\t\tscoreVector.append(score)\n\t\t\t\t#print(score,end='\\t')\n\t\t\twinner = min(scoreVector)\n\t\t\twinnerClass = scoreVector.index(winner)\n\t\t\t#print(winnerClass,end='\\t')\n\t\t\tself.t+=1\n\t\t\tif winnerClass == msg.newsgroupnum:\n\t\t\t\tself.correct += 1\n\n","repo_name":"ruravi/InformationRetrieval","sub_path":"PA4/Twncb.py","file_name":"Twncb.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"16021765370","text":"# 220528\n# [S/W 문제해결 기본] 5일차 - Magnetic\n# 서로 충돌하여 테이블 위에 남아있는 교착 상태의 개수를 구하는 프로그램\n# 테이블의 크기는 100 x 100\n# 자성체는 테이블 앞뒤쪽에 있는 N극 또는 S극에만 반응하며 자성체끼리는 반응 x\n\n# 입력1) 정사각형 테이블의 한 변의 길이\n# 입력2) 총 10개의 테스트 케이스\n# 1 = N극 자성체, 2 = S극 자성체\n# 테이블의 윗 부분에 N극, 아랫 부분에 S극이 위치한다고 가정\n\n# 출력 : #t + 교착 상태의 개수\n\nimport sys\nsys.stdin = open(\"input.txt\", \"r\")\n\nfor test_case in range(1, 10 + 1):\n n = int(input()) # 한 변의 길이 / 100으로 고정\n table =[]\n\n # 테이블 정보 입력\n for _ in range(100):\n table.append(list(map(int, input().split())))\n\n # 윗쪽 S극 제거\n for i in range(100):\n for j in range(100):\n if table[j][i] == 1:\n break\n if table[j][i] == 2:\n table[j][i] == 0\n # 아래쪽 N극 제거\n for i in range(99, -1, -1):\n for j in range(99, -1, -1):\n if table[j][i] == 2:\n break\n if table[j][i] == 1:\n table[j][i] == 0\n\n lines = [[] for _ in range(100)]\n for i in range(100):\n for j in range(100):\n if table[j][i] != 0:\n lines[i].append(table[j][i])\n result = 0\n for line in lines:\n for i in range(len(line) - 1):\n if line[i] == 1 and line[i + 1] == 2:\n result += 1\n\n print(f'#{test_case} {result}')","repo_name":"monacaron/SWEA","sub_path":"swea1220.py","file_name":"swea1220.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"24041556589","text":"import os\r\nclass salvar_arquivo:\r\n def __init__(self,name):\r\n self.name = name\r\n def salvar(self,localentrada,localsaida):\r\n os.popen(f\"copy {src} {destination}\")\r\n\r\n#main\r\ns1 = salvar_arquivo('pdf')\r\nsrc = input(\"Enter src filename:\")\r\ndestination = input(\"Enter target filename:\")\r\ns1.salvar(src,destination)\r\n\r\n","repo_name":"PamellaFarias/PadroesDeProjeto","sub_path":"salvar arquivo.py","file_name":"salvar arquivo.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"94248375","text":"import math\n\ndef checkOverlap(player, danner):\n\t\"\"\"Get player 1 postion and its center of mass .\n\tGet player 2 postion and its cemter of mass\n\t. 
Check collision using the distance logic\n\t\"\"\"\n\tl1 = player\n\tr1 = [l1[0]+15, l1[1]-15]\n\tl2 = danner\n\tr2 = [l2[0]+15, l2[1]-15]\n\n\t# If one rectangle is on left side of other\n\tif l1[0] > r2[0] or l2[0] > r1[0]:\n\t\treturn False\n\n\t# If one rectangle is above other\n\tif r1[1] > l2[1] or r2[1] > l1[1]:\n\t\treturn False\n\n\treturn True\n\n# p1=[]\n# p2=[]\n\n# p1.append(int(input(\"Enter x coordinate of p1\")))\n# p1.append(int(input(\"Enter y coordinate of p1\")))\n# p2.append(int(input(\"Enter x coordinate of p2\")))\n# p2.append(int(input(\"Enter y coordinate of p2\")))\n\n# if(checkOverlap(p1,p2)):\n# \tprint(\"Collision happened\")\n# else:\n# \tprint(\"No collision\")\n","repo_name":"Yashashwee/socket-pygame","sub_path":"tests/Collision_test/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40632274164","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\n\ndef get_html(url):\n res = requests.get(url)\n return res.text\n\n\ndef write_csv(info):\n with open('trial.csv', 'a', encoding='utf-8-sig', newline='') as f:\n fieldnames = ['Наименование', 'Адрес', 'Описание', 'Бренд', 'Сезон', 'Цена', 'Наличие']\n writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=';', lineterminator='\\r')\n if f.tell() == 0:\n writer.writeheader()\n writer.writerow(info)\n\n\ndef get_data(html):\n soup = BeautifulSoup(html, \"lxml\")\n element = soup.find_all('div', class_='object ga')\n\n for el in element:\n try:\n name = el.find('a', class_='title').text.strip()\n except ValueError:\n name = ''\n\n try:\n url = el.find('a', class_='title').get('href').strip()\n except ValueError:\n url = ''\n\n try:\n description = el.find('span', class_='description').text.strip()\n except ValueError:\n description = ''\n\n try:\n brand = el.find('span', class_='brand').text.strip()\n except ValueError:\n brand = ''\n\n try:\n season = el.find('span', class_='collection').text.strip()\n except ValueError:\n season = ''\n\n try:\n price = el.find('span', class_='price').text.strip()\n except ValueError:\n price = ''\n\n try:\n available = el.find('span', class_='available').find('span', class_='label').get_text()[:9] + ' ' + el.find(\n 'span', class_='available').find('span', class_='label').get_text()[10:]\n except ValueError:\n available = ''\n\n info = {\n 'Наименование': name,\n 'Адрес': url,\n 'Описание': description,\n 'Бренд': brand,\n 'Сезон': season,\n 'Цена': price,\n 'Наличие': available\n }\n\n write_csv(info)\n\n\ndef main():\n for i in range(1, 9):\n url = f'https://trial-sport.ru/gds.php?s=51525&c1=1070639&c2=1071542&gpp=20&pg={i}/'\n get_data(get_html(url))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alena-Polk/Python","sub_path":"dz/dz.33/dz.33.py","file_name":"dz.33.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"4923602852","text":"# cook your dish here\ntry:\n T = int(input())\n for _ in range(T):\n count=0\n a, b = map(int, input().split())\n while a%3!=0 and b%3!=0:\n a=abs(a-b)\n b=abs(a-b)\n count=count+1\n print(count)\nexcept ValueError:\n print(\"Invalid input. 
Please enter valid integers.\")","repo_name":"yashpatle23/Competitive_Programming","sub_path":"codechef/MODULO3.py","file_name":"MODULO3.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39156653140","text":"# -*- coding: utf-8 -*-\r\nimport config\r\nimport http_util as HTTP\r\nimport common_util as C\r\n\r\n# 取第一层分类\r\ndef collectHeaderTypes(soup):\r\n liEles = soup.select(\"div.home_nav_con li\")\r\n result=[]\r\n index = 0\r\n for liEle in liEles:\r\n link = liEle.find(\"a\")\r\n topType = {}\r\n topType[\"no\"] = index\r\n topType[\"name\"] = link.get_text().encode(\"utf-8\")\r\n topType[\"url\"] = link[\"href\"].encode(\"utf-8\")\r\n index = index + 1\r\n result.append(topType)\r\n return result \r\n\r\n# 取第二层分类\r\ndef collectSubTypes(soup, headerType):\r\n outerEle = soup.find(\"dl\", class_=\"detail_sort\")\r\n result = []\r\n parentName = \"\"\r\n for child in outerEle.children:\r\n if(child.name == \"dt\"):\r\n parentName = child.get_text().encode(\"utf-8\")\r\n if(child.name == \"dd\"):\r\n links = child.find_all(\"a\")\r\n for link in links:\r\n linkName = link.get_text().encode(\"utf-8\")\r\n\r\n subType = {}\r\n subType[\"parent\"] = parentName\r\n subType[\"name\"] = linkName\r\n subType[\"url\"] = link['href'].encode(\"utf-8\")\r\n if(headerType != None):\r\n subType['pno'] = headerType[\"no\"]\r\n subType[\"pname\"] = headerType[\"name\"]\r\n result.append(subType)\r\n\r\n return result\r\n\r\n# 根据页面取 JD 链接\r\ndef collectJdItems(soup, param=None):\r\n links = soup.select(\".list_middle td.td_sp1 a\")\r\n obj = {}\r\n result = []\r\n for link in links:\r\n item = {}\r\n item[\"name\"] = link[\"title\"].encode(\"utf-8\")\r\n item[\"url\"] = config.detail_pre + link['href'].encode(\"utf-8\")\r\n result.append(item)\r\n\r\n nextPage = soup.find(\"a\", class_=\"a_icon04\")\r\n if(nextPage != None):\r\n obj[\"has_next\"] = True\r\n obj[\"url\"] = nextPage[\"href\"].encode(\"utf-8\")\r\n else:\r\n obj[\"has_next\"] = False\r\n \r\n obj[\"items\"] = result\r\n return obj\r\n\r\n# 获取 JD 详细内容\r\ndef collectDetail(soup, item=None):\r\n detail = {}\r\n\r\n # 组合 csv 记录\r\n csvContent = \"jd\"\r\n csvContent = C.appendCsv(csvContent, HTTP.getText(soup, \"div.wrap_title h3 a\", \"text\")) # 公司\r\n csvContent = C.appendCsv(csvContent, HTTP.getText(soup, \"div.wrap_title h1\", \"text\")) # 职位名\r\n\r\n liEles = soup.select(\"ul.job_info li\")\r\n csvContent = C.appendCsv(csvContent, getTextFromList(liEles, \"专业要求:\"))\r\n csvContent = C.appendCsv(csvContent, getTextFromList(liEles, \"学历要求:\"))\r\n csvContent = C.appendCsv(csvContent, getTextFromList(liEles, \"工作经验:\"))\r\n csvContent = C.appendCsv(csvContent, getTextFromList(liEles, \"外语要求:\"))\r\n\r\n desEle = soup.select(\"dl.zxd_jobinfo\")\r\n desStr = \"\"\r\n if(len(desEle) > 0):\r\n desStr = desEle[0].get_text().encode(\"utf-8\").strip()\r\n\r\n desStr = desStr.replace(\"\\r\\n\",\"\").replace(\"\\n\",\"\").replace(\"\\r\",\"\")\r\n csvContent = C.appendCsv(csvContent, desStr)\r\n\r\n detail[\"csv\"] = csvContent\r\n\r\n return detail\r\n\r\n# 从 list 中取对应元素(如果 liEle 中有 span 与 textStr 相同,则取 liEle 剩余的text)\r\ndef getTextFromList(liEles, textStr):\r\n for liEle in liEles:\r\n majorSpan = liEle.find(\"span\", text=textStr)\r\n if(majorSpan != None):\r\n majorSpan.extract()\r\n return liEle.get_text().encode(\"utf-8\").strip()\r\n return 
\"\"","repo_name":"qinglangee/nut_code","sub_path":"python/download_jd/download_jd_chenhr/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9038880447","text":"db = {\n\n}\ngolstotal = 0\ndb['jogador'] = str(input('Nome do jogadoor: '))\nnp = (int(input('Numero de partidas: ')))\ndb['gols'] = []\nfor c in range(0, np):\n gol = int(input(f'Quantos gols na partida {c+1}? '))\n db['gols'].append(gol)\n golstotal += gol\ndb['total'] = golstotal\n\nprint('*-'*30)\nprint(db)\nprint('*-'*30)\nfor k, v in db.items():\n print(f'O Campo {k} tem o valor de {v}')\nprint('*-'*30)\nprint(f'O Jogador {db[\"jogador\"]} jogou {len(db[\"gols\"])}')\nfor c in range(0, len(db[\"gols\"])):\n print(f' »» Na partida {c+1}, {db[\"jogador\"]} fez {db[\"gols\"][c]}')\n","repo_name":"Gabmagnus/cev-curso-python","sub_path":"exercicios/Ex93.py","file_name":"Ex93.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"31145617872","text":"\n# coding: utf-8\n\n# In[9]:\n\n\nimport numpy as np\nimport pandas as pd\nimport os\n\n\n# In[31]:\n\n\ndef mmi():\n \"\"\"\n Load mmi data\n \"\"\"\n\n data = pd.read_csv('../Dataset/MMI_OHE.csv', header=None)\n train_y = []\n train_x = []\n val_x = []\n val_y = []\n test_x = []\n test_y = []\n\n no_of_samples = len(data)\n no_of_train_samples = int( no_of_samples)\n\n train_X = data.iloc[: no_of_train_samples, 1:]\n\n train_Y = data[: no_of_train_samples][[0]]\n\n train_x = np.asarray(train_X)\n\n train_y = np.asarray(train_Y)\n\n\n return (train_x, train_y)\n\n","repo_name":"KaiGuzman/Human-Emotion-Detection","sub_path":"LR/load_MMI.py","file_name":"load_MMI.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"43030551728","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.forms.models import modelformset_factory, inlineformset_factory\n\nfrom .models import TodoList, Entry\nfrom .forms import TodoForm, EntryForm\n\n\ndef todo_edit(request, name):\n context = {}\n todo = get_object_or_404(TodoList, name=name)\n todo_list_formset = modelformset_factory(\n Entry, fields='__all__', extra=0,\n )\n if request.method == 'POST':\n formset = todo_list_formset(request.POST)\n if formset.is_valid():\n formset.save()\n formset = todo_list_formset(\n initial=[{'todo_list': todo.id}],\n queryset=Entry.objects.filter(todo_list=todo),\n )\n entry_form = EntryForm(\n initial={'todo_list': todo.id}\n )\n entry_form.fields['sequence_number'].widget = forms.HiddenInput()\n entry_form.fields['todo_list'].widget = forms.HiddenInput()\n for form in formset:\n form.fields['todo_list'].widget = forms.HiddenInput()\n form.fields['sequence_number'].widget = forms.HiddenInput(\n attrs={'data-name': 'sequence_number'}\n )\n context['formset'] = formset\n context['todo'] = todo\n context['entry_form'] = entry_form\n return render(request, 'organizer/todo_edit.html', context)\n\n\ndef new_todo(request):\n todo_inline_formset = inlineformset_factory(\n TodoList, Entry, exclude=('sequence_number',)\n )\n if request.method == 'POST':\n todo = TodoForm(request.POST)\n if todo.is_valid():\n instance = todo.save(commit=False)\n instance.owner_id = 
request.user.id\n instance.save()\n todo_inline = todo_inline_formset(request.POST, instance=instance)\n if todo_inline.is_valid():\n todo_inline.save()\n return redirect('/')\n formset = todo_inline_formset()\n todo = TodoForm()\n context = {\n 'form': todo,\n 'formset': formset\n }\n return render(request, 'organizer/new_todo.html', context)\n\n\ndef new_entry(request):\n if request.method == 'POST':\n entry_form = EntryForm(request.POST)\n if entry_form.is_valid():\n instance = entry_form.save(commit=False)\n todo_name = instance.todo_list\n instance.save()\n return redirect(\n reverse(\n 'organizer:todo_edit',\n kwargs={'name': todo_name}\n )\n )\n","repo_name":"kharchenko-kh-ua/foobar_todo","sub_path":"apps/organizer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13930193525","text":"#Author: Amelia Krouse\n#Instructor: Juan Arias\n#Date: 2/5\n\nimport math\n\ndef main():\n num = int(input(\"Please enter an integer.\"))\n sign = 0\n result = 0\n for i in range(1, num, +2):\n if(sign == 0):\n result = result + 4/i\n sign = 1\n elif(sign == 1):\n result = result - 4/i\n sign = 0\n print(result)\n print(result - math.pi)\n\n\nmain()\n","repo_name":"dragongyrl/CMPT120-KROUSE-","sub_path":"pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1765849514","text":"# n = int(input(\"Enter n:\\n\"))\nn = 13\n\nif n<=1:\n print(\"No\")\nelse:\n for i in range(2,n):\n if n%i == 0:\n print(\"No\")\n break\n # This is a Python syntax\n else: # else will execute ,if for exehausted naturally \n print(\"Yes\")\n\nif n<=1:\n print(\"No\")\nelse:\n x = 2\n while x*x <= n:\n if n%x==0:\n print(\"No\")\n break\n x = x+1\n else:\n print(\"Yes\")\n","repo_name":"ShafayetSaad/Cplusplus-Codes","sub_path":"GeeksForGeeks/Python Programming Foundation/06. Loops/17_prime.py","file_name":"17_prime.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23924627601","text":"from PIL import Image\nimport customtkinter\nfrom tkinter.messagebox import showerror\nimport webbrowser\nimport datetime\n\ncustomtkinter.set_appearance_mode(\"System\") # Modes: \"System\" (standard), \"Dark\", \"Light\"\ncustomtkinter.set_default_color_theme(\"dark-blue\") # Themes: \"blue\" (standard), \"green\", \"dark-blue\"\n\napp = customtkinter.CTk()\napp.geometry(\"430x330\")\napp.title(\"HackYourClaculator 1.0\")\napp.iconbitmap('icon.ico')\n\ndef button_callback():\n global txt\n err = 0\n txt = entry_1.get()[::-1]\n txt = txt.lower()\n prt = ''\n for i in range(len(txt)):\n if txt[i] == 'l':\n tx = '7'\n elif txt[i] == 'b':\n tx = '9'\n elif txt[i] == 'h':\n tx = '4'\n elif txt[i] == 's':\n tx = '5'\n elif txt[i] == 'g':\n tx = '6'\n elif txt[i] == 'e':\n tx = '3'\n elif txt[i] == 'i':\n tx = '1'\n elif txt[i] == 'o':\n tx = '0'\n else:\n msg = \"Некоректный запрос! Возможно некоторые символы не подлежат шифрованию. 
Попробуйте снова \\\n Рекомендации: \\\n - Напишите слово латинскими буквами \\\n - Обратитесь к таблице символов (data.xlsx)\"\n showerror(title=\"Ошибка!\", message=msg)\n f = open('history.txt', 'a')\n now = datetime.datetime.now()\n text_1.insert(\"0.0\", '[' + str(now) + ']' + ' ВВОД: ' + txt[::-1] + ', ВЫВОД: ' + \"(Ошибка!)\" + '\\n')\n f.write('[' + str(now) + ']' + ' ВВОД: ' + txt[::-1] + ', ВЫВОД: ' + \"(Ошибка!)\" + '\\n')\n f.close()\n err = 1\n break\n prt += tx\n if err == 0:\n now = datetime.datetime.now()\n text_1.insert(\"0.0\", '[' + str(now) + ']' + ' ВВОД: ' + txt[::-1] + ', ВЫВОД: ' + prt + '\\n')\n f = open('history.txt', 'a')\n f.write('[' + str(now) + ']' + ' ВВОД: ' + txt[::-1] + ', ВЫВОД: ' + prt + '\\n')\n f.close()\n\ndef button_callback_2():\n webbrowser.open('info.txt')\n\ndef button_callback_3():\n webbrowser.open('history.txt')\n\nframe_1 = customtkinter.CTkFrame(master=app, )\nframe_1.pack(pady=15, padx=10, fill=\"both\", expand=True)\n\nlogo = customtkinter.CTkImage(dark_image=Image.open(\"txt.png\"), size=(400,40))\nlabel_1 = customtkinter.CTkLabel(master=frame_1, text='', image=logo)\nlabel_1.pack(pady=10, padx=0)\n\nentry_1 = customtkinter.CTkEntry(master=frame_1, placeholder_text=\"Введите слово\")\nentry_1.pack(pady=0, padx=10)\n\nbutton_1 = customtkinter.CTkButton(master=frame_1, text=\"Шифровать...\", command=button_callback)\nbutton_1.pack(pady=10, padx=10)\n\nlabel_1 = customtkinter.CTkLabel(master=frame_1, text='1.1 | by Waysoon', justify=customtkinter.LEFT)\nlabel_1.pack(pady=0, padx=0)\n\nbutton_2 = customtkinter.CTkButton(master=frame_1, text=\"?\", command=button_callback_2, width=20, height=20)\nbutton_2.place(x=5, y=140)\n\nbutton_3 = customtkinter.CTkButton(master=frame_1, text=\"#\", command=button_callback_3, width=20, height=20)\nbutton_3.place(x=30, y=140)\n\nframe_2 = customtkinter.CTkFrame(master=app)\nframe_2.pack(pady=10, padx=10, fill=\"both\", expand=True)\n\ntext_1 = customtkinter.CTkTextbox(master=frame_2, width=400, height=100)\ntext_1.pack(pady=5, padx=0)\ntext_1.insert(\"0.0\", ' Здесь будет отображена история за эту сессию...\\n')\n\n\napp.mainloop()\n","repo_name":"WaysoonProgramms/HackYourCalculator","sub_path":"HackYourCalculator.pyw","file_name":"HackYourCalculator.pyw","file_ext":"pyw","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41089697823","text":"#!/usr/bin/python3\n\n\ndef safe_print_integer(value):\n \"\"\"function that prints an integer with \"{:d}\".format().\n\n Args:\n value (int): The integer to be printed.\n\n Returns:\n True- if a value has been correctly printed.\n Otherwise - false.\n \"\"\"\n try:\n print(\"{:d}\".format(value))\n return (True)\n except (TypeError, ValueError):\n return (False)\n","repo_name":"mercymwangi/alx-higher_level_programming","sub_path":"0x05-python-exceptions/1-safe_print_integer.py","file_name":"1-safe_print_integer.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40452506764","text":"from collections import UserDict\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING: # pragma: no cover # noqa: SIM108\n _Base = UserDict[str, Any]\nelse:\n _Base = UserDict\n\n\nclass TaskiqState(_Base):\n \"\"\"\n State class.\n\n This class is used to store useful variables\n for later use.\n \"\"\"\n\n def __init__(self) -> None:\n self.__dict__[\"data\"] = {}\n\n def __getattr__(self, 
name: str) -> Any:\n try:\n return self.__dict__[\"data\"][name]\n except KeyError as exc:\n cls_name = self.__class__.__name__\n raise AttributeError(\n f\"'{cls_name}' object has no attribute '{name}'\",\n ) from exc\n\n def __setattr__(self, name: str, value: Any) -> None:\n self[name] = value\n\n def __delattr__(self, name: str) -> None:\n try:\n del self[name]\n except KeyError as exc:\n cls_name = self.__class__.__name__\n raise AttributeError(\n f\"'{cls_name}' object has no attribute '{name}'\",\n ) from exc\n\n def __str__(self) -> str:\n return \"TaskiqState(%s)\" % super().__str__()\n","repo_name":"taskiq-python/taskiq","sub_path":"taskiq/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"62"} +{"seq_id":"12144902474","text":"import torch\nimport os\nimport sys\nimport cv2\nimport csv\nimport numpy as np\nfrom torchvision.transforms import Resize, Grayscale, CenterCrop\nfrom torch.nn.functional import interpolate\n\nsrc = sys.argv[1]\ndst = 'cycle12/'\n\nos.makedirs('cycle12', exist_ok=True)\n\ntlist = []\n\nwith open('hmcfold.csv') as mfile:\n\n reader = csv.DictReader(mfile, delimiter=',')\n crop = CenterCrop((185, 185))\n res = Resize((224, 224))\n gray = Grayscale()\n\n for row in reader:\n\n start = int(row['start_frame']) - 1\n end = int(row['end_frame'])\n name = row['name']\n print(name, start, end)\n\n cap = cv2.VideoCapture(src + name + '.avi')\n cap.set(0, start)\n\n frames = []\n\n for i in range(end-start):\n ret, frame = cap.read()\n frames.append(frame)\n\n clip = np.stack(frames) / 255\n clip = torch.tensor(clip, dtype=torch.float32).permute(0, 3, 1, 2)\n clip = res(gray(clip))\n clip = res((crop(clip)))\n clip = clip.permute(1, 0, 2, 3).unsqueeze(0)\n clip = interpolate(clip, size=(12, 224, 224), mode='trilinear')[\n 0].permute(1, 0, 2, 3)\n print(clip.shape, clip.min(), clip.max())\n\n torch.save(clip, dst + name + '.pt')\n","repo_name":"BioMedIA-MBZUAI/mi-classification","sub_path":"mi/a2c/cycle.py","file_name":"cycle.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"62"} +{"seq_id":"11083256310","text":"from rest_framework import serializers\n\nfrom .models import Order\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n class Meta:\n\n model = Order\n fields = (\n 'id', \n 'name', \n 'product_id', \n 'user_id',\n 'paid_amount',\n 'get_payment_url',\n 'get_payment_status',\n 'payment_id',\n 'randomID'\n )","repo_name":"AlexanderKorataev/bubikopf","sub_path":"server/dj/order/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25189653895","text":"#!/usr/bin/python3\n\"\"\"\nReads stdin line by line and computes metrics\n\"\"\"\nimport sys\n\n\ncont = 0\nsz = 0\ncount_status = {\"200\": 0, \"301\": 0, \"400\": 0, \"401\": 0,\n \"403\": 0, \"404\": 0, \"405\": 0, \"500\": 0}\n\ntry:\n for line in sys.stdin:\n # print(\"$\", line)\n # ip = re.findall(r\"\\d+\\.\\d+\\.\\d+\\.\\d+\", line)\n # date = re.findall(r\"\\d+-\\d+-\\d+ [\\d.:]+\", line)\n # get = re.findall(r\"GET [/.\\d\\w\\s]+\", line)\n line_split = line.split()\n if len(line_split) > 2:\n status = line_split[-2]\n size = line_split[-1]\n sz += int(size)\n # count status\n if status in count_status:\n count_status[status] += 1\n # print(\"line 
split\", line_split)\n\n # print(\"MATCHES\", ip, date, get, status, size)\n cont += 1\n if cont % 10 == 0:\n print(\"File size: {}\".format(sz))\n for k, v in sorted(count_status.items()):\n if v != 0:\n print(\"{}: {}\".format(k, v))\nfinally:\n print(\"File size: {}\".format(sz))\n for k, v in sorted(count_status.items()):\n if v != 0:\n print(\"{}: {}\".format(k, v))\n","repo_name":"ikki2530/holbertonschool-interview","sub_path":"0x06-log_parsing/0-stats.py","file_name":"0-stats.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"1938778775","text":"num = 0\nSum = 0\nSum_odd = 0\nSum_even = 0\nfor num in range(1,101):\n\n Sum = num + Sum\n if num%2==0:Sum_even = Sum_even + num\n else:\n Sum_odd += num\nprint(Sum)\nprint(Sum_even)\nprint(Sum_odd)","repo_name":"Marsable/python_exec","sub_path":"Python_control/test04.py","file_name":"test04.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"39188136826","text":"def main():\n N = int(input())\n A = list(map(int, input().split()))\n cnt = 0\n max_int = 0\n for i in range(2, 1001):\n tmp_cnt = 0\n for a in A:\n if a % i == 0:\n tmp_cnt += 1\n if tmp_cnt >= cnt:\n max_int = i\n cnt = max(cnt, tmp_cnt)\n\n print(max_int)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kttaroha/AtCoder","sub_path":"src/ABC1xx/ABC18x/ABC182/ABC182B.py","file_name":"ABC182B.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13599656998","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\n# Site Metadata\nAUTHOR = u'Adam Greig'\nSITENAME = u'Negative Acknowledge'\nSITEURL = 'http://negativeacknowledge.com'\nTIMEZONE = 'Europe/London'\nDEFAULT_LANG = u'en'\n\n# Path Settings\nPATH = 'content/'\nFILENAME_METADATA = r'(?P\\d{4}-\\d{2}-\\d{2})_(?P.*)'\nYEAR_ARCHIVE_SAVE_AS = '{date:%Y}/index.html'\nMONTH_ARCHIVE_SAVE_AS = '{date:%Y}/{date:%m}/index.html'\nDAY_ARCHIVE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/index.html'\nARTICLE_URL = '{date:%Y}/{date:%m}/{date:%d}/{slug}'\nARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'\nSTATIC_PATHS = ['images', 'extra/favicon.ico']\nEXTRA_PATH_METADATA = {'extra/favicon.ico': {'path': 'favicon.ico'}}\n\n# Content settings\nUSE_FOLDER_AS_CATEGORY = False\nDEFAULT_CATEGORY = 'Uncategorised'\nARTICLE_EXCLUDES = ['pages', '.ipynb_checkpoints']\n\n# Appearance Settings\nTHEME = 'themes/pelican-bootstrap3'\nBOOTSTRAP_THEME = 'journal'\nDISPLAY_PAGES_ON_MENU = True\nDISPLAY_CATEGORIES_ON_MENU = False\nDEFAULT_PAGINATION = 10\n\n# Plugins\nMARKUP = ('md', 'ipynb')\nPLUGIN_PATHS = ['plugins/']\nPLUGINS = ['ipythonnb']\n\n# Development settings, overriden by deployconf.py\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nRELATIVE_URLS = True\n\n\n# Web links\nLINKS = (('adamgreig.com', 'https://adamgreig.com'),\n ('randomskk.net', 'https://randomskk.net'),\n ('M0RND.net', 'http://m0rnd.net'),\n )\n\n# Social links\nSOCIAL = (('twitter', 'https://twitter.com/adamgreig'),\n ('github', 'https://github.com/adamgreig'),\n ('flickr', 'http://www.flickr.com/photos/randomskk'),\n 
)\n","repo_name":"adamgreig/negativeacknowledge","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"70190423559","text":"import socket\r\nimport pickle\r\nimport time\r\nDISCONNECT_MESSAGE = \"!DISCONNECT\"\r\nFORMAT = 'utf-8'\r\nc = socket.socket()\r\nADDR = ('localhost', 9999)\r\nc.connect(ADDR)\r\ndef send(msg):\r\n message = msg.encode(FORMAT)\r\n c.send(message)\r\n print(c.recv(1024).decode(FORMAT))\r\n print()\r\ndef ask_client_num():\r\n num_=input(\"Enter the number of players\")\r\n rounds=input(\"Enter the number of rounds\")\r\n c.send(pickle.dumps(num_))\r\n c.send(pickle.dumps(rounds))\r\nbool_ans=0 \r\nbool_ans=int(input(\"enter 1 if you started the game else enter 0\"))\r\nif bool_ans == 1:\r\n ask_client_num()\r\nelse:\r\n name = input(\"Enter your name..\")\r\n send(name)\r\n go=True\r\n while go:\r\n play = input(\"do you want to play the game? YES OR NO\")\r\n send(play)\r\n if play==\"NO\":\r\n go=False\r\n else:\r\n r = int(pickle.loads(c.recv(1024)))\r\n for i in range(r):\r\n print(pickle.loads(c.recv(1024)))\r\n num = input(\"Enter max val\")\r\n c.send(pickle.dumps(num))\r\n print(\"Thanks!! Time for the next round..\")\r\n time.sleep(1)\r\n time.sleep(5)\r\n print(pickle.loads(c.recv(1024)))\r\n print(c.recv(1024).decode(FORMAT))\r\nc.send(DISCONNECT_MESSAGE.encode())\r\n\r\n","repo_name":"anjali-2504/KRSSG_TASK_ROUND","sub_path":"TASK1_CASINO/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"1156265920","text":"import datetime\n\nfrom flask import Blueprint, flash, redirect, url_for, session\nfrom flask_login import login_required, current_user\nfrom flask_babel import gettext\nfrom flask_oauthlib.client import OAuthException\nfrom werkzeug.security import gen_salt\nfrom sqlalchemy_utils import Country\n\nfrom app import oauth, signals\nfrom app.models import db, UserLinkedinInfo\n\nfrom flask import current_app, request\n\n# Minimum amount of time, in seconds, that we think a token has to live.\n# Because the LinkedIn server gives us an `expires_in` based on the time\n# *they* sent their response, we should account for the amount of time it took\n# for the response to get back to us. 
We should also account for the\n# amount of time it might take for our OAuth2 request to reach their\n# server.\nMIN_TOKEN_LIFETIME = 120\n\n# These are the user info fields to retrieve for users from LinkedIn.\n# For more information, see:\n#\n# https://developer.linkedin.com/docs/fields/basic-profile\n#\nUSER_INFO_FIELDS = [\n 'id',\n 'first-name',\n 'last-name',\n 'maiden-name',\n 'formatted-name',\n 'phonetic-first-name',\n 'phonetic-last-name',\n 'formatted-phonetic-name',\n 'headline',\n 'location',\n 'industry',\n 'current-share',\n 'num-connections',\n 'num-connections-capped',\n 'summary',\n 'specialties',\n 'positions',\n 'picture-url',\n 'picture-urls::(original)',\n 'site-standard-profile-request',\n 'api-standard-profile-request',\n 'public-profile-url',\n]\n\nlinkedin = oauth.remote_app(\n 'linkedin',\n app_key='LINKEDIN',\n request_token_url=None,\n request_token_params={\n 'scope': 'r_basicprofile',\n 'state': lambda: session['linkedin_state']\n },\n base_url='https://api.linkedin.com/',\n authorize_url='https://www.linkedin.com/uas/oauth2/authorization',\n access_token_method='POST',\n access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',\n)\n\nviews = Blueprint('linkedin', __name__)\n\ndef retrieve_access_token(user):\n if user.linkedin is not None:\n if user.linkedin.expires_in.total_seconds() > MIN_TOKEN_LIFETIME:\n return (user.linkedin.access_token, '')\n\ndef store_access_token(user, resp):\n expiry = datetime.datetime.now() + datetime.timedelta(\n seconds=resp['expires_in']\n )\n\n if user.linkedin is None:\n user.linkedin = UserLinkedinInfo()\n user.linkedin.access_token = resp['access_token']\n user.linkedin.access_token_expiry = expiry\n db.session.add(user)\n db.session.commit()\n\ndef get_user_info(user):\n # https://developer-programs.linkedin.com/documents/field-selectors\n url = 'v1/people/~:(%s)?format=json' % ','.join(USER_INFO_FIELDS)\n token = retrieve_access_token(user)\n\n if token is None:\n raise OAuthException(\n 'Access token unavailable or expired for %s' % user.email\n )\n\n res = linkedin.get(url, token=token)\n\n if res.status != 200:\n raise OAuthException('Server returned HTTP %d: %s' % (\n res.status,\n repr(res.data)\n ), data=res.data)\n\n return res.data\n\ndef update_user_fields_from_profile(user, info):\n location = info.get('location')\n if location:\n if 'name' in location and not user.city:\n user.city = location['name']\n if 'country' in location and 'code' in location['country']:\n country_code = location['country']['code'].upper()\n try:\n user.country = Country(country_code)\n except ValueError:\n pass\n\n positions = info.get('positions')\n if positions and len(positions.get('values', [])) >= 1:\n position = positions['values'][0]\n org = position.get('company') and position['company'].get('name')\n if org and not user.organization:\n user.organization = org\n if position.get('title') and not user.position:\n user.position = position['title']\n\n if info.get('headline') and not user.position:\n user.position = info['headline']\n\ndef update_user_info(user):\n info = get_user_info(user)\n user.linkedin.user_info = info\n update_user_fields_from_profile(user, info)\n db.session.commit()\n\n@views.route('/linkedin/deauthorize')\n@login_required\ndef deauthorize():\n if current_user.linkedin:\n db.session.delete(current_user.linkedin)\n db.session.commit()\n signals.user_changed_profile.send(\n current_app._get_current_object(),\n user=current_user._get_current_object(),\n avatar_changed=True\n )\n 
flash(gettext(u'Disconnected from LinkedIn.'))\n return redirect(url_for('views.my_profile'))\n\n@views.route('/linkedin/authorize')\n@login_required\ndef authorize():\n session['linkedin_state'] = gen_salt(10)\n return linkedin.authorize(\n callback=url_for('linkedin.callback', _external=True)\n )\n\n@views.route('/linkedin/callback')\n@login_required\ndef callback():\n state = request.args.get('state')\n if not state or session.get('linkedin_state') != state:\n return 'Invalid state'\n del session['linkedin_state']\n\n resp = linkedin.authorized_response()\n if resp is None:\n flash(gettext(u'Connection with LinkedIn canceled.'), 'error')\n else:\n store_access_token(current_user, resp)\n update_user_info(current_user)\n signals.user_changed_profile.send(\n current_app._get_current_object(),\n user=current_user._get_current_object(),\n avatar_changed=True\n )\n flash(gettext(u'Connection to LinkedIn established.'))\n return redirect(url_for('views.my_profile'))\n","repo_name":"GovLab/noi2","sub_path":"app/linkedin.py","file_name":"linkedin.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"62"} +{"seq_id":"18408952144","text":"import sys\nimport os\nfrom pathlib import Path\nsys.path.append(os.path.dirname(Path(__file__).parents[0]))\n\nimport os\nfrom typing import List, Union\n\nimport numpy as np\nimport pandas as pd\nimport re\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.base import Index\nimport tensorflow as tf\n\nfrom config import (ID, ORDERED_CATEGORIES, ORIGINAL_DATA_PATH, PREPARED_DATA_PATH, TARGET, TEXT_COLS,\n UNORDERED_CATEGORIES)\n\n\ndef _process_ordered_categories(\n data: DataFrame, ordered_categories: List, fillna_value: Union[int, None] =-1) -> DataFrame:\n\n for ordered_category in ordered_categories:\n data[ordered_category] = data[ordered_category].fillna(fillna_value)\n data[ordered_category] = data[ordered_category].astype('category').cat.as_ordered()\n return data\n\ndef _process_unorder_categories(\n data: DataFrame, unordered_categories: List,\n train_idx: Index, test_idx: Index, keep_only_common_categories: bool = True) -> DataFrame:\n for unordered_category in unordered_categories:\n data[unordered_category] = data[unordered_category].fillna('NA')\n\n if keep_only_common_categories:\n common_categories = (\n set(data.loc[train_idx, unordered_category]) \n & set(data.loc[test_idx, unordered_category]))\n mask = ~data[unordered_category].isin(common_categories)\n data.loc[mask, unordered_category] = 'ANOTHER'\n data[unordered_category] = data[unordered_category].astype('category')\n return data\n\n\ndef _process_text_cols(data: DataFrame, text_cols: List, make_lower: bool = False) -> DataFrame:\n def __clean_numbers(x):\n if bool(re.search(r'\\d', x)):\n x = re.sub('[0-9]{9,10}', '9201234567', x)\n x = re.sub('[0-9]{5,8}', '11111', x)\n x = re.sub('[0-9]{4}', '1111', x)\n x = re.sub('[0-9]{3}', '111', x)\n x = re.sub('[0-9]{2}', '11', x)\n x = re.sub('[0-9]{1}', '1', x)\n return x\n \n data[text_cols] = data[text_cols].fillna('NA').astype('str')\n\n for text_col in text_cols:\n data[text_col] = data[text_col].str.replace('\\xa0', 'SMTH_SPECIAL')\n data[text_col] = data[text_col].str.replace('\\ufeff', '')\n data[text_col] = data[text_col].str.replace('\\u200d', ' ')\n data[text_col] = data[text_col].str.replace('ё', 'е')\n data[text_col] = data[text_col].str.replace('…', ' ... 
')\n for quote in [\"’\", \"‘\", \"´\", \"`\"]:\n data[text_col] = data[text_col].str.replace(quote, \"'\")\n data[text_col] = data[text_col].apply(__clean_numbers)\n \n if make_lower:\n for col in text_cols:\n data[col] = data[col].str.lower()\n \n return data\n \n\ndef prepare_data(train: pd.DataFrame, test: pd.DataFrame) -> pd.DataFrame:\n train=train.set_index(ID)\n train_idx = train.index\n\n test=test.set_index(ID)\n test_idx = test.index\n\n target = train[TARGET]\n target = target.str.get_dummies(sep=',').astype(np.int8)\n data = pd.concat((train.drop(TARGET, axis=1), test))\n\n data = _process_text_cols(data, TEXT_COLS, make_lower=False)\n data = _process_ordered_categories(data, ORDERED_CATEGORIES)\n data = _process_unorder_categories(\n data, UNORDERED_CATEGORIES, train_idx, test_idx, keep_only_common_categories=False\n )\n \n return data.iloc[:len(train)], data.iloc[len(train):], target\n\ndef main() -> None:\n\n train, test, target = (\n prepare_data(\n train=pd.concat([\n pd.read_csv(os.path.join(ORIGINAL_DATA_PATH, 'train.csv')),\n pd.read_csv(os.path.join(ORIGINAL_DATA_PATH, 'HeadHunter_new_train.csv'))\n ]),\n test=pd.read_csv(os.path.join(ORIGINAL_DATA_PATH, 'test.csv'))\n )\n )\n\n train.to_pickle(os.path.join(PREPARED_DATA_PATH, 'train.pkl'), protocol=4)\n test.to_pickle(os.path.join(PREPARED_DATA_PATH, 'test.pkl'), protocol=4)\n target.to_pickle(os.path.join(PREPARED_DATA_PATH, 'target.pkl'), protocol=4)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ashimko/boosters_hh","sub_path":"features/preprocess_original_data.py","file_name":"preprocess_original_data.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11944599522","text":"\"\"\" create event in ics format (vcal), print as string an db64 \"\"\"\nimport datetime\nimport base64\nimport urllib.request\nimport json\nimport pystache\nimport sys\n\n# see also https://www.kanzaki.com/docs/ical/dateTime.html\n\nclass Event():\n def __init__(self):\n self._createWrapper()\n\n def _createWrapper(self):\n self.head = \"\"\n self.foot = \"\"\n self.event = \"\"\n self.head += u\"BEGIN:VCALENDAR\\n\"\n self.head += u\"VERSION:2.0\\n\"\n self.head += u\"CALSCALE:GREGORIAN\\n\"\n self.head += u\"PRODID:OK Lab Karlsruhe\\n\"\n self.head += u\"BEGIN:VEVENT\\n\"\n\n self.foot += u\"END:VEVENT\\n\"\n self.foot += u\"END:VCALENDAR\\n\"\n\n def add(self,a,b):\n #print(\"a:\",a,\", b:\",b)\n # map plaintext description to VCAL items\n swin = (\"summary\",\n \"description\",\n \"start\", #dd.mm.yyyy\n \"end\", #dd.mm.yyyy\n \"duration\", #hh:mm\n \"location\"\n )\n swout = [u\"SUMMARY:\",\n u\"DESCRIPTION:\",\n u\"DTSTART;TZID=Europe/Berlin:\",\n u\"DTEND;TZID=Europe/Berlin:\",\n u\"DURATION:\", # as period, like PT2H0M0S for 2 hours\n u\"LOCATION:\"]\n try:\n item = swin.index(a) # throws on wrong items\n self.event += swout[item] # add property\n if a == \"start\" or a == \"end\":\n d = datetime.datetime.strptime(b, \"%d.%m.%Y %H:%M\")\n self.event += d.isoformat(timespec='seconds').replace(\"-\",\"\").replace(\":\",\"\")\n elif a == \"duration\":\n self.event += \"PT\" + b.split(\":\")[0] + \"H\" + b.split(\":\")[1] + \"M00S\"\n else:\n self.event += b\n self.event += u\"\\n\"\n \n except ValueError:\n print(\"Invalid date item: \",a)\n raise\n \n def get(self):\n return self.head + self.event + self.foot\n\n################################\n \nevent = Event()\n\n### simple test\n##event.add(\"summary\",\"Lab 
Meeting\")\n##start = \"02.03.2020 13:15\"\n##end = \"02.03.2020 15:30\"\n##event.add('start',start)\n##event.add('end', end)\n##event.add('description', \"Very nice socializing event\")\n##event.add('location', \"Karlsruhe Digital Lab\")\n##\n##print(event.get())\n##print(base64.b64encode(event.get().encode()).decode(\"utf-8\"))\n\n# link template\nhref = \"ICS\"\n\n# data url to get the original json\n# url = \"https://raw.githubusercontent.com/CodeforKarlsruhe/labSchedule/master/karlsruhe.json\"\n\n# use the simple local version from the codeforka repo\n# for use with running hugo server\n#url = \"https://raw.githubusercontent.com/CodeforKarlsruhe/codeforka/master/static/schedule/schedule.json\"\nurl = \"http://localhost:1313/schedule/schedule.json\"\n\ntry:\n    req = urllib.request.Request(url)\n    with urllib.request.urlopen(req) as response:\n        data = response.read()\nexcept urllib.error.HTTPError as err:\n    if err.code == 404 or err.code == 500 :\n        print(\"URL not found: \",url)\n        sys.exit(0)\n    else:\n        raise\n        sys.exit(0)\n\ndata = json.loads(data)\nfor dd in enumerate(data): # all languages\n    lidx = dd[0]\n    lang = dd[1]\n    for ee in enumerate(data[lang]):\n        idx = ee[0]\n        e = ee[1]\n        if not \"ics\" in e:\n            event = Event()\n            event.add(\"summary\",e[\"title\"])\n            start = e[\"date\"]\n            try:\n                s = datetime.datetime.strptime(start, \"%d.%m.%Y %H:%M\")\n            except ValueError:\n                start += \" 19:00\"\n                pass\n            print(\"start: \",start)\n            event.add('start',start) \n\n            if not \"duration\" in e:\n                e[\"duration\"] = \"02:00\" # default 2 hours\n            try:\n                event.add(\"duration\",e[\"duration\"])\n            except ValueError:\n                print (\"invalid duration\")\n\n            #event.add('end', end)\n            if not \"location\" in e:\n                if lang == \"en\":\n                    e[\"location\"] = \"Town hall, Digital Lab\"\n                else:\n                    e[\"location\"] = \"Rathaus, Digital Labor\"\n            \n            event.add('location', e[\"location\"])\n            print(\"event: \",event.get())\n            context = {\"ics\":base64.b64encode(event.get().encode()).decode(\"utf-8\"),\n                       \"date\":e[\"date\"].split(\" \")[0].replace(\".\",\"\")}\n            link = pystache.render(href,context)\n            data[lang][idx][\"ics\"] = link\n\nwith open(\"out.json\",\"w\") as f:\n    f.write(json.dumps(data))\n\nprint(\"new json generated: out.json\");\nprint(\"Update json file on OK Lab server with this content\")\n\n","repo_name":"CodeforKarlsruhe/codeforka","sub_path":"tools/makeIcs.py","file_name":"makeIcs.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"21052363958","text":"import random\r\nimport textwrap\r\n\r\ndef print_bold(msg, end=\"\\n\"):\r\n\tprint(\"\\033[1m\" + msg + \"\\033[0m\", end =end)\r\n\r\ndef print_dotted_lines(width=100):\r\n\tprint(\"_\"*width)\r\n\r\ndef occupy_huts(occupants):\r\n\thut = []\r\n\tfor xc in range(5):\r\n\t\toccupant = random.choice(occupants)\r\n\t\thut.append(occupant)\r\n\r\n\treturn hut\r\n\r\ndef game_mission():\r\n\twidth = 100\r\n\toccupants = [\"enemy\", \"native\", \"no_occupant\"]\r\n\tprint_dotted_lines()\r\n\tgame_title = \"War of Alex and Orchs\"\r\n\tprint_bold(msg = game_title)\r\n\tmsg = (\"The war between humans and Orchs has begun. Alexander was one of\"\r\n\t\t\" the brave knights guarding the eastern plains belonging to his kingdom.\"\r\n\t\t\" As a war strategy to defend themselves, the orchs intruded into the huts of the people.\"\r\n\t\t\" Alex didn't know this; he decided to get some water from the occupants\"\r\n\t\t\" nearby and take rest for some time. 
So he went towards the village\"\r\n\t\t\" where there were 5 huts. Since no one was outside, he decided to knock at the door.\")\r\n\tprint(textwrap.fill(msg, width= width))\r\n\tprint(\"\\n\")\r\n\tprint_bold(msg=\"Mission:\")\r\n\tprint(\"Choose the hut where Alexander can get some water and rest\")\r\n\tprint_dotted_lines()\r\n\r\n\treturn occupants\r\n\r\ndef choose_huts():\r\n\tmsg = \"choose a hut number to enter [1-5]: \"\r\n\t#print_bold(msg=msg)\r\n\tinput_user = input(\"\\n\" + msg)\r\n\tidx = int(input_user)\r\n\r\n\treturn idx\r\n\r\ndef revealing_occupants(hut, idx):\r\n\tprint(\"Revealing the occupants\")\r\n\tmsg = \"\"\r\n\tfor x in range(len(hut)):\r\n\t\tif x +1 == idx:\r\n\t\t\toccupant_info = \"Hut {} : {}\".format(idx, hut[x])\r\n\t\t\tprint_bold(msg=occupant_info)\r\n\tprint_dotted_lines()\r\n\r\ndef entering_hut(hut, idx, health_meter):\r\n\tmsg = \"Entering hut...\"\r\n\tprint_bold(msg=msg, end = \" \")\r\n\tif hut[idx-1] ==\"enemy\":\r\n\t\tprint_bold(msg=\"Enemy Spotted..!!!\", end=\" \")\r\n\t\tcontinue_attack = True\r\n\t\twhile continue_attack == True:\r\n\t\t\tuser_decision = input(\"\\n Do you wish to fight yes(y)/no(n)? \")\r\n\t\t\tif user_decision == \"n\":\r\n\t\t\t\tprint(\"Running away with current health status\")\r\n\t\t\t\tshow_health_meter(health_meter)\r\n\t\t\t\tfail = \"YOU LOSE :( Better luck next time...\"\r\n\t\t\t\tprint_bold(msg = fail)\r\n\t\t\t\tbreak\r\n\r\n\t\t\thealth_meter = attack_function(health_meter)\r\n\r\n\t\t\tif health_meter['orchs'] <=0:\r\n\t\t\t\tprint_bold(msg = \"Good Job..!!! :) You defeated the orchs in this hut {}\".format(idx))\r\n\t\t\t\tshow_health_meter(health_meter)\r\n\t\t\t\tprint_dotted_lines()\r\n\t\t\t\tbreak\r\n\r\n\t\t\tif health_meter['alex'] <= 0:\r\n\t\t\t\tprint_bold(msg = \"You Lose..!!! :( Better luck next time.\")\r\n\t\t\t\tprint_dotted_lines()\r\n\t\t\t\tbreak\r\n\telse:\r\n\t\tpassed = \"CONGRATULATIONS :) You Win..!!!\"\r\n\t\tprint_bold(msg = passed)\r\n\tprint_dotted_lines()\r\n\r\ndef reset_health_meter():\r\n\thealth_meter ={}\r\n\thealth_meter['alex'] = 50\r\n\thealth_meter['orchs'] = 30\r\n\r\n\treturn health_meter\r\n\r\ndef show_health_meter(health_meter):\r\n\tprint(\"\\n\")\r\n\tprint(\"Alexander Health: \" , health_meter['alex'])\r\n\tprint(\"Orch's Health: \", health_meter['orchs'])\r\n\r\ndef attack_function(health_meter):\r\n\t# Alex's attack lands with prob 0.6, the orchs' with 0.4 (hit_player is the one who takes the hit)\r\n\tplayer_rand_choice = ['alex']* 4 + ['orchs']*6\r\n\thit_player = random.choice(player_rand_choice)\r\n\thit_strength = random.choice([5,10,15])\r\n\thit_points = health_meter[hit_player]\r\n\thealth_meter[hit_player] = max(hit_points - hit_strength, 0)\r\n\tshow_health_meter(health_meter)\r\n\r\n\treturn health_meter\r\n\r\ndef run_application():\r\n\tkeep_playing = \"y\"\r\n\toccupants = game_mission()\r\n\thealth_meter = reset_health_meter()\r\n\twhile keep_playing == \"y\":\r\n\t\thuts = occupy_huts(occupants = occupants)\r\n\t\tuser_choice = choose_huts()\r\n\t\trevealing_occupants(hut = huts, idx=user_choice)\r\n\t\tentering_hut(hut=huts, idx = user_choice, health_meter = health_meter)\r\n\t\tkeep_playing = input(\"Play Again? 
Yes(y), No(n): \")\n\nif __name__ == \"__main__\":\n\trun_application()","repo_name":"instigateideas/Instigate_Ideas","sub_path":"alex_orchs_war_game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"2197424971","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n new_head, new_tail = None, None\n p = head\n while p:\n n = p.next\n if n is not None and n.val == p.val:\n while n:\n if n.val != p.val:\n break\n n = n.next\n else:\n if new_head is None:\n new_head = p\n new_tail = p\n else:\n new_tail.next = p\n new_tail = p\n new_tail.next = None\n p = n\n return new_head\n","repo_name":"willcoderwang/leetcode","sub_path":"82.py","file_name":"82.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"31330517620","text":"from __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\nfrom . import CreatePreferenceData\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BillGroup',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(default='default_bill_group', max_length=20, unique=True)),\n ('balance', models.FloatField(default=0)),\n ('charged', models.FloatField(default=0)),\n ('used_time', models.BigIntegerField(default=0)),\n ('used_credits', models.FloatField(default=0)),\n ('description', models.CharField(blank=True, default='', max_length=200)),\n ('charge_rate', models.FloatField(default=1)),\n ('last_operation_time', models.DateTimeField(null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Deposit',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('credits', models.FloatField(default=0)),\n ('apply_time', models.DateTimeField(null=True)),\n ('approved_time', models.DateTimeField(null=True)),\n ('bill_group', models.ForeignKey(db_column='bill_group', null=True, on_delete=django.db.models.deletion.CASCADE, to='user.BillGroup')),\n ],\n ),\n migrations.CreateModel(\n name='ImportRecord',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('row', models.IntegerField()),\n ('action_username', models.CharField(max_length=32)),\n ('task_id', models.CharField(max_length=40, null=True)),\n ('username', models.CharField(max_length=32)),\n ('role', models.IntegerField(choices=[(300, b'admin'), (200, b'operator'), (100, b'user')], default=100)),\n ('first_name', models.CharField(max_length=30, null=True)),\n ('last_name', models.CharField(max_length=30, null=True)),\n ('bill_group_name', models.CharField(max_length=20)),\n ('email', models.EmailField(max_length=254, null=True)),\n ('status', models.CharField(max_length=24, null=True)),\n ('error_message', models.CharField(max_length=50, null=True)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('update_time', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='LibuserConfig',\n fields=[\n ('key', 
models.TextField(primary_key=True, serialize=False)),\n ('value', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Preference',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=256)),\n ('value', models.TextField()),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('modify_time', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(error_messages={'username': 'User already exists'}, max_length=32, unique=True)),\n ('first_name', models.CharField(max_length=30, null=True)),\n ('last_name', models.CharField(max_length=30, null=True)),\n ('email', models.EmailField(max_length=254, null=True)),\n ('role', models.IntegerField(choices=[(300, b'admin'), (200, b'operator'), (100, b'user')], default=100)),\n ('last_login', models.DateTimeField(null=True)),\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now)),\n ('last_operation_time', models.DateTimeField(null=True)),\n ('fail_chances', models.IntegerField(default=0)),\n ('effective_time', models.DateTimeField(auto_now_add=True)),\n ('bill_group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='bill_members', to='user.BillGroup')),\n ],\n ),\n migrations.AddField(\n model_name='preference',\n name='user',\n field=models.ForeignKey(help_text=b'null:scope is global,otherwise scope is local', null=True, on_delete=django.db.models.deletion.CASCADE, to='user.User'),\n ),\n migrations.AlterUniqueTogether(\n name='importrecord',\n unique_together=set([('action_username', 'row'), ('action_username', 'username')]),\n ),\n migrations.AddField(\n model_name='deposit',\n name='user',\n field=models.ForeignKey(db_column='user', null=True, on_delete=django.db.models.deletion.CASCADE, to='user.User'),\n ),\n migrations.AlterUniqueTogether(\n name='preference',\n unique_together=set([('name', 'user')]),\n ),\n CreatePreferenceData(\n name='monitor.policy.node.status',\n value='cpu_core',\n ),\n ]\n","repo_name":"lenovo/Antilles","sub_path":"antilles-core/openHPC_web_project/antilles/user/migrations/0001_antilles_1_0_0.py","file_name":"0001_antilles_1_0_0.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"62"} +{"seq_id":"22995439637","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom .models import User\n\n\nclass CustomUserAdmin(UserAdmin):\n model = User\n fieldsets = (\n *UserAdmin.fieldsets, # unpacked default admin fieldsets\n (\n 'Extended attributes',\n {\n 'fields': (\n 'is_manager',\n 'is_employee',\n 'location',\n 'phone_no',\n 'job_id',\n )\n }\n )\n )\n list_display = [\n 'email',\n 'username',\n 'is_staff',\n 'location',\n 'phone_no',\n 'updated_date',\n 'job_id',\n 'is_manager',\n 'is_employee'\n ]\n list_filter = [\n 'is_manager',\n 'is_employee',\n 'is_staff',\n 'is_superuser',\n 'is_active'\n ]\n\n\nadmin.site.register(User, CustomUserAdmin)\n","repo_name":"mrshanas/learning-drf","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"31086671973","text":"import random\n\n\nfrom aiogram import types, 
Dispatcher\n\n\n\nimport bot_data\nimport candies\nfrom logger import log\nfrom user import users, check_user\nfrom user_kb import kb\n\n\n# #\n# Make a proper menu later, instead of the current version\n# #\n\n\n# /start and a few others\nasync def command_start( message: types.Message ):\n    user_id = message.from_user.id\n    await log( \"handlers\", \"command_start\", f\"{ user_id }: { message.text }\" )\n\n    user_name = message.from_user.full_name\n    if await check_user( user_id, \"game_mode\" ) == True:\n        await message.answer( f\"{ user_name }, сначала выйдите из игры > /stop\" )\n    await message.answer( f\"{ user_name }{ bot_data.main_menu }\", reply_markup=kb )\n\n\n# async def candies( message: types.Message ): # /candies game\n#     if len( message.text.split() ) == 1 or message.text.split()[1] == game:\n#\n#     if message.text.split()[1] == rules:\n#         await message.answer( f\"{ bot_data.rules }\" )\n\n\n# Handler for /candies_game # Rework into /candies game - a call with a parameter\nasync def candies_game( message: types.Message ): # /candies game\n    user_id = message.from_user.id\n    await log( __name__, \"candies_game\", f\"{ user_id }: { message.text }\" )\n\n    if await check_user( user_id, \"game_mode\" ) == False:\n        users[ user_id ][ \"game_mode\" ] = True # candies_mode = True\n        await candies.new_game( message )\n    elif message.text.isdigit():\n        if await candies.player( message ):\n            users[ user_id ][ \"game_mode\" ] = False\n    else:\n        await message.answer( \"Вы должны взять не менее 1 и не более 28 конфет...\" )\n\n\n# Handler for /candies_rules # Rework into /candies rules - a call with a parameter\nasync def candies_rules( message: types.Message ):\n    user_id = message.from_user.id\n    await log( __name__, \"candies_rules\", f\"{ user_id }: { message.text }\" )\n\n    await message.answer( f\"{ bot_data.rules }\", reply_markup=kb )\n\n\n# Exit from game mode\nasync def stop_game( message: types.Message ):\n    user_id = message.from_user.id\n    await log( __name__, \"stop_game\", f\"{ user_id }: { message.text }\" )\n\n    if await check_user( user_id, \"game_mode\" ) == True:\n        users[ user_id ][ \"game_mode\" ] = False\n\n        await message.answer( \"Жаль, что ты уже уходишь 😢...\", reply_markup=kb )\n        await command_start( message )\n\n    else:\n        await message.answer( \"Вы сейчас не играете в игру...\", reply_markup=kb )\n\n\n# Handler for all messages\nasync def not_command( message: types.Message ):\n    user_id = message.from_user.id\n    await log( __name__, \"not_command\", f\"{ user_id }: { message.text }\" )\n\n    if await check_user( user_id, \"game_mode\" ) == True and message.text.isdigit(): # if candies_mode:\n        await candies_game( message )\n    else:\n        await message.answer( f\"Я пока не умею обрабатывать: \\\"{message.text}\\\"\" )\n\n\n# Register the handlers\ndef register_handlers_client(dp: Dispatcher):\n    dp.register_message_handler( command_start, commands=[ \"start\", \"старт\", \"help\", \"помощь\", \"menu\", \"меню\" ] )\n\n    dp.register_message_handler( candies_game, commands=[ \"candies_game\", \"candies\", \"sweets\", \"конфеты\", \"game\", \"игра\", \"играть\" ] )\n    dp.register_message_handler( candies_rules, commands=[ \"candies_rules\", \"rules\", \"правила\" ] )\n\n    dp.register_message_handler( stop_game, commands=[ \"stop\", \"стоп\", \"выйти\" ] )\n\n    dp.register_message_handler( not_command 
)\n","repo_name":"BloodRaven707/Python_9_HW","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"23315832091","text":"#!/usr/bin/env python3\n\nimport argparse\nimport re\nimport yaml\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-o', '--output')\n ap.add_argument('-p', '--preamble')\n ap.add_argument('yamls', nargs='*')\n args = ap.parse_args()\n people = deduplicate(authors(args.yamls))\n with open(args.output, 'w', encoding='utf-8') as out:\n out.write(open(args.preamble, encoding='utf-8').read())\n out.writelines(map('{}\\n'.format, people))\n\ndef authors(yaml_paths):\n ''' Yield all authors/contributors named in the **_LICENSE.yaml files. '''\n for path in yaml_paths:\n for small_dict in yaml.safe_load(open(path, encoding='utf-8')):\n for credited_name in small_dict:\n # The credited name is basically a free-form field. Naively attempt to break it down.\n for component in re.split(r' *[(),] *| and | \\+ ', credited_name):\n component = re.sub(r'^(modified |retextured |made transparent |made |components |from |by |based on |works? |and )*', '', component, flags=re.I)\n yield component\n\ndef deduplicate(raw_authors):\n ''' Try to dedup the redundancies in the extracted authors, e.g., the same person listed\n both with and without an email address. We get these because of laziness, but from crediting\n someone once by full name and in abbreviated form when they co-author something else.'''\n l = sorted(filter(None, raw_authors), key=len)\n for i in range(len(l)-1, -1, -1):\n k = l[i].lower()\n if any(k in s.lower() for s in l[i+1:]):\n del l[i]\n l.sort(key=str.lower)\n return l\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"naev/naev","sub_path":"utils/build/gen_authors.py","file_name":"gen_authors.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":609,"dataset":"github-code","pt":"62"} +{"seq_id":"17350477414","text":"def solution(progresses, speeds):\n answer = []\n day_list = []\n for idx,progress in enumerate(progresses):\n day = 0\n while progress < 100:\n progress += speeds[idx]\n day += 1\n day_list.append(day)\n\n curr = day_list.pop(0)\n cnt = 1 \n while day_list:\n if curr < day_list[0]:\n answer.append(cnt)\n curr = day_list.pop(0)\n cnt = 1\n continue\n day_list.pop(0)\n cnt += 1\n answer.append(cnt)\n return answer\n\n\n","repo_name":"Hotsumm/BOJ-Programmers-Solution","sub_path":"Programmers/고득점Kit/기능개발.py","file_name":"기능개발.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"40946199690","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport requests\nimport logging\nimport boto3\nimport pprint\nimport threading\nimport socket\nfrom requests.auth import HTTPBasicAuth\nfrom botocore.exceptions import ClientError\nfrom time import sleep\n\n\nclass cattleman(object):\n def __init__(self):\n self.api_user = os.getenv('RANCHER_USER')\n self.api_key = os.getenv('RANCHER_KEY')\n self.api_url = os.getenv('RANCHER_URL')\n self.asg_name = os.getenv('ASG_NAME')\n if not self.api_user or not self.api_key or not self.api_url or not self.asg_name:\n logger.error(\"RANCHER_USER, RANCHER_KEY, RANCHER_URL and ASG_NAME are required env vars\")\n sys.exit(1)\n self.api_project = self.get_project(os.getenv('RANCHER_ENV', 'Default'))\n\n def 
get_project(self, rancher_env):\n projects = requests.get('{0}/v1/projects'.format(self.api_url),\n auth=HTTPBasicAuth(self.api_user, self.api_key))\n\n for project in projects.json()['data']:\n if rancher_env == project['name']:\n return project['id']\n else:\n logger.error(\"Specificed rancher environment or 'Default' does not exist\")\n\n def test_connection(self):\n logger.info(\"Connecting to the Rancher API...\")\n connection = requests.get('{0}/v1/'.format(self.api_url),\n auth=HTTPBasicAuth(self.api_user, self.api_key))\n if connection.status_code == 200:\n logger.info(\"Connected to the Rancher API\")\n else:\n logger.error(connection.json())\n\n def get_all_memory_info(self):\n memory = {}\n hosts = requests.get('{0}/v1/projects/{1}/hosts/'.format(self.api_url,\n self.api_project),\n auth=HTTPBasicAuth(self.api_user, self.api_key))\n for host in hosts.json()['data']:\n memory[host['id']] = host['info']['memoryInfo']\n return memory\n\n def decider(self):\n memory = self.get_all_memory_info()\n logger.debug(\"Memory Dict:\\n\" + pprint.pformat(memory))\n hosts = len(memory.keys())\n low_mem = []\n for host, mem in memory.items():\n if mem['memAvailable'] / mem['memTotal'] <= 0.35:\n low_mem.append(host)\n if len(low_mem) == hosts:\n logger.info(\"Trigger Scale Up\")\n self.scale_up()\n else:\n logger.info(\"Doing nothing..\")\n\n def scale_up(self):\n client = boto3.client('autoscaling')\n current_capacity = client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.asg_name])['AutoScalingGroups'][0]['DesiredCapacity']\n desired_capacity = current_capacity + 1\n try:\n response = client.set_desired_capacity(\n AutoScalingGroupName=self.asg_name,\n DesiredCapacity=desired_capacity,\n HonorCooldown=True)\n except ClientError as e:\n logger.error(\"Cooldown in effect, no action taken\")\n\n\ndef ping(delay, run_event):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('0.0.0.0', 1313)\n logger.debug('starting up on {0}'.format(server_address))\n sock.bind(server_address)\n sock.listen(1)\n\n while run_event.is_set():\n logger.debug('waiting for a connection')\n connection, client_address = sock.accept()\n try:\n logger.debug('client connected: {0}'.format(client_address))\n message = b'PONG'\n connection.sendall(message)\n connection.close()\n finally:\n connection.close()\n\n\ndef run_cattleman(delay, run_event):\n app = cattleman()\n app.test_connection()\n while run_event.is_set():\n app.decider()\n logger.info('Sleeping 1 minute')\n sleep(60)\n\nif __name__ == \"__main__\":\n # setup_logging\n logger = logging.getLogger('Cattleman')\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n ch.setLevel(logging.INFO)\n logger.addHandler(ch)\n logger.debug('Logging Started')\n\n run_event = threading.Event()\n run_event.set()\n\n jobs = []\n\n main_thread = threading.Thread(target=run_cattleman, args=(1, run_event))\n jobs.append(main_thread)\n\n status_thread = threading.Thread(target=ping, args=(1, run_event))\n jobs.append(status_thread)\n\n try:\n for job in jobs:\n job.start()\n except (KeyboardInterrupt, SystemExit):\n run_event.clear()\n for job in jobs:\n job.join()\n","repo_name":"mjgorman/cattleman","sub_path":"cattleman.py","file_name":"cattleman.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} 
+{"seq_id":"14639807371","text":"import sys\r\nimport math\r\n\r\nresposta = '\\0'\r\nnumNotas = 0\r\nnota=0\r\nsoma = 0\r\nmaximo = 0\r\n\r\nprint()\r\nprint(\"Insira um conjunto de notas : \")\r\ncontador = 0\r\nwhile contador < 255:\r\n print(\"Insira a nota\", (contador+1), \" : \")\r\n nota =input()\r\n numNotas+=1\r\n if (contador ==0 or float(nota)>maximo):\r\n maximo = float(nota)\r\n print(\"Quer inserir outra nota (S/N) ? \")\r\n resposta=input()\r\n if (resposta =='n' or resposta =='N'):\r\n break\r\n contador+=1\r\nprint(\"Maximo : \", maximo,\".\")\r\nprint()","repo_name":"Astrokiller1/Astrorep","sub_path":"702_16_Maximo.py","file_name":"702_16_Maximo.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"21324470982","text":"#from wsgiref.handlers import BaseCGIHandler\nfrom metaflow import FlowSpec, step, IncludeFile, card\nimport subprocess\n\n\nbase_dir = \"/home/abd/Desktop/Work/Metaflow/BSM-Search/data/bsm-search\"\ncode_dir = '/home/abd/Desktop/Work/Metaflow/BSM-Search/code'\nthisroot_dir = '/home/abd/root/root/bin'\n\nlist_of_tasks = []\n\n#----------------------------------- MC CONFIGURATIONS START -----------------------------------\nmc_options =\\\n {\n \"mc1\": { 'data_type': 'mc1', 'mcweight': '0.01875', 'nevents': '40000', 'njobs': 4 },\n \"mc2\": { 'data_type': 'mc2', 'mcweight': '0.0125', 'nevents': '40000', 'njobs': 4 },\n \"sig\" : { 'data_type': 'sig', 'mcweight': '0.0025', 'nevents': '40000', 'njobs': 2 },\n \"data\" : { 'data_type': 'data', 'nevents': '20000', 'njobs': 5 }\n\n }\n\nselect_mc_options = \\\n {\n 'mc' : [ \n { 'region': 'signal', 'variation': 'shape_conv_up', 'suffix': 'shape_conv_up' },\n { 'region': 'signal', 'variation': 'shape_conv_dn', 'suffix': 'shape_conv_dn' },\n { 'region': 'signal', 'variation': 'nominal,weight_var1_up,weight_var1_dn', 'suffix': 'nominal' }\n ],\n 'sig' : [\n {'region':'signal','variation':'nominal','suffix':'nominal'}\n ],\n 'data': [\n { 'region': 'signal', 'variation': 'nominal', 'suffix': 'signal' },\n { 'region': 'control', 'variation': 'nominal', 'suffix': 'control' }\n ]\n }\n\n\nhistogram_options = \\\n {\n 'shape' : [ \n { 'variations' : 'nominal' , 'shapevar': 'shape_conv_up' },\n { 'variations' : 'nominal' , 'shapevar': 'shape_conv_dn' }\n ],\n 'mc1': {'variations' : 'nominal,weight_var1_up,weight_var1_dn', 'shapevar': 'nominal'},\n 'mc2': {'variations' : 'nominal,weight_var1_up,weight_var1_dn', 'shapevar': 'nominal'},\n 'sig': {'variations' : 'nominal', 'shapevar': 'nominal'},\n 'data' : [\n { 'variations' : 'nominal', 'shapevar': 'nominal', 'parent_data_type':'data', 'sub_data_type': 'signal', 'result':'data', 'weight': 1.0 },\n { 'variations' : 'nominal', 'shapevar': 'nominal', 'parent_data_type':'data', 'sub_data_type': 'control', 'result': 'qcd', 'weight': 0.1875 }\n ]\n }\n\n#----------------------------------- MC CONFIGURATIONS END -----------------------------------\n\n\n#----------------------------------- DATA CONFIGURATIONS START ----------------------------------- \nhist_weight_data_options = \\\n {\n 'data': { 'variations' : 'nominal', 'shapevar': 'nominal', 'parent_data_type':'data', 'sub_data_type': 'signal', 'result':'data', 'weight': 1.0 },\n 'qcd': { 'variations' : 'nominal', 'shapevar': 'nominal', 'parent_data_type':'data', 'sub_data_type': 'control', 'result': 'qcd', 'weight': 0.1875 }\n }\n#----------------------------------- DATA CONFIGURATIONS END ----------------------------------- 
\n\nmakews_outputs = [\n base_dir + \"/xmldir\",\n base_dir + \"/results_results.table\",\n base_dir + \"/results_meas.root\",\n base_dir + \"/results_combined_meas_model.root\",\n base_dir + \"/results_combined_meas_profileLR.eps\",\n base_dir + \"/results_channel1_meas_profileLR.eps\",\n base_dir + \"/results_channel1_meas_model.root\"\n]\n\nplot_outputs = [\n base_dir + \"/nominal_vals.yml\",\n base_dir + \"/fit_results.yml\",\n base_dir + \"/prefit.pdf\",\n base_dir + \"/postfit.pdf\"\n]\n\n#----------------------------------- Prepare Dir Operation Start -----------------------------------\ndef generatePrepareCommand():\n return f\"\"\"\n rm -rf {base_dir}\n mkdir -p {base_dir}\n \"\"\"\n#----------------------------------- Prepare Dir Operation End -----------------------------------\n\n#----------------------------------- Scatter Operation Start -----------------------------------\ndef scatter(option):\n import json\n data_type = option['data_type']\n output_file = base_dir+\"/\"+data_type+\".json\"\n json_object = { data_type:[i+1 for i in range(option['njobs'])]}\n with open(output_file,'w') as outfile:\n json.dump(json_object,outfile)\n#----------------------------------- Scatter Operation End -----------------------------------\n\n#----------------------------------- Generate Operation Start -----------------------------------\ndef generate_data_generation(option, job_number):\n data_type = option['data_type']\n output_file = base_dir + \"/\" + data_type + \"_\" + str(job_number) + \".root\"\n return {\n 'option':option,\n 'jobnumber':str(job_number), \n 'output_file':output_file\n }\n\ndef generate_GenerateCommand(data):\n return f\"\"\"\n \n source {thisroot_dir}/thisroot.sh\n pwd\n python {code_dir}/generatetuple.py {data['option']['data_type']} {data['option']['nevents']} {data['output_file']}\n \"\"\"\n#----------------------------------- Generate Operation End -----------------------------------\n\n#----------------------------------- Merge Root Operation Start -----------------------------------\ndef merge_root_data_generation(option):\n data_type = option['data_type']\n njobs = option['njobs']\n output_file = base_dir + '/' + data_type + '.root'\n input_files = ''\n for i in range (1,njobs+1):\n input_files += ' ' + base_dir + '/' + data_type + '_' + str(i) + '.root'\n return {\n 'output_file':output_file, \n 'input_files':input_files\n }\n\ndef merge_root_GenerateCommand(data):\n return f\"\"\"\n\n source {thisroot_dir}/thisroot.sh\n hadd -f {data['output_file']} {data['input_files']}\n \"\"\"\n#----------------------------------- Merge Root Operation End -----------------------------------\n\n#----------------------------------- Select Operation Start -----------------------------------\ndef select_data_genertion(option, select_option):\n data_type = option['data_type']\n suffix = select_option['suffix']\n region = select_option['region']\n variation = select_option['variation']\n return{\n 'input_file': base_dir + '/' + data_type + '.root',\n 'output_file': base_dir + '/' + data_type+'_'+suffix+'.root',\n 'region': region,\n 'variation': variation\n }\n\ndef select_GenerateCommand(data):\n return f\"\"\"\n \n source {thisroot_dir}/thisroot.sh \n python {code_dir}/select.py {data['input_file']} {data['output_file']} {data['region']} {data['variation']}\n \"\"\"\n#----------------------------------- Select Operation End -----------------------------------\n\n#----------------------------------- Hist Shape Operation Start -----------------------------------\ndef 
hist_shape_data_genertion(option, shapevar, variations):\n data_type = option['data_type']\n return {\n 'input_file': base_dir + '/' + data_type + '_' + shapevar + '.root',\n 'output_file': base_dir + '/' + data_type+'_'+shapevar+'_hist.root',\n 'option':option,\n 'shapevar':shapevar,\n 'variations':variations,\n \"name\":data_type+\"_\"+shapevar\n }\n\ndef hist_shape_GenerateCommand(data):\n return f\"\"\"\n \n source {thisroot_dir}/thisroot.sh \n python {code_dir}/histogram.py {data['input_file']} {data['output_file']} {data['option']['data_type']} {data['option']['mcweight']} {data['variations']} {data['name']}\n \"\"\"\n#----------------------------------- Hist Shape Operation End -----------------------------------\n\n#----------------------------------- Hist Weight Operation Start -----------------------------------\ndef hist_weight_data_genertion(option, shapevar, variations, hist_weight_data_options = None):\n data_type = option['data_type']\n weight = None\n input_file = ''\n output_file = data_type+'_hist.root'\n if('mc' in data_type or 'sig' in data_type):\n input_file = data_type+'_'+shapevar+'.root'\n output_file = data_type+'_'+shapevar+'_hist.root'\n weight = option['mcweight']\n elif ('data' in data_type and hist_weight_data_options != None):\n input_file = data_type+'_'+hist_weight_data_options['sub_data_type']+'.root'\n output_file = hist_weight_data_options['result']+'_hist.root'\n weight = hist_weight_data_options['weight']\n if('control' in hist_weight_data_options['sub_data_type']):\n data_type = hist_weight_data_options['result']\n if('sig' in data_type):\n data_type = data_type+'nal'\n return {\n 'input_file': base_dir + '/' + input_file,\n 'output_file': base_dir + '/' + output_file,\n 'option':option,\n 'weight':weight,\n 'variations':variations,\n 'name':data_type\n }\n\ndef hist_weight_GenerateCommand(data):\n return f\"\"\"\n source {thisroot_dir}/thisroot.sh \n python {code_dir}/histogram.py {data['input_file']} {data['output_file']} {data['name']} {data['weight']} {data['variations']}\n \"\"\"\n#----------------------------------- Hist Weight Operation End -----------------------------------\n\n#----------------------------------- Merge Explicit Operation Start -----------------------------------\ndef merge_explicit_data_genertion(option, operation = None):\n data_type = option['data_type']\n input_files = ''\n output_file = data_type+'_merged_hist.root'\n\n if('mc' in data_type):\n if('merge_hist_shape' in operation):\n input_files = base_dir + '/' + data_type + '_shape_conv_up_hist.root ' + \\\n base_dir + '/' + data_type +'_shape_conv_dn_hist.root'\n output_file = data_type+'_shape_hist.root'\n elif('merge_hist_all' in operation):\n input_files = base_dir + '/' + data_type + '_nominal_hist.root ' + \\\n base_dir + '/' + data_type + '_shape_hist.root'\n elif('sig' in data_type):\n input_files = base_dir + '/' + data_type + '_nominal_hist.root'\n elif('data' in data_type):\n input_files = base_dir + '/' + data_type + '_hist.root '+ base_dir+'/qcd_hist.root'\n elif('all' in data_type):\n input_files = base_dir + '/' + 'mc1_merged_hist.root ' + \\\n base_dir + '/' + 'mc2_merged_hist.root ' + \\\n base_dir + '/' + 'sig_merged_hist.root ' + \\\n base_dir + '/' + 'data_merged_hist.root'\n output_file = \"all_merged_hist.root\"\n option = 'all'\n return{\n 'input_files': input_files,\n 'output_file': base_dir + \"/\" + output_file,\n }\n\n return{\n 'input_files': input_files,\n 'output_file': base_dir + \"/\" + output_file,\n }\n\ndef 
merge_explicit_GenerateCommand(data):\n return f\"\"\"\n source {thisroot_dir}/thisroot.sh \n hadd -f {data['output_file']} {data['input_files']}\n \"\"\"\n#----------------------------------- Merge Explicit Operation End -----------------------------------\n\n#----------------------------------- Makews Operation Start -----------------------------------\ndef makews_data_generation(data_bkg_hists,workspace_prefix,xml_dir):\n return {\n 'data_bkg_hists':data_bkg_hists,\n 'workspace_prefix':workspace_prefix,\n 'xml_dir':xml_dir\n }\n\ndef makews_GenerateCommand(data):\n return f\"\"\"\n source {thisroot_dir}/thisroot.sh\n python {code_dir}/makews.py {data['data_bkg_hists']} {data['workspace_prefix']} {data['xml_dir']}\n \"\"\"\n#----------------------------------- Makews Operation End -----------------------------------\n\n#----------------------------------- Plot Operation Start -----------------------------------\ndef plot_data_generation(combined_model, nominal_vals, fit_results, prefit_plot, postfit_plot):\n return {\n 'combined_model':combined_model,\n 'nominal_vals':nominal_vals,\n 'fit_results':fit_results,\n 'prefit_plot':prefit_plot,\n 'postfit_plot':postfit_plot\n }\n\ndef plot_GenerateCommand(data):\n return f\"\"\"\n set -x\n source {thisroot_dir}/thisroot.sh\n hfquickplot write-vardef {data['combined_model']} combined {data['nominal_vals']}\n hfquickplot plot-channel {data['combined_model']} combined channel1 x {data['nominal_vals']} -c qcd,mc2,mc1,signal -o {data['prefit_plot']}\n hfquickplot fit {data['combined_model']} combined {data['fit_results']}\n hfquickplot plot-channel {data['combined_model']} combined channel1 x {data['fit_results']} -c qcd,mc2,mc1,signal -o {data['postfit_plot']}\n\t\"\"\"\n#----------------------------------- Plot Operation End -----------------------------------\n\n#----------------------------------- Run Command Start -----------------------------------\ndef run_bash(bashCommand):\n process = subprocess.Popen(bashCommand, shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n output, error = process.communicate()\n print(\"The command is: \\n\",bashCommand)\n print(\"The output is: \\n\",output.decode())\n print(\"Return Code:\", process.returncode)\n if process.returncode and error:\n print(\"The error is: \\n\",error.decode())\n#----------------------------------- Run Command End -----------------------------------\n\nclass BSM_Search(FlowSpec):\n \n @card\n @step\n def start(self):\n print(\"Start Flow\")\n self.next(self.prepare_directory)\n\n @card\n @step\n def prepare_directory(self):\n bashCommand = generatePrepareCommand()\n run_bash(bashCommand)\n self.options = []\n for key in mc_options.keys():\n self.options.append(key)\n self.next(self.scatter_operation, foreach=\"options\")\n \n @card\n @step\n def scatter_operation(self):\n self.option = mc_options[self.input]\n scatter(self.option)\n self.jobs = []\n for i in range (1,self.option['njobs']+1):\n self.jobs.append(str(i))\n self.next(self.generate_operation, foreach=\"jobs\")\n \n @card\n @step\n def generate_operation(self):\n option = self.option\n jobnumber = self.input\n data = generate_data_generation(option, jobnumber)\n bashCommand = generate_GenerateCommand(data)\n run_bash(bashCommand)\n self.next(self.merge_root_operation)\n\n @card\n @step\n def merge_root_operation(self,inputs): \n self.merge_artifacts(inputs)\n option = self.option\n data = merge_root_data_generation(option)\n bashCommand = merge_root_GenerateCommand(data)\n run_bash(bashCommand)\n 
self.next(self.merge_and_select_link)\n\n @card\n @step\n def merge_and_select_link(self): \n option = self.option\n if(option['data_type'][2].isdigit()):\n data_type = 'mc'\n else:\n data_type = option['data_type']\n\n self.select_options = select_mc_options[data_type]\n\n self.next(self.select_operation, foreach=\"select_options\")\n\n @card\n @step\n def select_operation(self): \n option = self.option\n select_option = self.input\n data_type = option['data_type']\n data = select_data_genertion(option, select_option)\n bashCommand = select_GenerateCommand(data)\n run_bash(bashCommand)\n self.next(self.join_select)\n \n @card\n @step\n def join_select(self,inputs):\n self.merge_artifacts(inputs)\n self.next(self.select_and_hist_link)\n\n @card \n @step\n def select_and_hist_link(self): \n option = self.option\n data_type = option['data_type']\n self.hist_options = [] \n if('data' in data_type):\n self.hist_options += histogram_options[data_type]\n else:\n self.hist_options.append(histogram_options[data_type])\n\n if('mc' in data_type):\n self.hist_options += histogram_options['shape']\n\n self.next(self.hist_operation, foreach='hist_options')\n \n @card\n @step\n def hist_operation(self): \n option = self.option\n data_type = option['data_type']\n hist_option = self.input\n shapevar = hist_option['shapevar']\n variations = hist_option['variations']\n bashCommand = ''\n if(shapevar in ['shape_conv_up', 'shape_conv_dn']):\n data = hist_shape_data_genertion(self.option, shapevar, variations)\n bashCommand = hist_shape_GenerateCommand(data)\n else:\n if('data' in data_type):\n data = hist_weight_data_genertion(self.option, shapevar, variations, hist_option)\n else:\n data = hist_weight_data_genertion(self.option, shapevar, variations)\n \n bashCommand = hist_weight_GenerateCommand(data)\n\n run_bash(bashCommand)\n self.next(self.join_hists)\n \n @card\n @step\n def join_hists(self,inputs):\n self.merge_artifacts(inputs)\n option = self.option\n\n if('mc' in option['data_type']):\n data = merge_explicit_data_genertion(self.option, 'merge_hist_shape')\n bashCommand = merge_explicit_GenerateCommand(data)\n run_bash(bashCommand)\n \n data = merge_explicit_data_genertion(self.option, 'merge_hist_all')\n bashCommand = merge_explicit_GenerateCommand(data)\n run_bash(bashCommand)\n self.next(self.join_scatter)\n\n '''\n @card\n @step\n def join_hist_shape(self,inputs):\n self.merge_artifacts(inputs, include=['variations'])\n option = self.option\n if('mc' in option['data_type']):\n variations = self.variations\n data = merge_explicit_data_genertion(self.option, 'merge_hist_shape', variations)\n bashCommand = merge_explicit_GenerateCommand(data)\n run_bash(bashCommand)\n self.next(self.join_hists)\n\n @card\n @step\n def join_hists(self):\n option = self.option\n variations = self.hist_option['variations']\n data = merge_explicit_data_genertion(self.option, 'merge_hist_all', variations)\n bashCommand = merge_explicit_GenerateCommand(data)\n run_bash(bashCommand)\n self.next(self.join_select)\n '''\n\n \n\n @card\n @step\n def join_scatter(self,inputs):\n self.next(self.end)\n\n @card\n @step\n def end(self):\n print(\"End Flow\")\n\n\n\nif __name__ == '__main__':\n BSM_Search()\n","repo_name":"AbdAlRahman-Odeh-99/bsm-search-metaflow","sub_path":"bsm-search.py","file_name":"bsm-search.py","file_ext":"py","file_size_in_byte":18233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"70854993479","text":"import requests\nfrom django.http import 
HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.views import View\n\n\nclass RefundView(View):\n MERCHANT_PRIVATE_KEY = '1dab4b92513e63571052'\n SANDBOX_URL = 'https://business.reactivepay.com'\n\n def get(self, request):\n return render(request, 'refund/create_refund.html')\n\n def post(self, request):\n\n payload = {\n \"token\": request.POST['token payment'],\n \"amount\": int(request.POST['request amount'])\n }\n\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': f'Bearer {self.MERCHANT_PRIVATE_KEY}'\n }\n\n response = requests.post(f'{self.SANDBOX_URL}/api/v1/refunds', json=payload, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n return HttpResponse(\n f\"{data['refund']['amount']}, {data['refund']['status']}\")\n else:\n return HttpResponse(f'Something went wrong: {response.status_code}')\n","repo_name":"Lbisiy/reactivePay","sub_path":"reactivePay/refund/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"13328171441","text":"#%%\n# Structural optimization of case 2\n## Optimization, the field that searches for optimal solutions to problems, permeates every area of engineering: it is used from conceptual design onward, and it is also heavily used when reworking existing designs, trying to optimize them with respect to a given objective. \n\n\n## The idea here is to use a genetic algorithm to optimize a truss structure, with the cross-sectional areas of each tube as the optimization variables. The evaluation functions are the stress in the truss elements and the volume of the structure, and the goal is to minimize both. For this we will use the DEAP genetic-algorithm library and the multi-objective NSGA-II algorithm.
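\n\n## Below is a minimal, self-contained sketch (added here purely for illustration; it is not part of the original script) of the same DEAP/NSGA-II pattern on a toy two-objective problem. Every \"_sketch\" name is hypothetical and the only assumption is that DEAP is installed; the real script that follows swaps the toy objectives for the FEM-based stress and volume evaluations.\ndef _sketch_nsga2_demo(n_genes=10, pop_size=12, n_gen=5):\n import random\n from deap import algorithms, base, creator, tools\n # two objectives, both minimized (stand-ins for volume and stress)\n creator.create(\"_SketchFitness\", base.Fitness, weights=(-1.0, -1.0))\n creator.create(\"_SketchInd\", list, fitness=creator._SketchFitness)\n tb = base.Toolbox()\n tb.register(\"gene\", random.randint, 1, 8) # integer area multiplier per tube\n tb.register(\"individual\", tools.initRepeat, creator._SketchInd, tb.gene, n=n_genes)\n tb.register(\"population\", tools.initRepeat, list, tb.individual)\n tb.register(\"evaluate\", lambda ind: (float(sum(ind)), float(max(ind)))) # toy (volume, stress)\n tb.register(\"mate\", tools.cxTwoPoint)\n tb.register(\"mutate\", tools.mutUniformInt, low=1, up=8, indpb=0.05)\n tb.register(\"select\", tools.selNSGA2)\n pop = tb.population(n=pop_size)\n algorithms.eaMuPlusLambda(pop, tb, mu=pop_size, lambda_=2*pop_size, cxpb=0.7, mutpb=0.2, ngen=n_gen, verbose=False)\n # the first non-dominated front approximates the Pareto trade-off curve\n return tools.sortNondominated(pop, len(pop), first_front_only=True)[0]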
\n\n\n# %%\n### Defining the problem\nimport random\n\nfrom Treelicia import Fem3d\nimport numpy as np\nimport plotter3D as plott\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport time\nfrom deap import algorithms\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\n \n\n# assembling the geometry\nL = 9 # tail length\nn = 5 # number of points per line\n\n## right side\ny_sup_d = lambda i: (-0.15-i*0.45, L-i*L, 0.6)\nnodes1 = ([y_sup_d(i) for i in np.linspace(0,1,n)])\n\n#y_inf_d = lambda j: (-0.15-j*0.45, L-j*L, 0.3-j*0.9)\n#nodes2 = ([y_inf_d(i) for i in np.linspace(0,1,n)])\n\n## left side\ny_sup_e = lambda i: (0.6-i*0.45, 0+i*L, 0.6)\nnodes3 = ([y_sup_e(i) for i in np.linspace(1,0,n)])\n\ny_inf_e = lambda j: (0, 0+j*L, -0.6+j*0.9)\nnodes4 = ([y_inf_e(i) for i in np.linspace(1,0,n)])\n\n# end point\nnodes5 = ([ (0, L+2.4, 0.6+0.1) ])\n\nnodes = []\nnodes.extend(nodes1)\n#nodes.extend(nodes2)\nnodes.extend(nodes3)\nnodes.extend(nodes4)\nnodes.extend(nodes5)\n\n# add index\nnodes = tuple( [ (j, nodes[j][0], nodes[j][1], nodes[j][2]) for j in range(len(nodes))] )\n\n# elements \n## straight segments\nelementos1 = ([ (i,i+1) for i in range(n-1) ] )\nelementos2 = ([ (i,i+1) for i in range(n,2*n-1)])\nelementos3 = ([ (i,i+1) for i in range(2*n,3*n-1)])\n#elementos4 = ([ (i,i+1) for i in range(3*n,4*n-1)])\n\n## triangular elements\nelementos4 = ([ (i, i+n) for i in range(0,n)])\nelementos5 = ([ (i+n,i ) for i in range(n,2*n)])\nelementos6 = ([ (i+2*n, i) for i in range(0,n)])\n\n# final element \nelementos7 = ( (0,len(nodes)-1),\n (n ,len(nodes)-1),\n (2*n ,len(nodes)-1)\n)\n\n# swapped elements\nelementos8 = (\n (14,3),\n (14,8),\n (13,7),\n (13,2),\n (12,6),\n (12,1),\n (11,5),\n (11,0),\n (9,3),\n (8,2),\n (7,1),\n (6,0),\n)\n \n\nelementos = []\nelementos.extend(elementos1)\nelementos.extend(elementos2)\nelementos.extend(elementos3)\nelementos.extend(elementos4)\nelementos.extend(elementos5)\nelementos.extend(elementos6)\nelementos.extend(elementos7)\nelementos.extend(elementos8)\n\nelementos = tuple( [ (j, elementos[j][0], elementos[j][1]) for j in range(len(elementos))] )\n\nF = 10000 #N\nforcas = (\n #(15, 0, -F),\n (15, 1, -0.75*F),\n (15, 2, 0.5*F),\n)\n\ncontorno = (\n (4,0,0),\n (4,1,0),\n (4,2,0),\n (9,0,0),\n (9,1,0),\n (9,2,0),\n (14,0,0),\n (14,1,0),\n (14,2,0)\n)\n\nE2 = 73e9# GPa, aluminum\nA2 = 0.0013# m^2 - 4 cm diameter\nrho = 27000# kg/m^3\n\nMAX_ATRIB = 8\nAREA_REF = 0.0013/4.0 #m^2\n\n\n\ndef stress(individuo):\n individuo = np.array(individuo)\n \n try:\n A2 = AREA_REF*individuo\n model1 = Fem3d(nodes, elementos, forcas, contorno, E2, A2, rho)\n deslocamento, reacoes = model1.solve()\n tensoes = model1.getStress(deslo= deslocamento)\n \n except:\n return 10**12\n return max(np.abs(tensoes)*10**-6)\n\ndef volume(individuo):\n individuo = np.array(individuo)\n volu = 0\n ltotal = 0\n for index, elem in enumerate(elementos):\n # index is the element\n x = nodes[elem[1]][1] - nodes[elem[2]][1]\n y = nodes[elem[1]][2] - nodes[elem[2]][2]\n z = nodes[elem[1]][3] - nodes[elem[2]][3]\n L = np.sqrt(x**2+y**2+z**2)\n ltotal += L\n volu += AREA_REF*individuo[index]*L\n return volu\n \ndef fitness(individuo):\n\n vol = volume(individuo)\n tensao = stress(individuo)\n\n plt.plot(vol, tensao, '.', color=\"black\")\n\n return vol, tensao\n\ndef crossover(indiA, indiB):\n troca = random.randint(1,len(indiA)-2)\n \n aux = indiA[0:troca]\n 
indiA[0:troca] = indiB[0:troca]\n indiB[0:troca] = aux\n return indiA, indiB\n\ndef checkGeometria(individuo):\n \n for index, gene in enumerate(individuo):\n\n if not(gene >= 1 and gene <= 8):\n \n return False \n \n return True\n\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(-1.0,-1.0))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\ntoolbox.register(\"attr_bool\", random.randint, 1, MAX_ATRIB)\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=len(elementos))\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ntoolbox.register(\"evaluate\", fitness)\ntoolbox.register(\"mate\", tools.cxTwoPoint)\ntoolbox.register(\"mutate\", tools.mutUniformInt, low = 1, up = MAX_ATRIB, indpb = 0.05)\ntoolbox.register(\"select\", tools.selNSGA2)\n\ndef main(max_generations, population_size):\n\n\n NGEN = max_generations\n MU = population_size\n LAMBDA = 100\n CXPB = 0.7\n MUTPB = 0.2\n \n\n random.seed(64)\n pool = multiprocessing.Pool(processes=4)\n toolbox.register(\"map\", pool.map)\n \n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n \n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean)\n stats.register(\"std\", np.std)\n stats.register(\"min\", np.min)\n stats.register(\"max\", np.max)\n \n algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats, halloffame=hof)\n\n\n return pop, stats, hof\n\n\n\nif __name__ == \"__main__\":\n import plotter3D as plott\n start = time.time()\n [pop, logger, hof] = main(300, 100)\n finish = time.time()\n best = hof.items[-13]\n print()\n print(\"final time: \",finish-start)\n print(\"Best solution = \",best, volume(best))\n print(\"Fitness of the best individual = \", best.fitness.values[0])\n x = []\n y = []\n for ind in hof:\n x.append(volume(ind))\n y.append(stress(ind))\n plt.title('Optimization using the NSGA-II algorithm')\n plt.plot(x, y, '.', color=\"green\", label = 'Pareto front')\n plt.ticklabel_format(style=\"plain\")\n plt.grid()\n plt.xlabel(\"Tube volume\")\n plt.ylabel(\"Stress [MPa]\")\n plt.legend()\n plt.show()\n \n #-------- without optimization\n model2 = Fem3d(nodes,elementos,forcas,contorno,E2,0.0013,rho)\n Deslocamento2, reacoes2 = model2.solve()\n tensoes2 = model2.getStress(deslo= Deslocamento2)\n tensoes2 = np.array(tensoes2)*(10**-6) # MPa\n pos = plott.Posprocess(model2)\n pos.plotStress3D(tensoes2, var='[MPa]')\n\n #-------- with optimization\n model2 = Fem3d(nodes,elementos,forcas,contorno,E2,AREA_REF*np.array(best),rho)\n Deslocamento2, reacoes2 = model2.solve()\n tensoes2 = model2.getStress(deslo= Deslocamento2)\n tensoes2 = np.array(tensoes2)*(10**-6) # MPa\n pos = plott.Posprocess(model2)\n pos.plotStress3D(tensoes2, var='[MPa]')\n\n # printing the results \n print('=============================================')\n print('\t\tDisplacements')\n print('=============================================')\n print(f'DOF \t\t Displ. 
[mm] \t\t Reactions [N]')\n\n for index in range(len(Deslocamento2)):\n print('{0:1.2f} \t\t {1:4.4f} \t\t {2:4.4f}'.format(index, Deslocamento2[index], reacoes2[index]))\n\n print('=============================================')\n print('\t\tStresses')\n print('=============================================')\n print(f'Element \t\t Stress [MPa] \t\t Area [m^2] ')\n\n for index in range(len(tensoes2)):\n print('{0:1.2f} \t\t {1:4.4f} \t\t {2:4.4f} '.format(index, tensoes2[index], AREA_REF*best[index]))\n\n plt.show()\n\n#Structure volume: 0.09820291580533828\n#Maximum stress: 368.6936618400466\n#154.73","repo_name":"marcy3ait/Trelicia","sub_path":"otimizacao_estudoCaso2.py","file_name":"otimizacao_estudoCaso2.py","file_ext":"py","file_size_in_byte":8176,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9630763745","text":"t0 = 1\nt1 = 1\nt2 = 0\nrepeticoes = 0\n\nnumero = int(input(\"Enter the number of terms :\"))\n\nwhile repeticoes < numero :\n if repeticoes < 2:\n print(\" {} \".format(t0) ,end = \"->\")\n else :\n print(\" {} \".format((t0+t1)) ,end = \"->\")\n t2 = t1\n t1 = t0+t1 \n t0 = t2\n repeticoes += 1\nprint(\"Done\")\n ","repo_name":"victonios8520/Aprendendo-python","sub_path":"Python guanabara/Exercicios 60 a 100/Exercicio63.py","file_name":"Exercicio63.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"25377822092","text":"import numpy as np\r\nimport pandas as pd\r\nimport torch\r\nfrom scipy import sparse\r\n\r\nfrom config import Data_Fold\r\n\r\n\r\ndef getdata():\r\n df_adj = pd.read_excel(Data_Fold / 'IEEE_54.xlsx', header=None)\r\n sp_adj = sparse.csr_matrix(df_adj)\r\n\r\n data = pd.read_excel(Data_Fold / '正常数据TS.xlsx',\r\n header=None) # reads the first sheet by default\r\n data = data.dropna(axis=1)\r\n feat = np.array(data).T\r\n feat = torch.tensor(feat, dtype=torch.float32)\r\n feat = feat / 100\r\n\r\n return sp_adj, feat\r\n\r\n\r\nif __name__ == \"__main__\":\r\n adj, feat = getdata()\r\n","repo_name":"brucelee07/gan-sample","sub_path":"gcn_data.py","file_name":"gcn_data.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"9311003539","text":"protocol = 'http'\nurl = 'localhost'\nport = '8080'\n\nroutes = {\n 'state': {\n 'method': 'GET',\n 'uri': '/plugin/state'\n },\n 'details': {\n 'method': 'GET',\n 'uri': '/plugin/details/{plugin_name}/{plugin_version}'\n }\n}\n\n\ndef get_url(route_name, params):\n route = routes[route_name]\n uri = route['uri']\n method = route['method']\n complete_url = protocol + '://' + url + ':' + port + uri\n\n for key in params:\n complete_url = str.replace(complete_url, '{' + key + '}', params[key])\n\n return (method, complete_url)\n","repo_name":"easeci/easeci-cli","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"41585682714","text":"import re\nimport time\nimport gc\nfrom tqdm import tqdm\nimport random\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nfrom sklearn.metrics import f1_score, roc_auc_score\n\nfrom keras.preprocessing.text 
import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\n\n\ndef seed_torch(seed=1029):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\nembed_size = 300 # how big is each word vector\nmax_features = 95000 # how many unique words to use (i.e num rows in embedding vector)\nmaxlen = 72 # max number of words in a question to use\nbatch_size = 512\ntrain_epochs = 6\nSEED = 1029\n\ndef clean_data(x):\n return x\n\n\ndef load_and_prec():\n train_df = pd.read_csv(\"../input/train.csv\")\n test_df = pd.read_csv(\"../input/test.csv\")\n print(\"Train shape : \", train_df.shape)\n print(\"Test shape : \", test_df.shape)\n\n # lower\n train_df[\"question_text\"] = train_df[\"question_text\"].progress_apply(lambda x: x.lower())\n test_df[\"question_text\"] = test_df[\"question_text\"].progress_apply(lambda x: x.lower())\n\n ## fill up the missing values\n train_X = train_df[\"question_text\"].values\n test_X = test_df[\"question_text\"].values\n\n ## Tokenize the sentences\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(list(train_X))\n train_X = tokenizer.texts_to_sequences(train_X)\n test_X = tokenizer.texts_to_sequences(test_X)\n\n ## Pad the sentences\n train_X = pad_sequences(train_X, maxlen=maxlen)\n test_X = pad_sequences(test_X, maxlen=maxlen)\n\n ## Get the target values\n train_y = train_df['target'].values\n\n # shuffling the data\n np.random.seed(SEED)\n trn_idx = np.random.permutation(len(train_X))\n\n train_X = train_X[trn_idx]\n train_y = train_y[trn_idx]\n\n return train_X, test_X, train_y, tokenizer.word_index\n\n\ndef load_glove(word_index):\n EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\n\n def get_coefs(word, *arr):\n return word, np.asarray(arr, dtype='float32')\n\n embeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(EMBEDDING_FILE))\n\n all_embs = np.stack(embeddings_index.values())\n emb_mean, emb_std = all_embs.mean(), all_embs.std()\n embed_size = all_embs.shape[1]\n\n # word_index = tokenizer.word_index\n nb_words = min(max_features, len(word_index))\n embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\n for word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\n\n return embedding_matrix\n\ntqdm.pandas()\n\nstart_time = time.time()\n\ntrain_X, test_X, train_y, word_index = load_and_prec()\nembedding_matrix_1 = load_glove(word_index)\n\ntotal_time = (time.time() - start_time) / 60\nprint(\"Took {:.2f} minutes\".format(total_time))\n\nembedding_matrix = np.mean([embedding_matrix_1], axis=0)\n# embedding_matrix = np.concatenate((embedding_matrix_1, embedding_matrix_2), axis=1)\nprint(np.shape(embedding_matrix))\n\ndel embedding_matrix_1\ngc.collect()\n\nclass Attention(nn.Module):\n def __init__(self, feature_dim, step_dim, bias=True, **kwargs):\n super(Attention, self).__init__(**kwargs)\n\n self.supports_masking = True\n\n self.bias = bias\n self.feature_dim = feature_dim\n self.step_dim = step_dim\n self.features_dim = 0\n\n weight = torch.zeros(feature_dim, 1)\n nn.init.xavier_uniform_(weight)\n self.weight = nn.Parameter(weight)\n\n if bias:\n self.b = nn.Parameter(torch.zeros(step_dim))\n\n def forward(self, x, mask=None):\n 
feature_dim = self.feature_dim\n step_dim = self.step_dim\n\n eij = torch.mm(\n x.contiguous().view(-1, feature_dim),\n self.weight\n ).view(-1, step_dim)\n\n if self.bias:\n eij = eij + self.b\n\n eij = torch.tanh(eij)\n a = torch.exp(eij)\n\n if mask is not None:\n a = a * mask\n\n a = a / torch.sum(a, 1, keepdim=True) + 1e-10\n\n weighted_input = x * torch.unsqueeze(a, -1)\n return torch.sum(weighted_input, 1)\n\nclass NeuralNet(nn.Module):\n def __init__(self):\n super(NeuralNet, self).__init__()\n\n hidden_size = 60\n\n self.embedding = nn.Embedding(max_features, embed_size)\n self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))\n self.embedding.weight.requires_grad = False\n\n self.embedding_dropout = nn.Dropout2d(0.1)\n self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)\n self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)\n\n self.lstm_attention = Attention(hidden_size * 2, maxlen)\n self.gru_attention = Attention(hidden_size * 2, maxlen)\n\n self.linear = nn.Linear(480, 16)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.1)\n self.out = nn.Linear(16, 1)\n\n def forward(self, x):\n h_embedding = self.embedding(x)\n h_embedding = torch.squeeze(self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))\n\n h_lstm, _ = self.lstm(h_embedding)\n h_gru, _ = self.gru(h_lstm)\n\n h_lstm_atten = self.lstm_attention(h_lstm)\n h_gru_atten = self.gru_attention(h_gru)\n\n avg_pool = torch.mean(h_gru, 1)\n max_pool, _ = torch.max(h_gru, 1)\n\n conc = torch.cat((h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)\n conc = self.relu(self.linear(conc))\n conc = self.dropout(conc)\n out = self.out(conc)\n\n return out\n\nsplits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED).split(train_X, train_y))\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef threshold_search(y_true, y_proba):\n best_threshold = 0\n best_score = 0\n for threshold in tqdm([i * 0.01 for i in range(100)]):\n score = f1_score(y_true=y_true, y_pred=y_proba > threshold)\n if score > best_score:\n best_threshold = threshold\n best_score = score\n search_result = {'threshold': best_threshold, 'f1': best_score}\n return search_result\n\n\ntrain_preds = np.zeros((len(train_X)))\ntest_preds = np.zeros((len(test_X)))\n\nseed_torch(SEED)\n\nx_test_cuda = torch.tensor(test_X, dtype=torch.long).cuda() # .cuda(): CPU -> GPU, .cpu():GPU -> CPU\ntest = torch.utils.data.TensorDataset(x_test_cuda)\ntest_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n\nfor i, (train_idx, valid_idx) in enumerate(splits):\n x_train_fold = torch.tensor(train_X[train_idx], dtype=torch.long).cuda()\n y_train_fold = torch.tensor(train_y[train_idx, np.newaxis], dtype=torch.float32).cuda()\n x_val_fold = torch.tensor(train_X[valid_idx], dtype=torch.long).cuda()\n y_val_fold = torch.tensor(train_y[valid_idx, np.newaxis], dtype=torch.float32).cuda()\n\n model = NeuralNet()\n model.cuda()\n\n loss_fn = torch.nn.BCEWithLogitsLoss(reduction=\"sum\")\n optimizer = torch.optim.Adam(model.parameters())\n\n train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)\n valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)\n\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)\n\n print(f'Fold {i + 1}')\n\n for epoch in range(train_epochs):\n start_time = time.time()\n\n 
model.train()\n avg_loss = 0.\n for x_batch, y_batch in tqdm(train_loader, disable=True): # disable the progress-bar wrapper\n y_pred = model(x_batch)\n loss = loss_fn(y_pred, y_batch)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n avg_loss += loss.item() / len(train_loader)\n\n model.eval() # evaluation mode\n valid_preds_fold = np.zeros((x_val_fold.size(0)))\n test_preds_fold = np.zeros(len(test_X))\n avg_val_loss = 0.\n for i, (x_batch, y_batch) in enumerate(valid_loader):\n y_pred = model(x_batch).detach()\n avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)\n valid_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]\n\n elapsed_time = time.time() - start_time\n print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format(\n epoch + 1, train_epochs, avg_loss, avg_val_loss, elapsed_time))\n\n for i, (x_batch,) in enumerate(test_loader):\n y_pred = model(x_batch).detach()\n\n test_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]\n\n train_preds[valid_idx] = valid_preds_fold\n test_preds += test_preds_fold / len(splits)\n\nsearch_result = threshold_search(train_y, train_preds)\nsub = pd.read_csv('../input/sample_submission.csv')\nsub.prediction = test_preds > search_result['threshold']\nsub.to_csv(\"submission.csv\", index=False)\n\n","repo_name":"wgq1995/nlp","sub_path":"notebooks/pytorch_kernel.py","file_name":"pytorch_kernel.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"73276586756","text":"from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression\nfrom pynestml.symbols.error_type_symbol import ErrorTypeSymbol\nfrom pynestml.symbols.symbol import SymbolKind\nfrom pynestml.utils.logger import LoggingLevel, Logger\nfrom pynestml.utils.messages import MessageCode\nfrom pynestml.visitors.ast_visitor import ASTVisitor\n\n\nclass ASTVariableVisitor(ASTVisitor):\n \"\"\"\n This visitor visits a single variable and updates its type.\n \"\"\"\n\n def visit_simple_expression(self, node):\n \"\"\"\n Visits a single variable as contained in a simple expression and derives its type.\n :param node: a single simple expression\n :type node: ASTSimpleExpression\n \"\"\"\n assert isinstance(node, ASTSimpleExpression), \\\n '(PyNestML.Visitor.VariableVisitor) No or wrong type of simple expression provided (%s)!' % type(node)\n assert (node.get_scope() is not None), \\\n '(PyNestML.Visitor.VariableVisitor) No scope found, run symboltable creator!'\n\n scope = node.get_scope()\n var_name = node.get_variable().get_complete_name()\n var_resolve = node.get_variable().get_scope().resolve_to_symbol(var_name, SymbolKind.VARIABLE)\n\n # update the type of the variable according to its symbol type.\n if var_resolve is not None:\n node.type = var_resolve.get_type_symbol()\n node.type.referenced_object = node\n return\n\n # check if var_name is actually a type literal (e.g. 
\"mV\")\n var_resolve = scope.resolve_to_symbol(var_name, SymbolKind.TYPE)\n if var_resolve is not None:\n node.type = var_resolve\n node.type.referenced_object = node\n return\n\n message = 'Variable ' + str(node) + ' could not be resolved!'\n Logger.log_message(code=MessageCode.SYMBOL_NOT_RESOLVED,\n error_position=node.get_source_position(),\n message=message, log_level=LoggingLevel.ERROR)\n node.type = ErrorTypeSymbol()\n\n def visit_expression(self, node):\n raise Exception(\"Deprecated method used!\")\n","repo_name":"nest/nestml","sub_path":"pynestml/visitors/ast_variable_visitor.py","file_name":"ast_variable_visitor.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"62"} +{"seq_id":"41254740164","text":"# -*- coding: utf-8 -*-\n\n\ndef main():\n import sys\n\n input = sys.stdin.readline\n\n n, m = map(int, input().split())\n l = list(map(int, input().split()))\n\n ng = max(l) - 1\n ok = 10**18\n\n def f(wj):\n count = 1\n tmp_w = wj\n\n for li in l:\n if tmp_w - li < 0:\n count += 1\n tmp_w = wj\n\n tmp_w -= li\n\n if tmp_w >= 1:\n tmp_w -= 1\n\n return count <= m\n\n while abs(ok - ng) > 1:\n wj = (ok + ng) // 2\n\n if f(wj):\n ok = wj\n else:\n ng = wj\n\n print(ok)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KATO-Hiro/AtCoder","sub_path":"ABC/abc301-abc350/abc319/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"16728151369","text":"import re\nimport numpy as np\n\ndef load_X(X_data_path):\n X_data = []\n \n # Open the file for reading\n file = open(X_data_path, 'r')\n \n # Read the dataset from disk\n X_data.append(\n [np.array(series, dtype=np.float32) for series in [\n re.split(\" +\", row.strip(\"\\n\")) for row in file\n ]]\n )\n file.close()\n\n return np.array(X_data)\n\ndataset_local_path = 'synthetic_control_data.txt'\n\n# Load the time series dataset from file\ncontrol_data = load_X(dataset_local_path)\n\n# Save time series dataset to CSV\nnp.savetxt(\"synthetic_control_data.csv\", control_data[0], fmt=\"%2.5f\", delimiter=\",\")\n\n# import pandas as pd\n# df = pd.DataFrame(control_data[0])\n# for idx, row in df.iterrows():\n# import pdb; pdb.set_trace()\n# df.to_csv(\"synthetic_control_data.csv\", header=None, index=None)\n","repo_name":"frankhinek/pachyderm-examples","sub_path":"sensor-classification/convert_to_csv.py","file_name":"convert_to_csv.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"6666773793","text":"import csv\nimport datetime\nimport glob\nimport os\nimport shutil # Delete folder\nimport threading\nimport time\nimport tkinter as tk\nimport webbrowser\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\nimport requests\n\nfrom src import receiver, previous_semesters, previous_data, room_capacity\n\n\nclass UserInterface(Frame):\n # It is better to define values like the following ones as constants (uppercase) in a single place (like here)\n GOOGLE_FORM_URL = 'https://goo.gl/forms/wNkzjymOQ7wiNavf1'\n INSTRUCTIONS_URL = 'https://docs.google.com/document/d/1htRsKmxDX33yawrYqeHkCLWlEL-juRjeM-if8N4f2yo/edit?usp=sharing'\n LATEST_RELEASE_URL = 'https://github.com/igorneaga/schedule/releases/latest'\n\n def __init__(self, master, current_path):\n 
super().__init__(master)\n self.grid()\n\n # Assets\n self.cwd = current_path\n self.path = self.cwd\n\n self.BackImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\back_icon_45x45.png')\n self.OutOrderImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\table_v05_default.png')\n self.InOrderImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\table_v05_in_order.png')\n self.ExcelCopyFile = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\excel_files_icon.png')\n self.ExcelMainFile = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\master_file_icon.png')\n self.CreateMasterImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\create_master.png')\n self.CreatePayrollImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\create_fwm_table.png')\n self.GetPreviousImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\get_prev_tables.png')\n self.ExitApplicationImage = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\quit_button.png')\n self.UseLocalFiles = tk.PhotoImage(file=f'{self.cwd}\\\\assets\\\\use_local.png')\n\n # Default table characteristics\n today_date = time.strftime(\"%Y,%m\")\n today_date_split = today_date.split(',')\n self.table_settings_type = 1\n self.table_settings_year = today_date_split[0]\n self.table_settings_semester = \"Fall\"\n self.table_settings_name = \"Uni_Table\"\n self.table_friday_include = 0\n\n if int(today_date_split[1]) < 5:\n self.web_semester_parameters = \"Spring\"\n else:\n self.web_semester_parameters = \"Fall\"\n self.web_department_parameters = \"ACCT\"\n self.web_year = today_date_split[0]\n # Holds user choice both standard and urlencode\n self.urlencode_dict_list = []\n\n # Information from data files\n self.file_name = None\n self.files_show_directory = []\n self.files_show_names = []\n self.files_string = None\n\n # GUI windows\n self.selection_window = None\n self.files_manipulation_window = None\n self.introduction = None\n self.settings_window = None\n self.creating_step_window = None\n self.notification_window = None\n self.get_example_window = None\n self.payroll_window = None\n self.cost_department_list = None\n\n # GUI buttons, radio buttons, insertion box and others\n self.create_table_button = None\n self.get_value = None # Needs for radio buttons\n self.include_friday = None\n self.table_order_default = None\n self.table_order_type = None\n self.table_name_insertion_box = None\n\n # Cost center files & data\n self.cost_center_string = \"None\"\n self.file = 'cost_center.cvs'\n self.department_cost_dict = {}\n\n # Stores all errors found from receiver.py\n self.error_data_list = []\n\n # Semester & Department & Year from university website\n self.web_semesters_options = []\n self.web_department_options = []\n self.web_year_options = []\n\n # Stores data about room capacity\n self.room_cap_dict = room_capacity.RoomCapacity().get_capacity()\n\n # A label which will keep updating once user choose a data file\n self.button_text = tk.StringVar()\n self.button_text.set(\"File(s) Selected: \")\n self.create_files_names = Button(self.selection_window, border=0,\n textvariable=self.button_text, command=self.change_files_window,\n foreground=\"gray\", font=(\"Arial\", 11, \"bold\"))\n\n # User directory shortcut\n self.user_directory = \"/\"\n\n # Other\n self.payroll_selection = None\n self.listbox = None\n self.folder = None\n self.cost_box_insert = None\n self.mini_frame = None\n self.move_next_step = None\n self.payroll_year_1 = None\n self.payroll_semester_1 = None\n self.payroll_year_2 = None\n self.payroll_semester_2 = None\n\n # Deletes previous 
files\n shutil.rmtree('copy_folder', ignore_errors=True)\n shutil.rmtree('__excel_files', ignore_errors=True)\n shutil.rmtree('web_files', ignore_errors=True)\n\n def receive_semesters():\n try:\n p = previous_semesters.ReceiveSemesters().return_courses_semesters()\n return p\n except requests.exceptions.ConnectionError:\n messagebox.showwarning(title=\"Connection Error\", message=\"Check your connection. Some functions might \"\n \"not work properly\")\n\n self.param = receive_semesters()\n self.organize_semester_data()\n\n try:\n excel_file = glob.glob('__excel_files/*.xlsx')\n if not excel_file:\n pass\n else:\n open(excel_file[0], \"r+\")\n except IOError:\n messagebox.showerror(\"Close File\", \"Please close excel files to eliminate errors\")\n self.introduction_window()\n\n def organize_semester_data(self):\n for param_len in range(len(self.param)):\n # Finds available options from scraping\n for key in self.param[param_len]:\n test_dict = dict()\n if key[0:4] == \"FALL\" or key[0:4] == \"SPRI\":\n find_year_index = key.find(\"2\")\n self.web_semesters_options.append(key[0:find_year_index])\n self.web_year_options.append(key[find_year_index:])\n test_dict[key[0:find_year_index]] = self.param[param_len].get(key)\n self.urlencode_dict_list.append(test_dict)\n\n else:\n symbol_index = key.find(\"(\")\n self.web_department_options.append(key[symbol_index + 1:-1])\n test_dict[key[symbol_index + 1:-1]] = self.param[param_len].get(key)\n self.urlencode_dict_list.append(test_dict)\n\n # Insert additional shortkey departments\n self.web_department_options.append(\"All COB Departments\")\n self.web_department_options.append(\"ACCT & BLAW & MACC\")\n self.web_department_options.append(\"MRKT & IBUS\")\n self.web_department_options.append(\"MGMT & MBA\")\n\n def submit_ticket_form(self):\n \"\"\"Opens a Google Form to collect any reports or requests\"\"\"\n webbrowser.open(self.GOOGLE_FORM_URL)\n\n def open_instructions_url(self):\n \"\"\"Instructions on how to use this program\"\"\"\n webbrowser.open(self.INSTRUCTIONS_URL)\n\n def open_latest_release(self):\n webbrowser.open(self.LATEST_RELEASE_URL)\n\n def main_text_interface(self, button_frame, title_text, back_button_function, description_text=None,\n x_description=18, y_title=20, remove_back=False):\n title_label = ttk.Label(button_frame,\n text=title_text,\n foreground=\"green\",\n font=('Arial', 18))\n title_label.grid(sticky='W',\n column=0,\n columnspan=2,\n row=0,\n rowspan=2,\n padx=250,\n pady=y_title)\n if description_text is None:\n pass\n else:\n # Description of a reason to have this window\n description_label = ttk.Label(button_frame,\n text=description_text,\n foreground=\"gray\",\n font=('Arial', 12))\n\n description_label.grid(column=0,\n row=1,\n rowspan=2,\n padx=x_description,\n pady=75)\n if remove_back is False:\n back_button = Button(button_frame,\n border='0',\n image=self.BackImage,\n command=back_button_function)\n\n back_button.grid(sticky='WN',\n column=0,\n row=1,\n rowspan=2,\n pady=15,\n padx=10)\n\n def interface_window_remover(self):\n \"\"\"Removes window once a user goes to a next step or previous step.\"\"\"\n\n if self.introduction:\n self.introduction.grid_remove()\n\n if self.payroll_window:\n self.payroll_window.grid_remove()\n self.cost_department_list.grid_remove()\n self.cost_dict()\n self.payroll_window = None\n\n if self.get_example_window:\n self.get_example_window.grid_remove()\n\n if self.selection_window:\n self.selection_window.grid_remove()\n\n if self.files_manipulation_window:\n 
self.files_manipulation_window.grid_remove()\n\n if self.settings_window:\n self.settings_window.grid_remove()\n\n if self.creating_step_window:\n self.creating_step_window.grid_remove()\n\n if self.notification_window:\n self.notification_window.grid_remove()\n\n self.create_files_names.place_forget()\n\n def select_excel_files(self):\n \"\"\"Once a user selects the file - it will hold in the list.\"\"\"\n self.file_name = Frame(self).filename = filedialog.askopenfilenames(initialdir=self.user_directory,\n title=\"Select Excel file\",\n filetypes=((\"excel files\", \"*.xlsx\"),\n (\"all files\", \"*.*\")))\n if not self.file_name:\n pass\n else:\n self.user_directory = str()\n # For display and files store\n for filesAmount in range(len(self.file_name)):\n split_user_directory = self.user_directory.split(\"/\")\n split_user_directory = (split_user_directory[0:len(split_user_directory) - 1])\n for dir_length in range(len(split_user_directory)):\n # Stores user directory of the previously selected file to access easily next time\n self.user_directory += split_user_directory[dir_length] + \"/\"\n self.files_show_directory.append(self.file_name[filesAmount])\n self.display_excel_files()\n\n def display_excel_files(self):\n \"\"\"Shows to the user which files has been chosen\"\"\"\n # Prepares the file names into the proper format.\n self.files_show_names = []\n for i in self.files_show_directory:\n z = 0\n for _ in i:\n z -= 1\n if i[z] == '/' or i[z] == '\\\\':\n self.files_show_names.insert(0, i[z + 1:])\n break\n\n if len(self.files_show_names) == 1:\n self.files_string = (\"File(s) Selected: \" + \" \".join(self.files_show_names))\n # Once file is chosen \"Create\" and \"Choose existing\" buttons will be available\n self.create_table_button.configure(state=\"normal\",\n relief=\"groove\",\n bg='#c5eb93',\n border='4')\n\n self.update_button_text(self.files_string)\n\n # Adds a comma if the number of files more than one\n elif len(self.files_show_names) >= 2:\n self.files_string = (\"File(s) Selected: \" + \", \".join(self.files_show_names))\n max_length_allowed = 76\n\n # Removes the strings if the number of words exceeds the limit.\n while len(self.files_string) > max_length_allowed:\n self.files_string = self.files_string[:-1]\n\n # Adds the triple dots if the number of words exceeds the limit\n if len(self.files_string) >= max_length_allowed:\n self.files_string = self.files_string + \"...\\n\"\n # Updates the file selected text.\n self.update_button_text(self.files_string)\n\n def update_button_text(self, text):\n \"\"\"Updates the string in the GUI\"\"\"\n self.button_text.set(text)\n\n def introduction_window(self):\n \"\"\"Window gains information necessary information to create a payroll table from user\"\"\"\n # Reset window info\n self.files_show_names = []\n self.files_show_directory = []\n self.button_text.set(\"File(s) Selected: \")\n\n self.payroll_selection = False\n self.interface_window_remover()\n button_frame = self.introduction = Frame(self)\n button_frame.grid()\n\n # Short welcome text\n heading_text = ttk.Label(button_frame,\n text=\"Select one of the following:\",\n foreground=\"green\",\n font=('Arial', 21))\n # Placing coordinates\n heading_text.grid(column=0,\n columnspan=3,\n row=0,\n padx=160,\n pady=25,\n sticky=\"W\")\n get_previous_button = Button(button_frame,\n border='0',\n image=self.GetPreviousImage,\n command=self.get_table_example_window)\n get_previous_button.grid(column=0,\n row=4,\n sticky='w',\n padx=65)\n\n create_master_button = 
Button(button_frame,\n border='0',\n image=self.CreateMasterImage,\n command=self.selection_step_window)\n create_master_button.grid(column=0,\n row=4,\n sticky='w',\n padx=245)\n\n create_payroll_button = Button(button_frame,\n border='0',\n image=self.CreatePayrollImage,\n command=self.payroll_cost_center)\n create_payroll_button.grid(column=0,\n row=4,\n sticky='w',\n padx=425)\n\n def selection_step_window(self):\n # Removes any other necessary window\n if self.payroll_window is not None:\n self.payroll_selection = True\n self.interface_window_remover()\n\n # Creates a frame\n button_frame = self.selection_window = Frame(self)\n button_frame.grid()\n\n # Sets repeated text\n if self.payroll_selection is False:\n self.main_text_interface(button_frame, title_text=\"Master Table\",\n back_button_function=self.introduction_window,\n description_text=\"The program will create a master table based on Excel files\")\n\n else:\n self.main_text_interface(button_frame, title_text=\"Payroll Table\",\n back_button_function=self.payroll_cost_center,\n description_text=\"The program will create a payroll table based on Excel files\")\n\n # A button to select files\n select_files_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Select all Excel files to continue\",\n command=self.select_excel_files,\n foreground=\"green\",\n font=('Arial', 18, 'bold'))\n select_files_button.place(x=126, y=120)\n\n # Sets location for files selected\n self.create_files_names.place(x=8, y=207)\n\n # Short description for select button\n select_files_description = tk.Label(button_frame,\n text='Select an excel file/files which you would '\n 'like to make a table from',\n foreground=\"gray\",\n font=(\"Arial\", 10, 'bold'))\n select_files_description.place(x=105, y=178)\n\n # Allows to Change/View/Delete file(s)\n modify_files_button = tk.Button(button_frame,\n border=0,\n text='Change/View/Delete file(s)',\n command=self.change_files_window,\n foreground=\"gray\",\n font=(\"Arial\", 10, \"bold\", 'underline'))\n modify_files_button.place(x=8, y=246)\n if self.payroll_selection is False:\n self.create_table_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Create an Excel table\",\n command=self.table_setting_window,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n self.create_table_button.grid(column=0,\n columnspan=3,\n row=8,\n pady=115,\n padx=400)\n else:\n self.create_table_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Select folder to save\",\n command=self.create_payroll_table,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n self.create_table_button.grid(column=0,\n columnspan=3,\n row=8,\n pady=115,\n padx=400)\n if not self.files_show_directory:\n # Will allow going to the next window once you selected at least one file\n self.create_table_button.configure(bg=\"#d9dad9\",\n relief=SUNKEN,\n border='1',\n state=\"disabled\")\n\n def delete_list_element(self):\n \"\"\"Deletes selected file from a list\"\"\"\n\n def get_element_value(listbox):\n value = listbox.get(ACTIVE)\n return value\n\n def delete_list_element(listbox, list_1, list_2, v_index):\n # Removes element from two lists\n listbox.delete(ACTIVE)\n del list_1[v_index]\n list_2_value = list_2[::-1][v_index]\n list_2.remove(list_2_value)\n return list_1, list_2\n\n try:\n selected_file = get_element_value(self.listbox)\n if len(selected_file) == 0:\n self.selection_step_window()\n else:\n ask_message = \"Would you 
like to delete \" + selected_file + \" file?\"\n user_response = messagebox.askokcancel(\"Uni-Scheduler\", ask_message)\n if user_response is True:\n val_index = self.files_show_names.index(selected_file)\n self.files_show_names, self.files_show_directory = delete_list_element(self.listbox,\n self.files_show_names,\n self.files_show_directory,\n val_index)\n else:\n pass\n except ValueError:\n # Returns to selection window if no files in a list\n self.selection_step_window()\n self.update_button_text((\"File(s) Selected: \" + \" \".join(self.files_show_names)))\n\n def change_list_element(self):\n \"\"\"Removes and adds a file\"\"\"\n get_files_amount = len(self.files_show_names)\n if get_files_amount == 0:\n # Returns to selection window if no files in a list\n self.selection_step_window()\n else:\n self.select_excel_files()\n\n def update_listbox(listbox, file):\n # Updates the list box to include selected file\n listbox.insert(END, file)\n\n if get_files_amount < len(self.files_show_names):\n self.delete_list_element()\n update_listbox(self.listbox, self.files_show_names[0])\n\n def change_files_window(self):\n \"\"\"The window for changing/deleting selected files\"\"\"\n # Removes previous window\n self.interface_window_remover()\n button_frame = self.files_manipulation_window = Frame(self)\n button_frame.grid()\n if self.payroll_selection is False:\n self.main_text_interface(button_frame, title_text=\"Modify Files\",\n back_button_function=self.selection_step_window,\n description_text=\"Change or delete the file from the current list\",\n x_description=0)\n else:\n\n self.main_text_interface(button_frame, title_text=\"Modify Files\",\n back_button_function=self.selection_step_window,\n description_text=\"Change or delete the file from the current list\",\n x_description=0)\n\n heading_label = ttk.Label(button_frame,\n text=\"List of files selected:\",\n foreground=\"green\",\n font=('Arial', 14))\n heading_label.grid(sticky='WN',\n column=0,\n row=2,\n rowspan=3,\n padx=13,\n pady=20)\n\n table_list_window = Frame(button_frame, width=300, height=100, bd=0)\n table_list_window.place(x=15, y=110)\n scrollbar = Scrollbar(table_list_window, orient=VERTICAL)\n\n self.listbox = Listbox(table_list_window, yscrollcommand=scrollbar.set, selectmode=SINGLE, font=0, bd=1)\n self.listbox.config(width=32, height=10)\n scrollbar.config(command=self.listbox.yview)\n scrollbar.pack(side=RIGHT, fill=Y)\n self.listbox.pack(side=LEFT)\n\n for item in self.files_show_names:\n self.listbox.insert(END, item)\n change_button = tk.Button(button_frame,\n text=\"Change file\",\n command=self.change_list_element,\n foreground=\"green\",\n bg='#f0f8ff',\n border='4',\n relief=\"groove\",\n font=('Arial', 14))\n\n change_button.place(x=430, y=125)\n\n delete_button = tk.Button(button_frame,\n text=\"Delete file\",\n command=self.delete_list_element,\n foreground=\"green\",\n bg='#f0f8ff',\n border='4',\n relief=\"groove\",\n font=('Arial', 14))\n\n delete_button.place(x=435, y=175)\n\n continue_button = tk.Button(button_frame,\n text=\"Continue\",\n command=self.selection_step_window,\n foreground=\"green\",\n bg='#c5eb93',\n border='4',\n relief=\"groove\",\n font=('Arial', 14))\n continue_button.grid(column=0,\n columnspan=2,\n row=3,\n rowspan=6,\n sticky=\"W\",\n padx=440,\n pady=90)\n\n def get_table_example_window(self):\n \"\"\"Creates a table based on previous semesters web data\"\"\"\n self.interface_window_remover()\n\n button_frame = self.get_example_window = Frame(self)\n button_frame.grid()\n\n 
heading_label = ttk.Label(button_frame,\n text=\"Get a table by department\",\n foreground=\"green\",\n font=('Arial', 18))\n heading_label.grid(column=0,\n row=0,\n padx=90,\n pady=25,\n sticky=\"n\")\n\n # Description of a reason to have this window\n description_label = ttk.Label(button_frame,\n text=\"The program will generate a table by using data from MNSU website\",\n foreground=\"gray\",\n font=('Arial', 12))\n\n description_label.grid(column=0,\n row=0,\n rowspan=2,\n padx=85,\n pady=75)\n\n # Holds variables\n variable_web_semesters = StringVar(button_frame)\n variable_web_years = StringVar(button_frame)\n variable_web_department = StringVar(button_frame)\n\n # Sets defaults values for interface\n variable_web_department.set(self.web_department_options[0])\n variable_web_semesters.set(self.web_semesters_options[0])\n variable_web_years.set(self.web_year_options[0])\n # Sets defaults values for script\n self.web_department_parameters = self.web_department_options[0]\n self.web_year = self.web_year_options[0]\n self.web_semester_parameters = self.web_semesters_options[0]\n\n department_selection_label = tk.Label(button_frame,\n text=\"Select department:\",\n font=('Arial', 16))\n department_selection_label.place(x=30, y=160)\n\n selection_label = tk.Label(button_frame,\n text=\"Select the semester and year: \",\n font=('Arial', 16))\n selection_label.place(x=30, y=200)\n\n # Option menu / Check buttons\n web_department_options = OptionMenu(button_frame,\n variable_web_department,\n *self.web_department_options,\n command=self.return_web_department)\n web_department_options.place(x=230, y=160)\n web_department_options.configure(relief=\"groove\",\n bg='#c5eb93',\n border='4',\n foreground=\"green\",\n font=('Arial', 10, 'bold'))\n\n web_semester_options = OptionMenu(button_frame,\n variable_web_semesters,\n *self.web_semesters_options,\n command=self.return_web_semester)\n web_semester_options.place(x=320, y=200)\n web_semester_options.configure(relief=\"groove\",\n bg='#c5eb93',\n border='4',\n foreground=\"green\",\n font=('Arial', 10, 'bold'))\n\n web_year_options = OptionMenu(button_frame,\n variable_web_years,\n *self.web_year_options,\n command=self.return_web_year)\n web_year_options.place(x=425, y=200)\n web_year_options.configure(relief=\"groove\",\n bg='#c5eb93',\n border='4',\n foreground=\"green\",\n font=('Arial', 10, 'bold'))\n\n create_table_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Get tables and choose folder\",\n command=self.create_web_table,\n foreground=\"green\",\n font=('Arial', 14))\n\n create_table_button.grid(sticky=\"E\",\n column=0,\n columnspan=2,\n row=3,\n padx=20,\n pady=110)\n back_button = Button(button_frame,\n border='0',\n image=self.BackImage,\n command=self.introduction_window)\n\n back_button.grid(sticky='WN',\n column=0,\n row=0,\n rowspan=2,\n pady=15,\n padx=8)\n\n def table_setting_window(self):\n \"\"\"Gives the ability to provide additional changes to the table if the user wants to.\"\"\"\n\n # Removes previous window\n self.interface_window_remover()\n\n # Creates a new frame\n button_frame = self.settings_window = Frame(self)\n button_frame.grid()\n\n self.main_text_interface(button_frame, title_text=\"Master Table\",\n back_button_function=self.selection_step_window,\n description_text=\"Set up the settings for a table(s).\",\n x_description=200, y_title=28)\n\n # Select table type\n self.get_value = tk.IntVar()\n self.include_friday = tk.IntVar()\n\n # Allow the user to select the day 
order(incomplete)\n self.table_order_default = Radiobutton(button_frame,\n text=\"Default order\",\n font=('Arial', 11),\n variable=self.get_value,\n command=self.user_table_choice,\n value=1)\n self.table_order_default.grid(column=0,\n row=2,\n sticky='sw',\n padx=11,\n pady=0)\n\n self.table_order_type = Radiobutton(button_frame,\n text=\"Days in order\",\n font=('Arial', 11),\n variable=self.get_value,\n command=self.user_table_choice,\n value=2)\n self.table_order_type.grid(column=0,\n row=2,\n sticky='sw',\n padx=148,\n pady=0)\n\n out_order_button = Button(button_frame,\n border='0',\n image=self.OutOrderImage,\n command=self.out_order_select)\n out_order_button.grid(column=0,\n row=3,\n rowspan=4,\n sticky='w',\n padx=11,\n pady=0)\n\n in_order_button = Button(button_frame,\n border='0',\n image=self.InOrderImage,\n command=self.in_order_select)\n in_order_button.grid(column=0,\n row=3,\n rowspan=4,\n sticky='w',\n padx=150)\n\n table_name_label = ttk.Label(button_frame,\n text=\"Name the table: \",\n foreground=\"green\",\n font=('Arial', 18))\n table_name_label.grid(column=0,\n columnspan=3,\n row=2,\n sticky='ES',\n padx=80,\n pady=10)\n\n # A box to allow user type a name of the table they desire\n self.table_name_insertion_box = Text(button_frame,\n height=1.2,\n width=27)\n self.table_name_insertion_box.grid(column=0,\n columnspan=3,\n row=2,\n rowspan=4,\n sticky='EN',\n padx=33,\n pady=75)\n\n self.table_name_insertion_box.insert(END, \" Type name here...\")\n self.table_name_insertion_box.bind(\"<1>\", self.name_of_table)\n self.table_name_insertion_box.configure(font=('Courier', 12, 'italic'),\n foreground=\"gray\")\n self.table_name_insertion_box.bind(\"\", self.return_table_name)\n\n # Will provide the user with a four-year option depending on your current year.\n year_options = []\n for i in range(4):\n year_options.append(datetime.date.today().year + (i - 1))\n\n # Holds variables\n variable_semesters = StringVar(button_frame)\n variable_years = StringVar(button_frame)\n today_year = datetime.datetime.now()\n\n semesters_options = [\"Fall\", \"Spring\", \"Summer 1st\", \"Summer 2nd\", ]\n # Sets a Fall semester as a default\n variable_semesters.set(semesters_options[0])\n\n # Set the current year automatically\n for i in year_options:\n if today_year.year == i:\n variable_years.set(today_year.year)\n else:\n pass\n\n semester_label = ttk.Label(button_frame,\n text=\"Select the semester and year: \",\n foreground=\"green\",\n font=('Arial', 16))\n semester_label.place(x=332, y=190)\n\n # Option menu / Check buttons\n semester_options_menu = OptionMenu(button_frame,\n variable_semesters,\n *semesters_options,\n command=self.return_semester)\n semester_options_menu.grid(column=0,\n columnspan=3,\n row=3,\n rowspan=4,\n sticky='e',\n pady=34,\n padx=111)\n semester_options_menu.configure(relief=\"groove\",\n bg='#c5eb93',\n border='4',\n foreground=\"green\",\n font=('Arial', 10, 'bold'))\n\n year_options_menu = OptionMenu(button_frame,\n variable_years,\n *year_options,\n command=self.return_year)\n year_options_menu.grid(column=0,\n columnspan=3,\n row=3,\n rowspan=4,\n sticky='e',\n pady=34,\n padx=37)\n year_options_menu.configure(relief=\"groove\",\n bg='#c5eb93',\n border='4',\n foreground=\"green\",\n font=('Arial', 10, 'bold'))\n\n friday_option = Checkbutton(button_frame,\n text=\"Include Friday\",\n variable=self.include_friday,\n font=('Arial', 10))\n friday_option.grid(sticky=\"nw\",\n column=0,\n row=7,\n padx=7)\n\n next_step_button = Button(button_frame,\n 
relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Select Folder\",\n command=self.create_master_table,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n next_step_button.grid(column=0,\n columnspan=3,\n row=5,\n rowspan=5,\n sticky='ENS',\n pady=100,\n padx=37)\n\n def name_of_table(self, event):\n user_input = self.table_name_insertion_box.get(\"1.0\", END)\n if user_input[:21] == \" Type name here...\":\n self.table_name_insertion_box.delete(\"1.0\", END)\n self.table_name_insertion_box.insert(END, \" \")\n self.table_name_insertion_box.configure(font=('Courier', 12, 'bold'),\n foreground=\"black\")\n\n def create_master_table(self):\n \"\"\"Moves into the creation process\"\"\"\n self.table_friday_include = self.include_friday.get()\n self.program_loading_window(block_table=True, payroll_table=False)\n\n def create_web_table(self):\n self.program_loading_window(block_table=False, payroll_table=False)\n\n def create_payroll_table(self):\n \"\"\"Moves into the creation process\"\"\"\n self.table_friday_include = 1\n self.program_loading_window(block_table=False, payroll_table=True)\n\n def user_table_choice(self):\n \"\"\"Table days order\"\"\"\n self.table_settings_type = self.get_value.get()\n\n def in_order_select(self):\n \"\"\"Sets variable 2 if days in order\"\"\"\n self.table_order_type.select()\n self.table_settings_type = 2\n\n def out_order_select(self):\n \"\"\"Sets variable 1 if follows standard order\"\"\"\n self.table_order_default.select()\n self.table_settings_type = 1\n\n def return_year(self, year_value):\n \"\"\"Captures user selection - year\"\"\"\n self.table_settings_year = year_value\n\n def return_web_department(self, department):\n self.web_department_parameters = department\n\n def return_web_year(self, year):\n self.web_year = year\n\n def return_web_semester(self, semester):\n self.web_semester_parameters = semester\n\n def return_payroll_year_1(self, year):\n self.payroll_year_1 = year\n\n def return_payroll_semester_1(self, semester):\n self.payroll_semester_1 = semester\n\n def return_payroll_year_2(self, year):\n self.payroll_year_2 = year\n\n def return_payroll_semester_2(self, semester):\n self.payroll_semester_2 = semester\n\n def return_semester(self, semester_value):\n \"\"\"Captures user selection - semester\"\"\"\n self.table_settings_semester = semester_value\n\n def return_table_name(self, event):\n \"\"\"Presents a user what he wrote as a table name\"\"\"\n self.table_settings_name = self.table_name_insertion_box.get(\"1.0\", END)\n\n def return_cost_center_list(self, event):\n \"\"\"Presents a user what he wrote as a table name\"\"\"\n self.cost_center_string += self.table_name_insertion_box.get(\"1.0\", END)\n\n def open_master_table(self):\n \"\"\"Opens a master table\"\"\"\n try:\n excel_file = self.table_settings_name.replace('\\n', ' ').replace('\\r', '')\n excel_file = excel_file.replace(\" \", \"\")\n excel_file = str(excel_file) + '.' 
+ 'xlsx'\n excel_file = os.path.join(self.folder, excel_file)\n os.startfile(excel_file)\n except FileNotFoundError:\n for filename in glob.glob(os.path.join(self.folder, '*.xlsx')):\n os.startfile(filename)\n\n def open_payroll_folder(self):\n os.startfile(self.folder)\n\n def open_excel_copies(self):\n \"\"\"For error window\"\"\"\n folder_path = \"copy_folder\\\\\"\n for filename in glob.glob(os.path.join(folder_path, '*.xlsx')):\n os.startfile(filename)\n\n def exit_function(self):\n sys.exit()\n\n def program_loading_window(self, block_table=True, payroll_table=False, count=0):\n self.interface_window_remover()\n\n button_frame = self.creating_step_window = Frame(self)\n button_frame.grid()\n\n self.folder = filedialog.askdirectory(title='Please select a directory')\n if self.folder == \"\":\n if count == 1:\n self.introduction_window()\n else:\n self.program_loading_window(block_table, payroll_table, count=1)\n\n else:\n def create_web_table(web_department_parameters, urlencode_dict_list, web_semester_parameters, web_year,\n web_department_options, folder):\n \"\"\"Department chairs might need an example of a file from the previous semester.\n This function will create a table based on university records.\"\"\"\n urlencode_list = []\n\n folder_path = folder\n\n def create_table(urlencode_dict, web_department, web_semester, c_web_year, get_all_tables=False):\n if not urlencode_dict:\n previous_data.PreviousCourses(folder_path, web_department, web_semester, int(c_web_year),\n get_all=get_all_tables)\n else:\n for len_list in range(len(urlencode_dict)):\n for departament_semester, urlencode in urlencode_dict[len_list].items():\n if departament_semester == web_department:\n urlencode_list.append(urlencode)\n if departament_semester == web_semester:\n urlencode_list.append(urlencode)\n if len(urlencode_list) == 2:\n if get_all_tables is True:\n previous_data.PreviousCourses(folder_path, web_department, web_semester,\n int(c_web_year), urlencode_list[0], urlencode_list[1],\n get_all=get_all_tables)\n else:\n previous_data.PreviousCourses(folder_path, web_department, web_semester,\n int(c_web_year), urlencode_list[0], urlencode_list[1],\n get_all=get_all_tables)\n else:\n if get_all_tables is True:\n previous_data.PreviousCourses(folder_path, web_department, web_semester,\n int(c_web_year), get_all=get_all_tables)\n else:\n previous_data.PreviousCourses(folder_path, web_department, web_semester,\n int(c_web_year), get_all=get_all_tables)\n\n if web_department_parameters not in [\"All COB Departments\", \"ACCT & BLAW & MACC\",\n \"MRKT & IBUS\", \"MGMT & MBA\"]:\n try:\n create_table(urlencode_dict_list, web_department_parameters,\n web_semester_parameters, web_year)\n if os.path.isdir(folder_path):\n os.startfile(folder_path)\n except PermissionError:\n messagebox.showwarning(\"Existing excel file open!\",\n \"Please close your current excel files and try again.\")\n else:\n if web_department_parameters == \"All COB Departments\":\n web_department_options.remove('MGMT & MBA')\n web_department_options.remove('MRKT & IBUS')\n web_department_options.remove('ACCT & BLAW & MACC')\n web_department_options.remove('All COB Departments')\n dep = iter(web_department_options)\n for department in dep:\n create_table(urlencode_dict_list, department,\n web_semester_parameters, web_year, get_all_tables=True)\n elif web_department_parameters == \"ACCT & BLAW & MACC\":\n create_table(urlencode_dict_list, \"ACCT\",\n web_semester_parameters, web_year)\n create_table(urlencode_dict_list, \"BLAW\",\n 
web_semester_parameters, web_year)\n create_table(urlencode_dict_list, \"MACC\",\n web_semester_parameters, web_year)\n elif web_department_parameters == \"MRKT & IBUS\":\n create_table(urlencode_dict_list, \"MRKT\",\n web_semester_parameters, web_year)\n create_table(urlencode_dict_list, \"IBUS\",\n web_semester_parameters, web_year)\n elif web_department_parameters == \"MGMT & MBA\":\n create_table(urlencode_dict_list, \"MGMT\",\n web_semester_parameters, web_year)\n create_table(urlencode_dict_list, \"MBA\",\n web_semester_parameters, web_year)\n else:\n pass\n if os.path.isdir(folder_path):\n os.startfile(folder_path)\n\n global switch\n switch = False\n\n def processor():\n global switch\n\n wait_text = StringVar()\n while not switch:\n wait_label = tk.Label(button_frame, textvariable=wait_text,\n foreground=\"green\",\n font=('Courier', 20, 'bold'))\n wait_label.grid(column=1, row=2, rowspan=2, padx=10, pady=10)\n if block_table is True:\n wait_text.set(\"\\r \\n \\n \\n Creating a scheduling table...\")\n elif payroll_table is True:\n wait_text.set(\"\\r \\n \\n \\n Creating a Payroll table...\")\n else:\n wait_text.set(\"\\r \\n \\n \\n Creating a table from web...\")\n\n wait_label.configure(textvariable=wait_text)\n\n sys.stdout.flush()\n time.sleep(0.1)\n\n processor_threading = threading.Thread(target=processor, name=\"processor thread\")\n processor_threading.start()\n button_frame.update()\n\n # Moves to the next class which is processing all the files\n # try:\n if block_table is True:\n self.error_data_list = receiver.DataProcessor(self.folder,\n self.files_show_directory, self.table_settings_name,\n self.table_settings_semester, self.table_settings_year,\n self.table_settings_type,\n self.table_friday_include,\n self.room_cap_dict, payroll_table).get_excel_errors()\n\n self.user_result_window()\n\n elif payroll_table is False:\n create_web_table(self.web_department_parameters, self.urlencode_dict_list,\n self.web_semester_parameters, self.web_year, self.web_department_options,\n folder=self.folder)\n self.introduction_window()\n else:\n self.error_data_list = receiver.DataProcessor(self.folder, self.files_show_directory,\n self.table_settings_name,\n self.table_settings_semester,\n self.table_settings_year,\n self.table_settings_type,\n self.table_friday_include,\n self.room_cap_dict, payroll_table)\n\n self.payroll_finish_window()\n \"\"\"\n except Exception as e:\n tk.messagebox.showerror(title=\"Program failed\", message=\"Program failed... 
Please try again.\")\n self.introduction_window()\n\n switch = True\n \"\"\"\n\n def user_result_window(self):\n self.interface_window_remover()\n\n def clear_error_data(error_data):\n \"\"\"Clears all unnecessary errors.\"\"\"\n clear_data_list = []\n clear_data_dict = {}\n for i in range(len(error_data)):\n if error_data[i - 1].get(\"Color\") == 'FF687B' or error_data[i - 1].get(\"Color\") == 'FEBBBB':\n try:\n if error_data[i - 1].get(\"Comment\") == error_data[i].get(\"Comment\"):\n pass\n else:\n clear_data_dict['Message'] = error_data[i - 1].get(\"Comment\")\n clear_data_list.append(clear_data_dict.copy())\n except IndexError:\n pass\n return clear_data_list\n\n def remove_dict_duplicates(error_data):\n new_dict_list = []\n for i in range(len(error_data)):\n if error_data[i] not in error_data[i + 1:]:\n new_dict_list.append(error_data[i])\n return new_dict_list\n\n if self.error_data_list == 'User_Doesnt_Listen':\n self.interface_window_remover()\n self.selection_step_window()\n\n clear_error_list = clear_error_data(self.error_data_list)\n clear_error_list = remove_dict_duplicates(clear_error_list)\n if len(clear_error_list) != 0:\n\n button_frame = self.notification_window = Frame(self)\n button_frame.grid()\n\n self.main_text_interface(button_frame, title_text=\"Master Table\",\n back_button_function=self.selection_step_window,\n remove_back=True)\n\n total_number_mistakes = str(int(len(clear_error_list) / 2))\n if total_number_mistakes == '0':\n total_number_mistakes = '1'\n total_number_mistakes = 'Possible mistakes: ' + total_number_mistakes\n red_error_label = tk.Label(button_frame,\n text=total_number_mistakes,\n foreground=\"gray\",\n font=('Courier', 14, 'bold'))\n red_error_label.grid(sticky='w',\n column=0,\n columnspan=4,\n row=2,\n padx=10)\n\n # Creating a scroll window of errors\n def scroll_error_messages():\n \"\"\"Shows all the errors\"\"\"\n for error_len in range(len(clear_error_list)):\n if str(clear_error_list[error_len].get(\"Message\")) == \"None\":\n pass\n elif str(clear_error_list[error_len].get(\n \"Message\")) == \"A program couldn't read this row correctly. 
\" \\\n \"Report it if needed.\":\n ui_message = 'Check for additional errors by pressing \"Open excel copies\"' + (' ' * 100)\n Label(frame, text=ui_message, background=\"#ee8282\").grid(sticky=\"w\", row=99, column=0)\n else:\n # Shows only even to eliminate repetitive conflicts\n if error_len % 2 == 0:\n conflict_row_message = str(clear_error_list[error_len].get(\"Message\"))\n Label(frame, text=conflict_row_message, background=\"#ee8282\").grid(sticky=\"w\",\n row=error_len, column=0)\n\n def show_all_messages(event):\n canvas.configure(scrollregion=canvas.bbox(\"all\"), width=600, height=60)\n\n error_message_frame = Frame(button_frame, relief=GROOVE, width=600, height=150, bd=1)\n error_message_frame.place(x=22, y=100)\n\n canvas = Canvas(error_message_frame)\n frame = Frame(canvas)\n\n # Scroll bar on a right side\n user_scrollbar_y = tk.Scrollbar(error_message_frame, orient=\"vertical\")\n user_scrollbar_y.pack(side=RIGHT, fill=Y)\n canvas.configure(yscrollcommand=user_scrollbar_y.set)\n canvas.pack(side=RIGHT, fill=BOTH)\n user_scrollbar_y.config(command=canvas.yview)\n\n canvas.create_window((0, 0), window=frame, anchor='nw')\n frame.bind(\"\", show_all_messages)\n frame.bind(\"\", show_all_messages)\n frame.bind(\"\", show_all_messages)\n\n instructions_message = 'Use \"Open excel copies\" button to check if the program ' \\\n 'found any conflicts or missing information.'\n\n instructions_message_label = ttk.Label(button_frame,\n text=instructions_message,\n foreground=\"gray\",\n font=('Arial', 10, 'bold'))\n instructions_message_label.grid(sticky=\"W\",\n column=0,\n columnspan=3,\n row=3,\n padx=13,\n pady=31)\n scroll_error_messages()\n\n open_copies_button = Button(button_frame,\n border='0',\n image=self.ExcelCopyFile,\n command=self.open_excel_copies)\n open_copies_button.grid(sticky=\"NW\",\n column=0,\n row=3,\n rowspan=4,\n padx=20,\n pady=90)\n\n open_copies_text = Button(button_frame,\n border='0',\n text=\"Open excel copies\",\n command=self.open_excel_copies,\n foreground=\"gray\",\n font=('Arial', 11, 'bold'))\n open_copies_text.grid(sticky=\"W\",\n column=0,\n row=3,\n rowspan=4,\n padx=8,\n pady=193)\n\n open_main_button = Button(button_frame,\n border='0',\n image=self.ExcelMainFile,\n command=self.open_master_table)\n open_main_button.grid(sticky=\"N\",\n column=0,\n columnspan=2,\n row=3,\n rowspan=4,\n pady=90)\n\n open_main_text = Button(button_frame,\n border='0',\n text=\"Open master table\",\n command=self.open_master_table,\n foreground=\"gray\",\n font=('Arial', 11, 'bold'))\n open_main_text.grid(sticky=\"WE\",\n column=0,\n columnspan=3,\n row=3,\n rowspan=4,\n padx=150,\n pady=0)\n\n exit_button = Button(button_frame,\n border='0',\n image=self.ExitApplicationImage,\n command=self.exit_function)\n exit_button.grid(sticky=\"NE\",\n column=0,\n columnspan=3,\n row=3,\n rowspan=4,\n padx=25,\n pady=90)\n\n exit_text = Button(button_frame,\n border='0',\n text=\"Exit\",\n command=self.exit_function,\n foreground=\"gray\",\n font=('Arial', 11, 'bold'))\n exit_text.grid(sticky=\"E\",\n column=0,\n columnspan=4,\n row=3,\n rowspan=4,\n padx=63,\n pady=0)\n else:\n\n button_frame = self.notification_window = Frame(self)\n button_frame.grid()\n\n instructions_message = \"Everything looks great! 
✔\"\n\n no_errors_label = ttk.Label(button_frame,\n text=instructions_message,\n foreground=\"green\",\n font=('Arial', 24, 'bold'))\n no_errors_label.grid(column=0,\n columnspan=3,\n row=1,\n padx=130,\n pady=30)\n\n open_file_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Open Master Table\",\n command=self.open_master_table,\n foreground=\"green\",\n font=('Arial', 20, 'bold'))\n open_file_button.grid(columnspan=3,\n row=2,\n pady=40)\n\n exit_program_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Exit\",\n command=self.exit_function,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n exit_program_button.grid(columnspan=3,\n row=2,\n rowspan=3,\n pady=65)\n\n user_feedback_button = Button(button_frame,\n border='0',\n text=\"Please provide feedback about your experience.\",\n command=self.submit_ticket_form,\n foreground=\"blue\",\n font=('Arial', 11, 'underline'))\n user_feedback_button.grid(column=0,\n columnspan=3,\n row=3,\n sticky=\"EW\",\n pady=60)\n\n def payroll_cost_center(self):\n self.interface_window_remover()\n\n button_frame = self.payroll_window = Frame(self)\n button_frame.grid()\n\n self.main_text_interface(button_frame, title_text=\"Payroll Table\",\n description_text=\"Provide a cost center for each department for a payroll table\",\n back_button_function=self.introduction_window, x_description=110)\n\n comma_note = ttk.Label(button_frame,\n text=\"- Use a comma if the specific department has multiple cost\\n centers.\",\n foreground=\"green\",\n font=('Arial', 11, 'bold'))\n comma_note.place(x=200, y=125)\n\n prof_note = ttk.Label(button_frame,\n text='- Type \"Professor\" if the department cost center is based on\\n '\n 'the professor of other departments',\n foreground=\"green\",\n font=('Arial', 11, 'bold'))\n prof_note.place(x=200, y=175)\n\n example_note = ttk.Label(button_frame,\n text=\"Example: 'BUS => Professor' will result in giving each\\n\"\n \"faculty department of cost center\",\n foreground=\"gray\",\n font=('Arial', 11, 'bold'))\n example_note.place(x=200, y=233)\n\n self.move_next_step = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Next step >\",\n command=self.selection_step_window,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n self.move_next_step.grid(sticky='e',\n column=0,\n columnspan=2,\n row=6,\n pady=110,\n padx=20)\n\n def show_all_departments(event):\n canvas.configure(scrollregion=canvas.bbox(\"all\"), width=125, height=190)\n\n self.cost_department_list = Frame(button_frame, relief=GROOVE, width=125, height=190, bd=1)\n self.cost_department_list.grid()\n self.cost_department_list.place(x=40, y=110)\n\n canvas = Canvas(self.cost_department_list)\n\n self.mini_frame = Frame(canvas)\n\n # Scroll bar on a right side\n user_scrollbar_y = tk.Scrollbar(self.cost_department_list, orient=\"vertical\")\n user_scrollbar_y.pack(side=RIGHT, fill=Y)\n canvas.configure(yscrollcommand=user_scrollbar_y.set)\n canvas.pack(side=RIGHT, fill=BOTH)\n user_scrollbar_y.config(command=canvas.yview)\n\n canvas.create_window((0, 0), window=self.mini_frame, anchor='nw')\n self.mini_frame.bind(\"\", show_all_departments)\n self.mini_frame.bind(\"\", show_all_departments)\n self.mini_frame.bind(\"\", show_all_departments)\n\n self.scroll_error_messages()\n\n def scroll_error_messages(self):\n \"\"\"Shows all the errors\"\"\"\n\n def get_csv_file(file):\n cost_center = dict()\n if os.path.isfile(file):\n with open(file) as csv_file:\n 
read_csv_file = csv.DictReader(csv_file, delimiter=',')\n for row in read_csv_file:\n cost_center = dict(row)\n return cost_center\n\n csv_file_data = get_csv_file(f'{self.cwd}\\\\department_cost.csv')\n\n cob_department_list = [\"Marketing & International Business\", \"Accounting\", \"Business Law\", \"Finance\",\n \"MACC\", \"Management\", \"MBA\", \"BUS\"]\n\n self.cost_box_insert = []\n department_label_list = []\n\n for i in range(len(cob_department_list)):\n if cob_department_list[i] == \"Marketing & International Business\":\n short_abbrev = \"Marketing & I. Business\"\n department_label_list.append(Label(self.mini_frame, text=short_abbrev)) # creates entry boxes\n else:\n department_label_list.append(Label(self.mini_frame, text=cob_department_list[i]))\n self.cost_box_insert.append(Entry(self.mini_frame, text=cob_department_list[i])) # creates entry boxes\n department_label_list[i].pack()\n if csv_file_data is None:\n pass\n else:\n self.cost_box_insert[i].delete(0, 'end') # Clearing entry box\n self.cost_box_insert[i].insert(END, csv_file_data.get(cob_department_list[i]))\n\n self.cost_box_insert[i].pack()\n\n def cost_dict(self):\n for i in range(len(self.cost_box_insert)):\n self.department_cost_dict.update({self.cost_box_insert[i].cget(\"text\"): self.cost_box_insert[i].get()})\n\n # Writes a csv file\n cost_file = f'{self.cwd}\\\\department_cost.csv'\n try:\n with open(cost_file, 'w') as new_file:\n write_file = csv.DictWriter(new_file, self.department_cost_dict.keys())\n write_file.writeheader()\n write_file.writerow(self.department_cost_dict)\n except PermissionError:\n tk.messagebox.showerror(\"Please close .csv file\")\n self.introduction_window()\n\n def payroll_finish_window(self):\n self.interface_window_remover()\n\n button_frame = self.payroll_window = Frame(self)\n button_frame.grid()\n\n instructions_message = \"Payroll table(s) has been created ✔\"\n\n table_created_text = ttk.Label(button_frame,\n text=instructions_message,\n foreground=\"green\",\n font=('Arial', 24, 'bold'))\n table_created_text.grid(column=0,\n columnspan=3,\n row=1,\n rowspan=2,\n padx=55,\n pady=30)\n\n possible_error_notification = ttk.Label(button_frame,\n text=\"Please check for error at the end of excel file\",\n foreground=\"green\",\n font=('Arial', 12))\n possible_error_notification.grid(column=0,\n sticky='s',\n columnspan=3,\n row=2,\n padx=150,\n pady=0)\n\n open_file_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Open a Folder\",\n command=self.open_payroll_folder,\n foreground=\"green\",\n font=('Arial', 20, 'bold'))\n open_file_button.grid(columnspan=3,\n row=3,\n pady=30)\n\n exit_program_button = Button(button_frame,\n relief=\"groove\",\n bg='#c5eb93',\n border='4',\n text=\"Exit\",\n command=self.exit_function,\n foreground=\"green\",\n font=('Arial', 16, 'bold'))\n exit_program_button.grid(columnspan=3,\n row=3,\n rowspan=3,\n pady=150)\n","repo_name":"igorneaga/schedule","sub_path":"src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":71504,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"62"} +{"seq_id":"16705025279","text":"import pandas as pd\nimport streamlit as st\nimport plotly.express as px\n\n# Function to load Excel data\ndef load_data(file_path, sheet_name):\n return pd.read_excel(file_path, sheet_name=sheet_name)\n\n# Function to filter the DataFrame based on selected months and columns\ndef filter_data(df, selected_months, selected_columns):\n 
filtered_df = df[df[\"Month\"].isin(selected_months)][selected_columns]\n return filtered_df\n\n# Function to calculate column totals\ndef calculate_column_totals(df):\n return df.drop(\"Month\", axis=1).sum()\n\n# Function to display a data table and column totals\ndef display_data_table(filtered_df, column_totals):\n st.markdown(\"---\")\n col1, col2 = st.columns(2)\n\n filtered_df.index += 1\n col1.dataframe(filtered_df)\n col2.markdown(\"**# Total by Category:**\")\n col2.dataframe(pd.DataFrame({\"Totals\": column_totals}))\n\n# Function to create and display a line chart\ndef display_line_chart(data, x_column, y_columns):\n if len(data) > 0:\n fig = px.line(data, x=x_column, y=y_columns, markers=True, line_shape=\"linear\")\n\n # Update the legend title\n fig.update_layout(legend_title_text=\"Product Category\", title=\"# Quantity of Each Product Category\",)\n\n # Update Y-axis title from \"title\" to \"quantity\"\n fig.update_yaxes(title_text=\"Quantity\")\n\n st.plotly_chart(fig)\n else:\n st.warning(\"No data selected for the chart. Please select at least one month and one category.\")\n\n# Main function\ndef main():\n st.set_page_config(\n page_title=\"Categorized Sales\",\n layout=\"wide\"\n )\n\n # Hide Streamlit's menu, header, and footer (CSS restored from the comment above; the original block was stripped during extraction)\n st.markdown(\n \"\"\"\n <style>\n #MainMenu {visibility: hidden;}\n header {visibility: hidden;}\n footer {visibility: hidden;}\n </style>\n \"\"\", unsafe_allow_html=True\n )\n\n # Title\n st.title('Categorized Sales')\n\n # Filter\n st.subheader(\"Please Filter Here:\")\n\n file_path = 'hadron.xlsx'\n sheet_name = \"Categorized Sales\"\n\n df = load_data(file_path, sheet_name)\n\n available_columns = [col for col in df.columns if col != \"Month\"]\n selected_columns = st.multiselect(\"Select Categories:\", available_columns, default=available_columns)\n selected_months = st.multiselect(\"Select Months:\", df[\"Month\"].unique(), default=df[\"Month\"].unique())\n\n if \"Month\" not in selected_columns:\n selected_columns.insert(0, \"Month\")\n\n filtered_df = filter_data(df, selected_months, selected_columns)\n column_totals = calculate_column_totals(filtered_df)\n\n # Display data table and column totals\n display_data_table(filtered_df, column_totals)\n\n # Check if \"Speaker\" and \"Cable\" are in selected_columns\n if \"Speaker\" in selected_columns and \"Cable\" in selected_columns:\n # If both are selected, show the selected line chart\n filtered_cable_speaker = filtered_df[[\"Month\", \"Cable\", \"Speaker\"]]\n display_line_chart(filtered_cable_speaker, \"Month\", [\"Cable\", \"Speaker\"])\n else:\n # If only one category is selected, show that category's line chart\n if \"Speaker\" in selected_columns:\n display_line_chart(filtered_df, \"Month\", [\"Speaker\"])\n elif \"Cable\" in selected_columns:\n display_line_chart(filtered_df, \"Month\", [\"Cable\"])\n else:\n # If both are deselected, show an empty chart\n st.plotly_chart(px.line(pd.DataFrame(columns=[\"Month\"]), x=\"Month\"))\n\n # Create a list of selected columns excluding \"Speaker\" and \"Cable\"\n other_categories = [col for col in selected_columns if col not in [\"Speaker\", \"Cable\"]]\n\n if other_categories:\n display_line_chart(filtered_df, \"Month\", other_categories)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"johnunicorndoe/hadron","sub_path":"pages/05_Categorized_Sales.py","file_name":"05_Categorized_Sales.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"5891559003","text":"# -*- coding: utf-8 -*-\n\"\"\"Poisson problem.\n\"\"\"\nfrom 
hysop.operator.computational import Computational\nfrom hysop.operator.discrete.poisson_fft import PoissonFFT\nfrom hysop.constants import debug\nfrom hysop.operator.velocity_correction import VelocityCorrection\nfrom hysop.operator.reprojection import Reprojection\nfrom hysop.methods_keys import SpaceDiscretisation, Formulation\nfrom hysop.operator.continuous import opsetup\nimport hysop.default_methods as default\n\n\nclass Poisson(Computational):\n \"\"\"\n \\f{eqnarray*}\n v = Op(\\omega)\n \\f} with :\n \\f{eqnarray*}\n \\Delta \\phi &=& -\\omega \\\\\n v &=& \\nabla \\times \\phi\n \\f}\n \"\"\"\n\n @debug\n def __init__(self, output_field, input_field, flowrate=None,\n projection=None, **kwds):\n \"\"\"\n Constructor for the Poisson problem.\n\n @param[out] output_field : solution field\n @param[in] input_field : rhs field\n @param[in] flowrate : a flow rate value (through input_field surf,\n normal to xdir) used to compute a correction of the solution field.\n Default = 0 (no correction). See hysop.operator.output_field_correction.\n @param projection : if None, no projection. Else:\n - either the value of the frequency of reprojection, never update.\n - or a tuple = (frequency, threshold).\n In that case, a criterion\n depending on the input_field will be computed at each time step, if\n criterion > threshold, then frequency projection is active.\n\n Note about method:\n - SpaceDiscretisation == fftw\n - Formulation = 'velocity' or 'pressure'\n velocity : laplacian(phi) = -w and v = nabla X psi, in = vorticity, out = velo\n pressure : laplacian(p) = -nabla.(u.nabla u), in = velo, out = pressure\n \"\"\"\n # Warning : for fftw all variables must have\n # the same resolution.\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Poisson, self).__init__(variables=[output_field, input_field],\n **kwds)\n ## solution of the problem\n self.output_field = output_field\n ## -(right-hand side)\n self.input_field = input_field\n if self.method is None:\n self.method = default.POISSON\n\n if self.method[SpaceDiscretisation] != 'fftw':\n raise AttributeError(\"Method not yet implemented.\")\n\n # Determination of the Poisson equation formulation :\n # Velo Poisson eq or Pressure Poisson eq\n self.formulation = None\n if self.method[Formulation] != 'velocity':\n self.formulation = self.method[Formulation]\n\n self.input = [self.input_field]\n self.output = [self.output_field]\n if flowrate is not None:\n self.withCorrection = True\n self._flowrate = flowrate\n else:\n self.withCorrection = False\n self.correction = None\n self.projection = projection\n self._config = kwds\n\n if projection is not None:\n self.output.append(self.input_field)\n\n def discretize(self):\n # Poisson solver based on fftw\n if self.method[SpaceDiscretisation] == 'fftw':\n super(Poisson, self)._fftw_discretize()\n if self.withCorrection:\n toporef = self.discreteFields[self.output_field].topology\n if 'discretization' in self._config:\n self._config['discretization'] = toporef\n self.correction = VelocityCorrection(\n self.output_field, self.input_field,\n req_flowrate=self._flowrate, **self._config)\n self.correction.discretize()\n\n if isinstance(self.projection, tuple):\n freq = self.projection[0]\n threshold = self.projection[1]\n self.projection = Reprojection(self.input_field,\n threshold, freq,\n **self._config)\n self.projection.discretize()\n else:\n raise AttributeError(\"Method not yet implemented.\")\n\n @debug\n @opsetup\n def setup(self, rwork=None, iwork=None):\n # Activate correction, if required\n if self.withCorrection:\n self.correction.setup()\n cd = self.correction.discrete_op\n else:\n cd = None\n\n # Activate projection, if required\n if isinstance(self.projection, Reprojection):\n # Projection frequency is updated at each\n # time step, and depends on the input_field\n self.projection.setup(rwork=rwork)\n projection_discr = self.projection.discrete_op\n else:\n projection_discr = self.projection\n\n self.discrete_op = PoissonFFT(self.discreteFields[self.output_field],\n self.discreteFields[self.input_field],\n correction=cd,\n rwork=rwork, iwork=iwork,\n projection=projection_discr,\n formulation=self.formulation)\n\n self._is_uptodate = True\n","repo_name":"ljktest/tmp-tests","sub_path":"hysop/operator/poisson.py","file_name":"poisson.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"73384666117","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nIMAGE_SIZE = 256\r\nBATCH_SIZE = 32\r\nEPOCHS = 20\r\nCHANNELS = 1\r\n\r\n\r\n\r\n\r\ntrain_data= tf.keras.preprocessing.image_dataset_from_directory(directory=(r\"C:\\Users\\LENOVO\\Desktop\\Ayush Assignments\\chest-xray\\medical\\training\\chest_xray\\train\"), shuffle=True,image_size=(IMAGE_SIZE,IMAGE_SIZE), color_mode=\"grayscale\",batch_size=BATCH_SIZE, seed=42)\r\ntest_data= tf.keras.preprocessing.image_dataset_from_directory(directory=(r\"C:\\Users\\LENOVO\\Desktop\\Ayush Assignments\\chest-xray\\medical\\training\\chest_xray\\test\"), shuffle=True,image_size=(IMAGE_SIZE,IMAGE_SIZE),color_mode=\"grayscale\", batch_size=BATCH_SIZE, seed=42)\r\nvalidate_data= tf.keras.preprocessing.image_dataset_from_directory(directory=(r\"C:\\Users\\LENOVO\\Desktop\\Ayush Assignments\\chest-xray\\medical\\training\\chest_xray\\val\"), shuffle=True,image_size=(IMAGE_SIZE,IMAGE_SIZE), color_mode=\"grayscale\",batch_size=BATCH_SIZE, seed=42)\r\n\r\nclass_names = train_data.class_names\r\nplt.figure(figsize=(10,10))\r\nfor images, labels in train_data.take(1):\r\n\tfor i in range(12):\r\n\t\tax = plt.subplot(4,3,i+1)\r\n\t\tplt.imshow(images[i].numpy().astype(\"uint8\"))\r\n\t\tplt.title(class_names[labels[i]])\r\n\t\tplt.axis(\"off\")\r\n\r\n\r\n# Plot Image Distribution\r\ncategories = ['Normal', 'Pneumonia']\r\n# train_data is a tf.data.Dataset, not a DataFrame, so gather the labels batch by batch to count each class\r\nall_labels = np.concatenate([label_batch.numpy() for _, label_batch in train_data])\r\nfrequencies = [int(np.sum(all_labels == i)) for i in range(len(categories))]\r\nplt.bar(categories, frequencies)\r\nplt.xlabel(\"Categories\")\r\nplt.ylabel(\"Count\")\r\nplt.title(f'Data Distribution')\r\nplt.show()\r\n\r\n# As shown in the histogram, the dataset is very imbalanced. The dataset is heavily biased towards the pneumonia class, with roughly 3 times as many pneumonia \r\n# chest images as normal chest images. This is not very surprising, given that medical data is typically imbalanced. 
Given this heavy imbalance of pneumonia cases, \r\n# we want to make sure to adjust our classifier for this imbalance.\r\n\r\n\r\n","repo_name":"Alhamdou/Pneumonia-Disease-Prediction","sub_path":"DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"} +{"seq_id":"13886303398","text":"from time import sleep\n\nimport selenium.common.exceptions\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\n\n\ndef get_all_room_info():\n with open(file=\"./result.csv\", mode=\"w\", encoding=\"utf-8\", newline=\"\\n\") as file:\n file.write(\"物件名,賃料,管理費\\n\")\n for page_number in range(1, 99999):\n page_url = \"https://www.citymobile.co.jp/recommend2/page/{}/?dsp%5Bexists%5D=1&dsp%5Blimit%5D=50\".format(\n page_number)\n driver.get(page_url)\n\n all_buildings_elements_on_this_page = driver.find_elements(by=By.XPATH, value='//dd[@class=\"srcRes\"]/ul/li')\n if len(all_buildings_elements_on_this_page) == 0:\n driver.close()\n return\n for li_element in all_buildings_elements_on_this_page:\n title_for_clicking = li_element.find_element(by=By.XPATH, value='.//div[@class=\"bukkenName\"]/a')\n title_for_clicking.click()\n\n # If there is no \"【空室\" title, just skip the following procedures and go to next li_element.\n try:\n empty_house_title = driver.find_element(by=By.XPATH,\n value='//dt[@class=\"article-list-title\" and contains(text(), \"【空室\")]')\n parent_element = empty_house_title.find_element(by=By.XPATH, value='./..')\n\n # If there is \"more\" button, click it.\n # (If the building has more than 10 empty rooms, there will be a more button.)\n try:\n more_button = parent_element.find_element(by=By.XPATH,\n value='.//a[contains(@class, \"btnListMore\")]')\n more_button.click()\n except selenium.common.exceptions.NoSuchElementException:\n pass\n\n rent_fee_elements_list = parent_element.find_elements(by=By.XPATH, value='.//div[@class=\"rentFee\"]')\n if len(rent_fee_elements_list) == 0:\n driver.back()\n continue\n manage_fee_elements_list = parent_element.find_elements(by=By.XPATH, value='.//div[@class=\"manageFee\"]')\n building_name_element = driver.find_element(by=By.XPATH, value='//h2[@class=\"article-name\"]')\n\n for index in range(0, len(rent_fee_elements_list)):\n room_info_list = [building_name_element.text, rent_fee_elements_list[index].text,\n manage_fee_elements_list[index].text]\n for room_info_list_index in range(1, 3):\n room_info_list[room_info_list_index] = room_info_list[room_info_list_index].replace(\"円\",\n \"\").replace(\n \",\",\n \"\").strip()\n file.write(\"{},{},{}\\n\".format(room_info_list[0], room_info_list[1], room_info_list[2]))\n file.flush()\n except selenium.common.exceptions.NoSuchElementException:\n pass\n\n driver.back()\n\n driver.close()\n\n\nif __name__ == '__main__':\n get_all_room_info()\n","repo_name":"JimLambda/temp_test_learn_project","sub_path":"selenium_crawlers/citymobile_co_jp_crawler/citymobile_co_jp_crawler.py","file_name":"citymobile_co_jp_crawler.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"11641514535","text":"from flask import g\nfrom flask_restful import Resource, reqparse, fields, marshal_with\nfrom dateutil import parser\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom ... 
import db\nfrom .users import user_fields\nfrom ...models import User, Machine\n\n\nactive_revision_fields = {\n 'id': fields.Integer,\n 'cpu_make': fields.String,\n 'cpu_name': fields.String,\n 'cpu_socket': fields.String,\n 'cpu_mhz': fields.Integer(default=None),\n 'cpu_proc_cores': fields.Integer(default=None),\n 'chipset': fields.String,\n 'system_memory_gb': fields.Integer(default=None),\n 'system_memory_mhz': fields.Integer(default=None),\n 'gpu_name': fields.String,\n 'gpu_make': fields.String,\n 'gpu_memory_gb': fields.Integer(default=None),\n 'revision_notes': fields.String,\n 'revision_notes_html': fields.String,\n 'pcpartpicker_url': fields.String,\n 'timestamp': fields.DateTime(dt_format='iso8601')\n # 'uri': fields.Url('.revision', absolute=True)\n}\n\nmachine_fields = {\n 'id': fields.Integer,\n 'system_name': fields.String,\n 'system_notes': fields.String,\n 'owner': fields.String,\n 'active_revision': fields.Nested(active_revision_fields),\n 'timestamp': fields.DateTime(dt_format='iso8601'),\n # 'uri': fields.Url('.machine', absolute=True),\n 'user': fields.Nested(user_fields)\n}\n\n\n# View subclass of Resource (which inherits from MethodView)\nclass MachineListAPI(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('system_name', type=str, required=True,\n help='No machine name provided',\n location='json')\n self.reqparse.add_argument('system_notes', type=str, default=\"\",\n location='json')\n self.reqparse.add_argument('owner', type=str, default=\"\",\n location='json')\n self.reqparse.add_argument('timestamp', type=str,\n location='json')\n super(MachineListAPI, self).__init__()\n\n @marshal_with(machine_fields, envelope='machines')\n def get(self):\n return Machine.query.order_by(Machine.timestamp.desc()).all()\n\n @jwt_required\n @marshal_with(machine_fields, envelope='machine')\n def post(self):\n args = self.reqparse.parse_args()\n\n # parse the timestamp provided\n ts = None # set to none if not provided next\n if args['timestamp'] is not None:\n ts = parser.parse(args['timestamp'])\n current_username = get_jwt_identity()\n current_user = User.find_by_username(current_username)\n\n machine = Machine(system_name=args['system_name'],\n system_notes=args['system_notes'],\n owner=args['owner'],\n timestamp=ts,\n active_revision_id=None,\n author_id=current_user.id)\n\n db.session.add(machine)\n db.session.commit()\n return machine, 201\n\nclass UserMachineListAPI(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('system_name', type=str, required=True,\n help='No machine name provided',\n location='json')\n self.reqparse.add_argument('system_notes', type=str, default=\"\",\n location='json')\n self.reqparse.add_argument('owner', type=str, default=\"\",\n location='json')\n self.reqparse.add_argument('timestamp', type=str,\n location='json')\n super(UserMachineListAPI, self).__init__()\n\n @marshal_with(machine_fields, envelope='machines')\n def get(self, id):\n return User.query.get(id\n ).machines.order_by(Machine.timestamp.desc()).all()\n\n # @jwt_required\n @jwt_required\n @marshal_with(machine_fields, envelope='machine')\n def post(self, id):\n args = self.reqparse.parse_args()\n\n # parse the timestamp provided\n ts = None # set to none if not provided next\n if args['timestamp'] is not None:\n ts = parser.parse(args['timestamp'])\n\n machine = Machine(system_name=args['system_name'],\n system_notes=args['system_notes'],\n owner=args['owner'],\n 
timestamp=ts,\n author_id=id)\n\n db.session.add(machine)\n db.session.commit()\n return machine, 201\n\n\nclass MachineAPI(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('system_name', type=str, location='json')\n self.reqparse.add_argument('system_notes', type=str, location='json')\n self.reqparse.add_argument('owner', type=str, location='json')\n self.reqparse.add_argument('timestamp', type=str,\n location='json')\n super(MachineAPI, self).__init__()\n\n @marshal_with(machine_fields)\n def get(self, id):\n return Machine.query.get(id)\n\n # @jwt_required\n @jwt_required\n @marshal_with(machine_fields, envelope='machine')\n def put(self, id):\n machine = Machine.query.get_or_404(id)\n\n # a little clever loop to go through all the args passed and\n # apply them to the newly instantiated Machine object\n # since the SQLAlchemy machine object does not support item\n # assignment, let's use some setattr func\n args = self.reqparse.parse_args()\n\n for k, v in args.items():\n if v is not None:\n # this is a hack because I couldn't get a built-in datetime parser\n # to work. This is bad and you should feel bad for reading it.\n if k == 'timestamp':\n setattr(machine, k, parser.parse(v))\n else:\n setattr(machine, k, v)\n # autocommit? This doesn't appear to be necessary---leaving in for now.\n db.session.commit()\n return machine\n\n # @jwt_required\n @jwt_required\n def delete(self, id):\n Machine.query.filter(Machine.id == id).delete()\n db.session.commit()\n return {'result': True}\n","repo_name":"rivalrockets/rivalrockets-api","sub_path":"app/api_1_0/resources/machines.py","file_name":"machines.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"} +{"seq_id":"22600719960","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom keras.utils.vis_utils import plot_model\nfrom matplotlib.colors import ListedColormap\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\ndef plot_decision_boundary(func, X, y, figsize=(9, 6)):\n amin, bmin = X.min(axis=0) - 0.1\n amax, bmax = X.max(axis=0) + 0.1\n hticks = np.linspace(amin, amax, 101)\n vticks = np.linspace(bmin, bmax, 101)\n\n aa, bb = np.meshgrid(hticks, vticks)\n ab = np.c_[aa.ravel(), bb.ravel()]\n c = func(ab)\n cc = c.reshape(aa.shape)\n\n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n\n fig, ax = plt.subplots(figsize=figsize)\n contour = plt.contourf(aa, bb, cc, cmap=cm, alpha=0.8)\n\n ax_c = fig.colorbar(contour)\n ax_c.set_label(\"$P(y = 1)$\")\n ax_c.set_ticks([0, 0.25, 0.5, 0.75, 1])\n\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)\n plt.xlim(amin, amax)\n plt.ylim(bmin, bmax)\n\n\npoints = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]\nclasses = [1.0, 0.0, 0.0, 1.0]\n\nrandom.seed(10)\ntf.random.set_seed(10)\n\nmodel = keras.Sequential([\n keras.layers.Dense(16, activation='sigmoid'),\n keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(loss='mse', optimizer=keras.optimizers.Adam(\n learning_rate=0.05, epsilon=1e-07), metrics=['AUC'])\n\nhistory = model.fit(points, classes, epochs=100, verbose=0, batch_size=4)\n\nplt.plot(history.history['loss'])\nplt.show()\n\nresults = model.predict(np.array(points))\nresults = [0 if a < 0.5 else 1 for a in results]\n\nplot_decision_boundary(model.predict, np.array(points), 
results)\nplt.show()\n","repo_name":"exorevan/AI-R-D-projects-","sub_path":"Classifiers/XOR-task/keras-sequential/XOR.py","file_name":"XOR.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"62"}
+{"seq_id":"31381573981","text":"# -*- encoding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom cache.views import *\n\nurlpatterns = [\n url(r'^count$', get_count, name='get_count'),\n url(r'^batch$', get_batch, name='get_batch'),\n url(r'^(?P<doi>10\\..*)', get_doi, name='get_doi'),\n url(r'^zotero/(?P<doi>10\\..*)', get_zotero_doi, name='get_zotero_url'),\n url(r'^zotero/query', get_zotero_url, name='get_zotero_url'),\n]\n\n","repo_name":"wetneb/doi_cache","sub_path":"cache/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"62"}
+{"seq_id":"12626805776","text":"import csv\nfrom googlesearch import search\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.internationalschoolsearch.com/search/All+All+All+India+All/\"\n\ndatas = []\n\nr = requests.get(url)\nhtmlContent = r.content\n# print(htmlContent)\n\nsoup = BeautifulSoup(htmlContent, 'html.parser')\n# print(soup.prettify())\n\nitems= soup.findAll('div', 'col-md-4 listing')\n# print(items)\n\nfor x in items:\n names = x.findAll('h3')\n actualNames = names[1].string\n # print(names[1].string)\n\n address = x.findAll('p')\n actual = address[0].text\n ActualAdress = actual.strip('\\n')\n ActualAdress = ActualAdress.strip('\\t')\n string = str(actualNames) + str(ActualAdress)\n # search() may return a generator, so materialize it before indexing below\n j = list(search(string, num_results=3))\n # print(j[0])\n datas.append([actualNames, ActualAdress,j[0]])\n # print(actual)\n\n # list = pd.DataFrame({'names':names, 'actual':actual})\n\nwith open('international.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n headers = ['Schools Names',' Schools Address', 'websites']\n writer.writerow(headers)\n for data in datas:\n writer.writerow(data)\n#\n# print(\"Stopped\")","repo_name":"Nitish-Kumar-kushwaha/List-of-International-Schools-in-India","sub_path":"InterNationalSchoolsListInIndia.py","file_name":"InterNationalSchoolsListInIndia.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4082184778","text":"import numpy as np\nfrom matrixcross import matrixcross\n\n\nclass Fuselage:\n def __init__(self, m, pcm, I):\n self.m = m\n self.pcm = pcm\n self.I = I\n self.ttpcm = matrixcross(pcm)\n self.tpcm = self.ttpcm.transpose()\n self.MRB = np.block([[np.eye(3) * m, m * self.ttpcm], [m * self.tpcm, self.I]])\n self.N = self.MRB[:, 0: 3]\n\n def getCRB(self, omega):\n CRB = np.block([[self.m * omega, self.m * omega * self.ttpcm], [self.m * self.tpcm * omega, self.I * omega]])\n\n return CRB\n","repo_name":"mid2SUPAERO/HAR_wing_aeroelasticity","sub_path":"HAR Wing Aeroelastical Tool/Fuselage.py","file_name":"Fuselage.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"12780969506","text":"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport scipy.stats as stats\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom statsmodels.stats.weightstats import ztest\r\nfrom statsmodels.stats.weightstats import 
ztest\r\nfrom scipy.stats import chi2_contingency\r\n\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n#Sample_Size\r\nsample_size=2000\r\n\r\n#Z_Critical Score\r\nz_critical = stats.norm.ppf(q = 0.95) \r\n\r\n# Critical Value\r\ncritical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*\r\n df = 6) # Df = number of variable categories(in purpose) - 1\r\n\r\n\r\n#Reading file\r\ndata=pd.read_csv(path)\r\n\r\n#Code starts here\r\n#task 1\r\ndata_sample =data.sample(n=sample_size,random_state=0)\r\n\r\nsample_mean= data_sample['installment'].mean()\r\nprint(\"Sample mean is \",sample_mean)\r\ninstallment_std= data_sample['installment'].std()\r\nprint(\"installment_std is \",installment_std)\r\nmargin_of_error = z_critical*(installment_std/math.sqrt(sample_size))\r\nprint(\"margin_of_error is\",margin_of_error)\r\nconfidence_interval=(sample_mean - margin_of_error, sample_mean + margin_of_error)\r\nprint('confidence_interval = ',[round(confidence_interval[0],2),round(confidence_interval[1],2)])\r\ntrue_mean = data['installment'].mean()\r\nprint('true_mean = ',round(true_mean,2))\r\nif true_mean >= confidence_interval[0] and true_mean <= confidence_interval[1]:\r\n print (\"true mean of installment column of data lies in the confidence interval.\")\r\nelse :\r\n print (\"true mean of installment column of data does not lie in the confidence interval\")\r\n\r\n#CLT\r\nsample_sizes = np.array([20,50,100])\r\nplt.figure(figsize = [10,5])\r\nfor sample_size in sample_sizes:\r\n lst = []\r\n for i in range (1000):\r\n data_new = data.sample(sample_size)\r\n lst.append(data_new['installment'].mean())\r\n sns.distplot(lst,hist = True, label = 'sample size {}'. format(sample_size))\r\n plt.legend()\r\n#Small Business Interests\r\ndata['int.rate']=data['int.rate'].map(lambda x:str(x)[:-1])\r\n\r\n#divide column value by 100\r\ndata['int.rate']=data['int.rate'].astype(float)/100\r\n\r\n#Applying ztest for hypothesis\r\nz_statistic_1,p_value_1 =ztest(x1=data[data['purpose']=='small_business']['int.rate'],value=data['int.rate'].mean(),alternative ='larger')\r\n\r\nprint(\"z_statistic is:{}\".format(z_statistic_1))\r\nprint(\"P_value is :{}\".format(p_value_1))\r\n\r\n#Installment vs Loan Defaulting\r\nz_statistic_2,p_value_2 =ztest(x1=data[data['paid.back.loan']=='No']['installment'],x2=data[data['paid.back.loan']=='Yes']['installment'])\r\n\r\nprint(\"z_statistic is:{}\".format(z_statistic_2))\r\nprint(\"P_value is :{}\".format(p_value_2))\r\n\r\n#Purpose vs Loan Defaulting\r\n\r\nyes =data[data['paid.back.loan']=='Yes']['purpose'].value_counts()\r\nno =data[data['paid.back.loan']=='No']['purpose'].value_counts()\r\n\r\nobserved=pd.concat([yes.transpose(),no.transpose()],axis=1,keys=['Yes','No'])\r\n\r\nprint(observed)\r\n\r\nchi2,p,dof,ex=chi2_contingency(observed)\r\nprint(\"critical_value is : {}\".format(critical_value))\r\n\r\nprint(\"chi_statistic is : {}\".format(chi2))\r\n\r\nprint(\"p_value is : {}\".format(p))\r\n\r\n\r\n\r\n\n\n\n","repo_name":"saru712/ga-learner-dsmp-repo","sub_path":"banking-inferences-project/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"17383454767","text":"#!/usr/bin/env python\n\nfrom readline import write_history_file\nfrom traceback import print_tb\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\n\nimport numpy 
as np \nimport sys\n\nimport time\n#robot 1 sensor front\n\nr1_dsf={}\n\n\n#robot 1 sensor back\n\nr1_dsb={}\n\n#robot 1 sensor front\n\nr1_dsf2=[]\n\n\n#robot 1 sensor back\n\nr1_dsb2=[]\n\ndef robot_1_sensorfront(msg):\n\n global r1_dsf,r1_dsf2\n\n #rospy.loginfo(\"I heard %s\",data.data) \n\n r1_dsf={}\n \n for ind in range(len(msg.ranges)):\n\n if not msg.ranges[ ind ] in r1_dsf:\n r1_dsf[ msg.ranges[ ind ] ]= [ind * msg.angle_increment - msg.angle_min]\n else:\n r1_dsf[ msg.ranges[ ind ] ].append(ind * msg.angle_increment - msg.angle_min)\n r1_dsf2=list(msg.ranges)\n\ndef robot_1_sensorback(msg):\n\n global r1_dsb,r1_dsb2\n\n r1_dsb={}\n #print(\"s\")\n for ind in range(len(msg.ranges)):\n\n if not msg.ranges[ ind ] in r1_dsf:\n r1_dsb[ msg.ranges[ ind ] ]= [ind * msg.angle_increment - msg.angle_min]\n else:\n r1_dsb[ msg.ranges[ ind ] ].append(ind * msg.angle_increment - msg.angle_min)\n r1_dsb2=list(msg.ranges)\n\ndef angle_min(dis_dist):\n if len(dis_dist.keys())>0:\n minimo=np.min(np.array(dis_dist.keys()))\n angulo=dis_dist[minimo][0]\n return angulo,minimo\n else:\n return 0,0\n\ndef cero_move(pub,move):\n\n move.linear.x=0\n move.linear.y=0\n move.linear.z=0\n\n move.angular.x=0\n move.angular.y=0\n move.angular.z=0\n\n pub.publish(move)\n\ndef alante(minimo,pub,move):\n\n move.linear.x=0.1\n move.linear.y=0\n move.linear.z=0\n\n move.angular.x=0\n move.angular.y=0\n move.angular.z=0\n\n pub.publish(move)\n return move.linear.x\n\ndef magnitude(vector): \n return np.sqrt(sum(pow(element, 2) for element in vector))\n\nr1_positions=[]\ndef robot_1_Odometry(msg):\n global r1_positions\n x=msg.pose.pose.position.x\n y=msg.pose.pose.position.y\n #if len(r1_positions)>=1:\n # desplacamiento+=magnitude(np.array(r1_positions[-1])-np.array([x,y]))\n r1_positions.append([x,y])\n\ndef dist_recorrida(p,previous_p):\n #print(p,previous_p)\n x,y=p\n previous_x,previous_y=previous_p\n #print\n d_increment = np.sqrt((x - previous_x) * (x - previous_x) +\n (y - previous_y) * (y - previous_y))\n #print(d_increment)\n return d_increment\n\ndef funcion_fitness(distancia_minimas,vels,angulos,tiempo):\n global dist_minima_front_l,dist_minima_front_r,r1_positions,dist_minima_back\n\n #print(robot,tiempo)\n #tiempo_n=tiempo\n #if tiempo>50:\n # tiempo_n=50\n #tiempo_n=cambio_de_tiempo(tiempo_n)\n vels=np.array(vels)\n angulos=np.array(angulos)\n \n desplacamiento=np.sum(np.array([ dist_recorrida( r1_positions[i],r1_positions[i-1] ) for i in range(1, len(r1_positions) ) ]) )\n print(\"tiempo,desplazamiento,distanciaminima\")\n print(tiempo,desplacamiento,np.mean(distancia_minimas))\n print(\"mena angulos,sum angulos,maximode angulos,mean vels,min vels\")\n if len(angulos)>0:\n print(np.mean(angulos),np.sum(angulos),np.max(angulos),np.mean(vels),np.min(vels))\n print(\"mean vels,min vels,max vels\")\n print(np.mean(vels),np.min(vels),np.max(vels))\n print(\"minimo frontal,minimo derecha,minimo izquierda\")\n print(np.mean(dist_minima_front_l),np.mean(dist_minima_front_r))\n\n #distancias=np.mean(dist_minima_front_l)+np.mean(dist_minima_front_r)+2.5*desplacamiento\n #distancias=np.mean(distancia_minim_front)*15+np.mean(dist_minima_front_l)*5+np.mean(dist_minima_front_r)*5+100*desplacamiento+30*np.mean(distancia_minimas)\n #distancias+=np.mean(dist_minima_back)*30\n #vel=3*np.mean(vels)+(3/np.mean(angulos))+len(angulos)/np.sum(angulos)\n #vel=100*np.mean(vels)+(100/(np.mean(angulos)*10))+(100/(np.sum(angulos)))+(tiempo/(np.sum(angulos)*10))\n\n #a=distancias + vel\n #lo que hace es que el robot se mueva 
much more slowly so it does not collide\n\n    #0.5*np.mean(distancia_minimas)+(np.mean(vels)/3)+(3/np.mean(angulos))+(time/np.sum(angulos))+desplacamiento+np.mean(distancia_minim_front)+(len(angulos)/np.sum(angulos))\n    \n\n\ndef print_valores_fitness():\n    print(\"minimo_frontal\")\n\ndef get_giro(ang,dist,pub,move):\n    \n    move.angular.z= -1/(dist*ang)\n    pub.publish(move)\n    # return the current linear speed and the commanded turn rate\n    return move.linear.x,move.angular.z\n\n\nrospy.init_node('node_name')\nsub=rospy.Subscriber(\"/robot1/laser_front/scan\", LaserScan, robot_1_sensorfront)\nsub2=rospy.Subscriber(\"/robot1/laser_back/scan\", LaserScan, robot_1_sensorback)\nsub3=rospy.Subscriber(\"/robot1/laser_back/odom\", Odometry, robot_1_Odometry) \nmove_r1=rospy.Publisher(\"/robot1/cmd_vel\", Twist,queue_size=1)\nmove=Twist()\n\nrate=rospy.Rate(10)\n\ncero_move(move_r1,move)\ndist_minima=[]\ndist_minima_front=[]\nglobal dist_minima_front_l,dist_minima_front_r,dist_minima_back\ndist_minima_front_l=[]\ndist_minima_front_r=[]\ndist_minima_back=[]\nangulos=[]\ninicial = time.time()\n\nvels=[]\n\nwhile not rospy.is_shutdown():\n    print(\"testing if it works\")\n    if len(r1_dsf2)>0:\n        #print(r1_dsf)\n        angulo,minimo=angle_min(r1_dsf)\n\n        X = np.array([ r1_dsf2+r1_dsb2 ]).T\n        dist_minima.append(np.min(X))\n        dist_minima_front.append(np.min(np.array(r1_dsf2)))\n        dist_minima_front_l.append(np.min(np.array(r1_dsf2[:len(r1_dsf2)//2 ])))\n        dist_minima_front_r.append(np.min(np.array(r1_dsf2[len(r1_dsf2)//2:] )))\n        dist_minima_back.append(np.min(np.array(r1_dsb2)))\n        ahora=time.time()\n        if minimo>0:\n            if minimo>0.5:\n                vels.append(alante(minimo,move_r1,move))\n            else:\n                a=get_giro( angulo,minimo,move_r1,move)\n                vels.append(a[0])\n                angulos.append(a[1])\n\n            funcion_fitness(dist_minima,vels,angulos,ahora-inicial)    \n\n    else:\n        cero_move(move_r1,move)\n\n    # spin() simply keeps python from exiting until this node is stopped\n    #rospy.spin()\n    rate.sleep()\n","repo_name":"ajlorenzo1315/robotica_de_servicios","sub_path":"practica_1/ali/to_ubuntu/p1servizos/src/robot_1.py","file_name":"robot_1.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"16963311567","text":"#Python Exercise 090: Write a program that reads a student's name and average grade, also storing their pass/fail status\n# in a dictionary. 
At the end, display the contents of the structure on screen.\n\nalunomedia = {}\n\n#collecting data\nalunomedia['nome'] = str(input(\"STUDENT'S NAME:\"))\nalunomedia['media'] = float(input(\"STUDENT'S AVERAGE:\"))\n\n#checking the status\nif alunomedia['media'] >= 6:\n    alunomedia['resultado'] = \"PASSED\"\nelse:\n    alunomedia['resultado'] = \"FAILED\"\n\n#printing results\nprint(\"-=-\"*18)\nprint(\"STUDENT:\", alunomedia['nome'])\nprint(\"GRADE:\", alunomedia['media'])\nprint(\"STATUS:\", alunomedia['resultado'])\n\n\n","repo_name":"Miguelmorassuti/Python-Exercicios","sub_path":"Pacote-Dowload/EX 90 - DICIONÁRIOS - PYTHON.py","file_name":"EX 90 - DICIONÁRIOS - PYTHON.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"30737693367","text":"\"\"\"\nThis module contains a wrapper for the trt_pose model to estimate human poses.\n\"\"\"\n\nfrom os import path\nimport json\nimport trt_pose.coco\nimport trt_pose.models\nimport torch\nimport torch2trt\nimport cv2\nimport torchvision.transforms as transforms\nimport PIL.Image\nimport numpy as np\nfrom trt_pose.draw_objects import DrawObjects\nfrom trt_pose.parse_objects import ParseObjects\n\nDATASETS_DIR = '../datasets/'\nMODELS_DIR = '../pretrained-models/'\n\nDATASET_POSE = 'human_pose.json'\nMODEL_RESNET18 = 'resnet18_baseline_att_224x224_A_epoch_249.pth'\nMODEL_RESNET18_OPTIMIZED = 'resnet18_baseline_att_224x224_A_epoch_249_trt.pth'\n\nWIDTH = 224\nHEIGHT = 224\n\nclass PoseModel():\n    \"\"\"\n    Class for estimating poses with trt_pose.\n    \"\"\"\n    def __init__(self):\n        # load json containing human pose tasks\n        with open(DATASETS_DIR + DATASET_POSE, 'r') as human_pose_file:\n            human_pose = json.load(human_pose_file)\n        # set topology\n        topology = trt_pose.coco.coco_category_to_topology(human_pose)\n        # load model\n        num_parts = len(human_pose['keypoints'])\n        num_links = len(human_pose['skeleton'])\n        model = trt_pose.models.resnet18_baseline_att(num_parts, 2 * num_links).cuda().eval()\n        # load model weights\n        model.load_state_dict(torch.load(MODELS_DIR + MODEL_RESNET18))\n        # optimize the model\n        data = torch.zeros((1, 3, HEIGHT, WIDTH)).cuda()\n        if not path.exists(MODELS_DIR + MODEL_RESNET18_OPTIMIZED):\n            model.load_state_dict(torch.load(MODELS_DIR + MODEL_RESNET18))\n            self.model_trt = torch2trt.torch2trt(\\\n                model, [data], fp16_mode=True, max_workspace_size=1<<25)\n            torch.save(self.model_trt.state_dict(), MODELS_DIR + MODEL_RESNET18_OPTIMIZED)\n        self.model_trt = torch2trt.TRTModule()\n        self.model_trt.load_state_dict(torch.load(MODELS_DIR + MODEL_RESNET18_OPTIMIZED))\n        # setup\n        self.parse_objects = ParseObjects(topology)\n        self.draw_objects = DrawObjects(topology)\n        self.mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()\n        self.std = torch.Tensor([0.229, 0.224, 0.225]).cuda()\n        self.device = torch.device('cuda')\n\n    @classmethod\n    def get_keypoints(cls, image, counts, objects, peaks):\n        \"\"\"\n        peaks: 1x18x100x2\n        \"\"\"\n        # COCO order\n        coco_indices = [0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]\n        # List of all {pose, score}s within each frame\n        frame_dets = []\n        height, width = image.shape[:2]\n        count = int(counts[0])\n        for i in range(count):\n            obj = objects[0][i]\n            pose = np.empty((0, 2), float)\n            for j in range(obj.shape[0]):\n                k = int(obj[j])\n                if k >= 0:\n                    peak = peaks[0][j][k]\n                    k_x = float(peak[1]) * width\n                    k_y = float(peak[0]) * height\n                    pose = np.vstack([pose, [k_x, k_y]])\n                else:\n                    pose = np.vstack([pose, [0, 0]])\n            pose = 
pose[coco_indices]\n for j in range(pose.shape[0]):\n coords = tuple(np.round(pose[j]).astype(int))\n cv2.circle(image, coords, 3, (0, 255, 0), 2)\n det = {'pose': pose.tolist(), 'score': 1.0}\n frame_dets.append(det)\n return frame_dets\n\n def preprocess(self, image):\n \"\"\"\n Preprocesses an image before handing it over to be\n processed by the NN.\n \"\"\"\n self.device = torch.device('cuda')\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = PIL.Image.fromarray(image)\n image = transforms.functional.to_tensor(image).to(self.device)\n image.sub_(self.mean[:, None, None]).div_(self.std[:, None, None])\n return image[None, ...]\n\n def estimate_pose(self, frame):\n \"\"\"\n Passes an image through the NN to estimate the poses on it.\n Returns the frame resized and its keypoints.\n \"\"\"\n image = cv2.resize(frame, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_AREA)\n data = self.preprocess(image)\n cmap, paf = self.model_trt(data)\n cmap, paf = cmap.detach().cpu(), paf.detach().cpu()\n counts, objects, peaks = self.parse_objects(cmap, paf)\n counts = counts.detach().cpu().numpy()\n objects = objects.detach().cpu().numpy()\n peaks = peaks.detach().cpu().numpy()\n height, width = image.shape[:2]\n image_resized = cv2.resize(image, (int(width) * 5, int(height) * 5))\n keypoints = PoseModel.get_keypoints(image_resized, counts, objects, peaks)\n return image_resized, keypoints\n \n","repo_name":"IW276/IW276WS20-P12","sub_path":"src/utils/pose_model.py","file_name":"pose_model.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74356791626","text":"import os\nimport asyncio\n\nfrom audio_converter.audio_converter import AudioConverter, AudioUtils\nfrom common.config import config\nfrom common.util import PathUtils\n\n\nclass ExhaleConverter(AudioConverter):\n def __init__(self, semaphore, file_path, src_path, dst_path):\n super().__init__(semaphore, file_path, src_path, dst_path)\n\n async def single_convert(self):\n async with self.semaphore:\n print(f'converting to USAC: {self.file_path}')\n\n ffmpeg_path = config.get('executable', {}).get('ffmpeg', 'ffmpeg')\n exhale_path = config.get('executable', {}).get('exhale', 'exhale')\n exhale_preset = config.get('usac_config', {}).get('preset', 5)\n new_file_path = PathUtils.create_file_path_struct(self.file_path, self.src_path, self.dst_path, '.m4a')\n tmp_file_path = os.path.join(os.path.dirname(new_file_path), f'_tmp_{os.path.basename(new_file_path)}')\n\n ffmpeg_cmd = f'\"{ffmpeg_path}\" -y -i \"{self.file_path}\" -f wav -'\n exhale_cmd = f'\"{exhale_path}\" {exhale_preset} \"{tmp_file_path}\"'\n\n pipe_reader, pipe_writer = os.pipe()\n\n ffmpeg_process = await asyncio.create_subprocess_shell(\n ffmpeg_cmd,\n stdout=pipe_writer,\n stderr=asyncio.subprocess.DEVNULL\n )\n os.close(pipe_writer)\n\n exhale_process = await asyncio.create_subprocess_shell(\n exhale_cmd,\n stdin=pipe_reader,\n stderr=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n )\n os.close(pipe_reader)\n\n await exhale_process.communicate()\n await ffmpeg_process.communicate()\n\n metadata = await AudioUtils.get_metadata_by_ffprobe(self.file_path)\n await AudioUtils.add_metadata_by_ffmpeg(metadata, tmp_file_path, new_file_path)\n\n async def cue_convert(self):\n sub_workers_list = []\n\n async with self.semaphore:\n new_file_dir = PathUtils.create_dir_path_struct(self.file_path, self.src_path, self.dst_path)\n tracks = self._get_cue_tracks()\n for track 
in tracks:\n out_track_name = f'{track[\"idx\"]:02d}. {track[\"title\"]}.m4a'\n out_track_path = os.path.join(new_file_dir, out_track_name)\n tmp_track_path = os.path.join(\n os.path.dirname(out_track_path),\n f'_tmp_{os.path.basename(out_track_path)}'\n )\n\n ffmpeg_path = config.get('executable', {}).get('ffmpeg', 'ffmpeg')\n ffmpeg_cmd = f'\"{ffmpeg_path}\" -y -i \"{self.file_path}\" -ss {track[\"start_time\"]}'\n if track.get('end_time'):\n ffmpeg_cmd += f' -to {track[\"end_time\"]}'\n ffmpeg_cmd += ' -f wav -'\n\n exhale_path = config.get('executable', {}).get('exhale', 'exhale')\n exhale_preset = config.get('usac_config', {}).get('preset', 5)\n exhale_cmd = f'\"{exhale_path}\" {exhale_preset} \"{tmp_track_path}\"'\n\n async def track_task(f_cmd, e_cmd, metadata, t_path, o_path, idx):\n async with self.semaphore:\n print(f'converting to USAC: {self.file_path}, track {idx:02d}')\n\n pipe_reader, pipe_writer = os.pipe()\n\n ffmpeg_process = await asyncio.create_subprocess_shell(\n f_cmd,\n stderr=asyncio.subprocess.DEVNULL,\n stdout=pipe_writer\n )\n os.close(pipe_writer)\n\n exhale_process = await asyncio.create_subprocess_shell(\n e_cmd,\n stdin=pipe_reader,\n stderr=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n )\n os.close(pipe_reader)\n\n await exhale_process.communicate()\n await ffmpeg_process.communicate()\n\n await AudioUtils.add_metadata_by_ffmpeg(metadata, t_path, o_path)\n\n sub_worker = asyncio.create_task(\n track_task(ffmpeg_cmd, exhale_cmd, track['metadata'], tmp_track_path, out_track_path, track[\"idx\"])\n )\n sub_workers_list.append(sub_worker)\n\n await asyncio.gather(*sub_workers_list)\n\n def get_ext(self):\n return '.m4a'\n","repo_name":"kewenyu/py_album_condense","sub_path":"audio_converter/exhale_converter.py","file_name":"exhale_converter.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3957990083","text":"import torch\nimport torch.nn as nn\n\nimport gym\nimport env\n\nimport driver\nfrom gymviz import Plot\n\nimport buffer as bf\nfrom algos import awac\nfrom torch.distributions import Categorical\nfrom config import exists_and_not_none, ArgumentParser\nimport wandb\nimport wandb_utils\nimport checkpoint\nimport baselines.helper as helper\nimport numpy as np\nfrom torch.nn.functional import log_softmax\n\nif __name__ == '__main__':\n\n \"\"\" configuration \"\"\"\n parser = ArgumentParser(description='configuration switches')\n parser.add_argument('-c', '--config', type=str)\n parser.add_argument('-d', '--device', type=str)\n parser.add_argument('-r', '--run_id', type=int, default=-1)\n parser.add_argument('--comment', type=str)\n parser.add_argument('--silent', action='store_true', default=False)\n\n \"\"\" reproducibility \"\"\"\n parser.add_argument('--seed', type=int, default=None)\n\n \"\"\" main loop control \"\"\"\n parser.add_argument('--max_steps', type=int, default=100000)\n parser.add_argument('--test_steps', type=int, default=30000)\n parser.add_argument('--test_episodes', type=int, default=10)\n\n \"\"\" resume settings \"\"\"\n parser.add_argument('--demo', action='store_true', default=False)\n parser.add_argument('-l', '--load', type=str, default=None)\n\n \"\"\" environment \"\"\"\n parser.add_argument('--env_name', type=str, default='Bandit-v1')\n parser.add_argument('--env_render', action='store_true', default=False)\n\n \"\"\" hyper-parameters \"\"\"\n parser.add_argument('--optim_lr', type=float, default=1e-2)\n 
parser.add_argument('--batch_size', type=int, default=8)\n parser.add_argument('--discount', type=float, default=0.99)\n parser.add_argument('--hidden_dim', type=int, default=16)\n\n config = parser.parse_args()\n\n \"\"\" random seed \"\"\"\n if config.seed is not None:\n torch.manual_seed(config.seed)\n\n wandb.init(project=f\"awac-{config.env_name}\", config=config)\n\n \"\"\" environment \"\"\"\n def make_env():\n env = gym.make(config.env_name)\n if config.seed is not None:\n env.seed(config.seed)\n env.action_space.seed(config.seed)\n return env\n\n \"\"\" training env with replay buffer \"\"\"\n train_env, train_buffer = bf.wrap(make_env())\n train_buffer.enrich(bf.DiscountedReturns(discount=config.discount))\n train_env = wandb_utils.LogRewards(train_env)\n if not config.silent:\n train_env = Plot(train_env, episodes_per_point=5, title=f'Train awac-{config.env_name}')\n\n \"\"\" test env \"\"\"\n test_env = make_env()\n if not config.silent:\n test_env = Plot(test_env, episodes_per_point=1, title=f'Test awac-{config.env_name}')\n evaluator = helper.Evaluator(test_env)\n\n \"\"\" network \"\"\"\n class AWACnet(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, input_dims, actions, hidden_dims):\n super().__init__()\n self.q = nn.Parameter(torch.randn(input_dims, actions))\n self.policy = nn.Parameter(torch.randn(input_dims, actions))\n # self.q = nn.Sequential(nn.Linear(input_dims, hidden_dims), nn.SELU(inplace=True),\n # nn.Linear(hidden_dims, hidden_dims), nn.SELU(inplace=True),\n # nn.Linear(hidden_dims, 2))\n # self.scale = nn.Linear(input_dims, 1, bias=False)\n\n def forward(self, state):\n i = torch.argmax(state, dim=1)\n values = self.q[i]\n actions = self.policy[i]\n action_dist = Categorical(logits=log_softmax(actions, dim=1))\n return values, action_dist\n\n awac_net = AWACnet(\n input_dims=test_env.observation_space.n,\n actions=test_env.action_space.n,\n hidden_dims=config.hidden_dim)\n q_optim = torch.optim.Adam([awac_net.q], lr=config.optim_lr)\n policy_optim = torch.optim.Adam([awac_net.policy], lr=config.optim_lr)\n\n \"\"\" load weights from file if required\"\"\"\n if exists_and_not_none(config, 'load'):\n checkpoint.load(config.load, prefix='best', awac_net=awac_net, optim=q_optim)\n\n \"\"\" policy to run on environment \"\"\"\n def policy(state):\n state = torch.from_numpy(state).float().unsqueeze(0)\n value, action = awac_net(state)\n a = action.sample()\n assert torch.isnan(a) == False\n return a.cpu().numpy()\n\n \"\"\" demo \"\"\"\n evaluator.demo(config.demo, policy)\n\n \"\"\" main loop \"\"\"\n steps = 0\n best_mean_return = -999999\n tests_run = 0\n\n for total_steps, _ in enumerate(driver.step_environment(train_env, policy)):\n steps += 1\n if total_steps > config.max_steps:\n break\n\n \"\"\" train offline after batch steps saved\"\"\"\n if steps < config.batch_size:\n continue\n else:\n awac.train_discrete(train_buffer, awac_net, critic_optim=q_optim, actor_optim=policy_optim, batch_size=config.batch_size)\n steps = 0\n print(\"QTABLE\")\n print(awac_net.q)\n print(\"POLICY TABLE\")\n print(Categorical(logits=log_softmax(awac_net.policy, dim=1)).probs)\n \"\"\" test \"\"\"\n if total_steps > config.test_steps * tests_run:\n tests_run += 1\n evaluator.evaluate(policy, config.run_dir, {'awac_net': awac_net, 'q_optim': q_optim, 'policy_optim': 
policy_optim})","repo_name":"DuaneNielsen/deep_rl","sub_path":"baselines/awac/awac_bandit.py","file_name":"awac_bandit.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"21530030417","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nwdata = pd.read_csv(\"WeatherDataWindows.csv\", delimiter=',') # takes data from the csv file w/panda\r\ndate = wdata['Date'] # array of dates\r\nwdata['Date_1'] = pd.to_datetime(date, infer_datetime_format=True)\r\navg_temp = wdata['Temp Avg'] # array of average temperature\r\navg_press = wdata['Pressure Avg'] # array of average pressure\r\nprec = wdata['Precipitation (in.)'] # array of precipitation\r\nspdate = date.str.split(pat='/') # splits the date string\r\navg_dew = wdata['Dew Point Avg']\r\n##line_graph############################################################################################################\r\nplt.plot(date, avg_temp, label=\"Average Temperature\") # plots date and average temperature\r\nplt.plot(date, avg_press, label=\"Average Pressure\") # plots date and average pressure\r\nplt.xlabel('date') # labels x-axis\r\nplt.ylabel('Average Temperature/Pressure') # labels y-axis\r\nplt.title(\"Line Graph\") # labels graph title\r\nplt.xticks(np.arange(1, 1096, 200)) # marks ticks for x-axis\r\nplt.yticks(sorted(np.arange(25, 100, 4))) # marks ticks for y-axis\r\nplt.legend() # plots the legend\r\nplt.show() # prints graph\r\n\r\n##histogram#############################################################################################################\r\nwdata[['Precipitation (in.)']].plot(kind='hist', bins=20, rwidth=0.5) # plots a histogram of precipitation values\r\nplt.ylabel('Frequency')\r\nplt.xlabel('Precipitation (in.)')\r\nplt.legend()\r\nplt.show()\r\n\r\n##scatter_plot##########################################################################################################\r\nplt.scatter(avg_temp, avg_dew) # sets up scatter plot\r\nplt.title(\"Average Temperature vs. 
Average Dew Point\") # titles graph\r\nplt.xlabel('Average Temperature') # labels x-axis\r\nplt.ylabel('Average Dew Point') # labels y-axis\r\nplt.legend()\r\nplt.show()\r\n\r\n##bar_chart#############################################################################################################\r\nwdata_grouped = wdata.groupby(wdata['Date_1'].dt.strftime('%B'))['Temp High', 'Temp Avg', 'Temp Low'].sum()\r\nwdata_grouped.plot(kind='bar') # plots it in bar graph form\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"nichog27/PycharmProjects","sub_path":"WeatherData_matplotlib.py","file_name":"WeatherData_matplotlib.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24766154211","text":"#!/usr/bin/env python\n\n# Imports ----------------------------------------------------------------------\n\nimport argparse\nfrom pathlib import Path\n\nimport pandas\nfrom tqdm import tqdm\nimport Bio.SeqIO.FastaIO as FastaIO\n\nimport magpipe.log\nimport magpipe.pretty\n\n# Define utilitary functions ---------------------------------------------------\n\ndef get_arguments():\n \"\"\"\n Get commandline arguments and return namespace\n \"\"\"\n # Initialize Parser\n parser = argparse.ArgumentParser()\n # REQUIRED arguments:\n parser.add_argument('-a', '--annotations', help='Path to the annotations directory, which contains FetchMGs/{genomes}-bestMGs and -allMGs.', required=True, type=str)\n parser.add_argument('-o', '--output', help='Path and name of the output directory.', required=True, type=str)\n # OPTIONAL arguments:\n #parser.add_argument('-g', '--genomes', help='A file with the list of genomes to process.', required=False, default='/nfs/nas22/fs2202/biol_micro_sunagawa/Projects/EAN/MAGPIPE_MAGS_EAN/scratch/processed/integrated/go_microbiomics/go_microbiomics-integrated-cpl50_ctn10-dictionaries/test-genomes-specI', type=str)\n parser.add_argument('-g', '--genomes', help='A file with the list of genomes to process.', required=False, default='/nfs/nas22/fs2202/biol_micro_sunagawa/Projects/EAN/MAGPIPE_MAGS_EAN/scratch/processed/integrated/go_microbiomics/go_microbiomics-integrated-cpl50_ctn10-dictionaries/go_microbiomics-integrated-cpl50_ctn10-prokarya.txt', type=str)\n parser.add_argument('-t', '--taxo', help='Path to the gtdbtk taxonomy of the genomes. 
Just path/prefix (usually gtdbtk).', required=False, default='/nfs/nas22/fs2202/biol_micro_sunagawa/Projects/EAN/MAGPIPE_MAGS_EAN/scratch/processed/integrated/go_microbiomics/go_microbiomics-integrated-cpl50_ctn10-gtdbtk/gtdbtk', type=str)\n    # Return namespace\n    return parser.parse_args()\n\ndef load_file_as_list(input_file): \n    \"\"\"\n    Takes a file and lists all the entries in it\n    \"\"\"\n    magpipe.pretty.print_pair(\"Loading file\", input_file)\n    file_list = []\n    file_path = Path(input_file) \n    if not file_path.exists():\n        raise ValueError('Well then, input file {} doesn\\'t exist.'.format(file_path))\n    with open(file_path) as handle:\n        for line in handle: \n            file_list.append(line.strip())\n    return file_list\n\ndef process_marker_genes(annotations, genome, fetch_type, output):\n    \"\"\"\n    Takes all the marker genes (mgs) for a genome, renames them and moves them over\n    refg:\n    Adds the Marker Genes of a reference genome to the combined marker genes file\n    For that we need to read the bestMGs of that genome, rename them and append them\n    to the correct input file for specI\n    else:\n    Adds the marker genes of a MAG or a SAG to the combined marker genes\n    For that we read the marker genes for that genome, ignore the duplicated ones\n    rename and write to file for specI\n    \"\"\"\n    output = Path(output)\n    mgspath = Path(annotations).joinpath(genome, \"fetchMGs\", genome + \"-\" + fetch_type.lstrip('-'))\n    if not mgspath.exists():\n        raise FileNotFoundError(\"Couldn't find {}...\".format(mgspath))\n    mgs = [f.name.replace(\".fna\", \"\") for f in mgspath.glob(\"*.fna\")]\n    if len(mgs) != 40:\n        raise ValueError(\"Well, I was expecting 40 MGs and found {} when processing {}.\".format(len(mgs), genome))\n    genome_id = genome.replace(\".\", \":\") + \".\" + genome.replace(\".\", \":\")\n    for mg in mgs:\n        mg_fna = mgspath.joinpath(mg + \".fna\")\n        mg_faa = mgspath.joinpath(mg + \".faa\")\n        if mg_fna.stat().st_size == 0 and mg_faa.stat().st_size == 0:\n            continue # skip empty files, i.e. the mg wasn't found\n        with open(mg_fna) as fna_in, open(mg_faa) as faa_in:\n            seqlist = [{'fna_h': fna_h, 'fna_s': fna_s, 'faa_h': faa_h, 'faa_s': faa_s} for (fna_h, fna_s), (faa_h, faa_s) in zip(FastaIO.SimpleFastaParser(fna_in), FastaIO.SimpleFastaParser(faa_in))]\n        if len(seqlist) > 1:\n            continue # skipping duplicated genes as we can't tell which is the right one and they could mess up specI\n        seqdict = seqlist[0]\n        if seqdict['fna_h'] != seqdict['faa_h']:\n            raise ValueError(\"Mmmh, I've got gene names that don't match between fna and faa here... See {}, {} gave {} and {}.\".format(genome, mg, seqdict['fna_h'], seqdict['faa_h']))\n        header = genome_id + \".\" + seqdict['fna_h'].replace(\".\", \":\")\n        with open(output.joinpath(mg + \".fna\"), \"a\") as fna_out, open(output.joinpath(mg + \".faa\"), \"a\") as faa_out:\n            fna_out.write(\">\" + header + \"\\n\")\n            fna_out.write(seqdict['fna_s'] + \"\\n\")\n            faa_out.write(\">\" + header + \"\\n\")\n            faa_out.write(seqdict['faa_s'] + \"\\n\")\n    return genome_id\n\ndef main(args):\n    \"\"\"\n    Main function to process the marker genes for checkm \n    \"\"\"\n    output = Path(args.output)\n    if output.exists():\n        raise ValueError('Seems the script has already been run... 
cleanup and start again!')\n    mgs_out = output.joinpath(\"MGs\")\n    mgs_out.mkdir(parents=True)\n    genomes_list = load_file_as_list(args.genomes)\n    ids_list = []\n    for genome in tqdm(genomes_list, ncols=100):\n        if \"REFG\" in genome:\n            genome_id = process_marker_genes(args.annotations, genome, \"bestMGs\", mgs_out)\n        else:\n            genome_id = process_marker_genes(args.annotations, genome, \"allMGs\", mgs_out)\n        ids_list.append(genome_id)\n    # Write the id list\n    with open(output.joinpath(\"genome_ids_for_specI.txt\"), \"w\") as handle:\n        handle.write(\"\\n\".join(ids_list))\n    # get the taxo for specI\n    bac_table = pandas.read_csv(args.taxo + \".bac120.summary.tsv\", sep=\"\\t\")\n    bac_table = bac_table[[\"user_genome\", \"classification\"]]\n    arc_table = pandas.read_csv(args.taxo + \".ar122.summary.tsv\", sep=\"\\t\")\n    arc_table = arc_table[[\"user_genome\", \"classification\"]]\n    taxo = pandas.concat([bac_table, arc_table])\n    taxo.user_genome = [i.replace('.', ':') for i in taxo.user_genome]\n    taxo.to_csv(output.joinpath(\"genome_taxo_for_specI.tsv\"), index=False, sep=\"\\t\")\n\nif __name__ == '__main__':\n    args = get_arguments()\n    magpipe.log.print_arguments(args)\n    main(args)\n","repo_name":"SushiLab/magpipe","sub_path":"scripts/prepare_specI.py","file_name":"prepare_specI.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"}
+{"seq_id":"8173626274","text":"import numpy as np\n\nimport skvideo.io\nimport skvideo.measure\n\noutputfile = \"test.mp4\"\noutputdata = np.random.random(size=(30, 480, 640, 3)) * 255\noutputdata = outputdata.astype(np.uint8)\n\n# start the FFmpeg writing subprocess with following parameters\nwriter = skvideo.io.FFmpegWriter(outputfile, outputdict={\n  '-vcodec': 'libx264', '-b': '300000000'\n})\n\nfor i in range(30):\n    writer.writeFrame(outputdata[i])\nwriter.close()\n\ninputdata = skvideo.io.vread(outputfile)\n\n# test each frame's SSIM score\nmSSIM = 0\nfor i in range(30):\n    mSSIM += skvideo.measure.ssim(np.mean(inputdata[i], axis=2), np.mean(outputdata[i], axis=2))\n\nmSSIM /= 30.0\nprint(mSSIM)\n","repo_name":"scikit-video/scikit-video","sub_path":"doc/examples/outputdictexample.py","file_name":"outputdictexample.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":638,"dataset":"github-code","pt":"81"}
+{"seq_id":"3372869354","text":"# coding: utf-8\nimport os\nimport importlib\n\n\ndefault_settings = {\n    'DEBUG': False,\n    'PORT': 8000,\n    'TIME_ZONE': 'Asia/Shanghai',\n}\n\nclass Settings:\n    def __init__(self):\n        for k, v in default_settings.items():\n            self.__dict__[k] = v\n        extra_env = os.environ.get('CROGULL_SETTINGS_MODULE')\n        if extra_env is not None:\n            extra_settings = importlib.import_module(extra_env)\n            for attr in dir(extra_settings):\n                if not attr.isupper():\n                    continue\n                self.__dict__[attr] = getattr(extra_settings, attr)\n\n\nsettings = Settings()\n","repo_name":"kerol/crogull","sub_path":"crogull/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41469023267","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app import DATABASE, bcrypt\nfrom datetime import datetime\n\nclass Meeting:\n    def __init__( self , data ):\n        self.id = data['id']\n        self.meeting_date = data['meeting_date'].strftime(\"%B %d, %Y\")\n        self.meeting_location = 
data['name']\n    \n    # C - Create methods / INSERT a new entry into a table\n    @classmethod\n    def save(cls, data):\n        query = \"INSERT INTO meetings (meeting_location_id, meeting_date, meeting_date_id) VALUES (%(meeting_location_id)s, %(meeting_date)s, 5 );\"\n        return connectToMySQL(DATABASE).query_db(query, data)\n    \n    # R - Read methods / return data from table\n    @classmethod\n    def get_all(cls):\n        query = \"SELECT meetings.id, meetings.meeting_date, meeting_locations.name FROM meetings JOIN meeting_locations ON meetings.meeting_location_id = meeting_locations.id WHERE meetings.meeting_date >= CURRENT_DATE() ORDER BY meetings.meeting_date;\"\n        results = connectToMySQL(DATABASE).query_db(query)\n        meetings = [] \n        for meeting in results:\n            meetings.append( cls(meeting) )\n        return meetings # Returns a list of class instances\n\n    @classmethod\n    def get_next(cls):\n        query = \"SELECT meetings.id, meetings.meeting_date, meeting_locations.name FROM meetings JOIN meeting_locations ON meetings.meeting_location_id = meeting_locations.id WHERE meetings.meeting_date >= CURRENT_DATE() ORDER BY meetings.meeting_date LIMIT 1;\"\n        result = connectToMySQL(DATABASE).query_db(query)\n        return cls(result[0]) # Returns the next meeting as a class instance\n\n    # U - Update methods / UPDATE existing entries with new values\n    @classmethod\n    def update_one(cls, data):\n        query = 'UPDATE meetings SET meeting_date=%(meeting_date)s, meeting_location_id=%(meeting_location_id)s, meeting_date_id=5 WHERE id=%(id)s;'\n        return connectToMySQL(DATABASE).query_db(query, data)\n\n    # D - Delete methods / DELETE existing entries from table\n    @classmethod\n    def delete_one(cls, data):\n        query = \"DELETE FROM meetings WHERE id= %(id)s;\"\n        return connectToMySQL(DATABASE).query_db(query, data)\n\n    \n","repo_name":"brendobrendo/tot-website","sub_path":"flask_app/models/model_meetings.py","file_name":"model_meetings.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32926282837","text":"from itertools import combinations_with_replacement\n\nsample = 'TOR'\nnum = int(input())\n\nif num > len(sample) * 2:\n    text = sample * (num // len(sample))\n    if num % len(sample) != 0:\n        text += sample\n\n    ans = list(sorted(set((filter(lambda x: ''.join(x).count(sample) == 2, combinations_with_replacement(text, num)))))) #cringe\n\n    for i in range(len(ans)):\n        print(*ans[i], sep='', end='')\n        if i != len(ans) - 1:\n            print(',', end=' ')\n\nelif num == len(sample) * 2:\n    print(sample * 2, end='')","repo_name":"satoad/pythonprac","sub_path":"20221025/3/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"34513721775","text":"#!/usr/bin/python3\n'''Read out Wind.bin file'''\n\nimport time, subprocess\n\nt0 = time.mktime((2016,1,1,0,0,0,0,0,0))\nfileName = '/home/gbf/repos/WindMon/rpi/Wind.bin'\nconnectToRPi = ['sshfs', '10.11.2.189:/home/gbf/WindMon', '/home/gbf/repos/WindMon/rpi']\ndisconnectFromRPi = ['fusermount', '-u', '/home/gbf/repos/WindMon/rpi']\ndispLen = 80\ndataLen = 8\nnorth = 26360 # raw a/d\noffset = 275 # deg\n\n\ndef main():\n    try:\n        f = open(fileName, 'rb')\n    except FileNotFoundError:\n        subprocess.call(connectToRPi)\n        f = open(fileName, 'rb')\n    f.seek(-dataLen * dispLen, 2)\n    t0 = 1456519594.0\n    dirCnt = 0\n    velCnt = 0\n    dirSum = 0\n    initTime = 0\n    finalTime = 0\n    for i in range(0,dispLen):\n        b = f.read(dataLen)\n        
if len(b) < dataLen:\n            break\n        sec = int.from_bytes(b[0:4], 'little')\n        ms = int.from_bytes(b[4:6], 'little')\n        t1 = sec + ms/10000.0\n        if (b[7]== 0x80): # This is a velocity tick\n            if velCnt == 0:\n                initTime = t1\n            velCnt += 1\n            finalTime = t1\n            dt = t1-t0\n            t0 = t1\n            vel = int(2.23 / dt)\n            print(time.ctime(t1) + ' velocity = {0:3d} mph; dt = {1:9.4f}'.format(vel, dt))\n        else: # This is a direction measurement\n            d = int.from_bytes(b[6:8], 'little')\n            if dirCnt == 0:\n                d0 = d\n            if d - d0 > north / 2:\n                d -= north\n            if d0 -d > north / 2:\n                d += north\n            dirCnt += 1\n            dirSum += d\n\n            direction = int(offset + 360.0 * d / north) % 360\n            print(time.ctime(t1) + ' {0:3d} deg - {1:6d}'.format(direction, int.from_bytes(b[6:8], 'little')))\n\n    f.close()\n    dir = (offset + int(360.0 * dirSum / dirCnt / north)) % 360\n    if (initTime == 0) or (finalTime == 0):\n        vel = 0\n    else:\n        vel = int((velCnt-1) * 2.23 / (finalTime - initTime))\n\n    print('Wind is {0:3d} mph at {1:3d} degrees'.format(vel, dir))\n    time.sleep(1)\n    subprocess.call(disconnectFromRPi)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gbfoote/WindMon","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"23727948891","text":"# -*- coding: utf-8 -*-\n\"\"\"Test the ProjectSpace content.\"\"\"\n\nfrom zope.interface import Invalid\nfrom zope.i18n import translate\n\nfrom Products.CMFCore.utils import getToolByName\n\nfrom imio.project.core.testing import FunctionalTestCase\nfrom imio.project.core.content.projectspace import IProjectSpace\nfrom imio.project.core.content.projectspace import RemovedValueIsNotUsedByCategoriesFieldValidator\nfrom imio.project.core.content.projectspace import RemovedValueIsNotUsedByPriorityFieldValidator\nfrom imio.project.core.content.projectspace import RemovedValueIsNotUsedByBudgetTypesFieldValidator\nfrom imio.project.core.content.projectspace import ERROR_VALUE_REMOVED_IS_IN_USE\n\n\nclass TestProjectSpace(FunctionalTestCase):\n    \"\"\"Test the ProjectSpace content.\"\"\"\n\n    def test_RemovedValueIsNotUsedByCategoriesFieldValidator(self):\n        \"\"\"Test the RemovedValueIsNotUsedByCategoriesFieldValidator validator that\n        validates that removed keys in the ProjectSpace.categories attribute actually managing\n        a vocabulary are not used by already created elements and can be removed safely.\"\"\"\n        stored_value = list(self.portal.projectspace.categories_values)\n        categories_validator = RemovedValueIsNotUsedByCategoriesFieldValidator(self.portal.projectspace,\n                                                                               None,\n                                                                               None,\n                                                                               IProjectSpace['categories_values'],\n                                                                               None)\n        self._checkValidateKeyNotUsed(stored_value, categories_validator, 'categories', 'Category', None)\n\n    def test_RemovedValueIsNotUsedByPriorityFieldValidator(self):\n        \"\"\"Test the RemovedValueIsNotUsedByPriorityFieldValidator validator that\n        validates that removed keys in the ProjectSpace.priority attribute actually managing\n        a vocabulary are not used by already created elements and can be removed safely.\"\"\"\n        # check the priority validator\n        stored_value = list(self.portal.projectspace.priority_values)\n        priority_validator = RemovedValueIsNotUsedByPriorityFieldValidator(self.portal.projectspace,\n                                                                           None,\n                                                                           None,\n                                                                           IProjectSpace['priority_values'],\n                                                                           None)\n        self._checkValidateKeyNotUsed(stored_value, priority_validator, 'priority', 'Priority', None)\n\n    def test_RemovedValueIsNotUsedByBudgetTypesFieldValidator(self):\n        \"\"\"Test the 
RemovedValueIsNotUsedByBudgetTypesFieldValidator validator that\n validates that removed keys in the ProjectSpace.budget_types attribute actually managing\n a vocabulary are not used by already created elements and can be removed safely.\"\"\"\n # check the budget_types validator\n stored_value = list(self.portal.projectspace.budget_types)\n budget_types_validator = RemovedValueIsNotUsedByBudgetTypesFieldValidator(self.portal.projectspace,\n None,\n None,\n IProjectSpace['budget_types'],\n None)\n self._checkValidateKeyNotUsed(stored_value, budget_types_validator, 'budget', 'Budget type', 'budget_type')\n\n def _checkValidateKeyNotUsed(self, stored_value, validator, fieldName, fieldValue, sub_attribute_using_key):\n \"\"\"\n Helper method for testing the RemovedValueIsNotUsedByXXXFieldValidator\n \"\"\"\n plone_utils = getToolByName(self.portal, 'plone_utils')\n # just calling it with no changes\n validator.validate(stored_value)\n\n # now add a value\n new_value = list(stored_value)\n new_value.append({'label': u\"New value\", 'key': 'new-value'})\n # still behaving right\n validator.validate(new_value)\n\n # now remove a used value\n # remove value used by 'project-1'\n project1 = self.portal.projectspace['project-1']\n field_key = \"%s-1\" % plone_utils.normalizeString(fieldValue)\n project1_field_value = getattr(project1, fieldName)\n # now take into account the fact that we check a datagridfield or a simple value\n # either we take the saved value, either we get the value in the relevant column of a datagridfield...\n value_to_compare = sub_attribute_using_key and \\\n project1_field_value[0][sub_attribute_using_key] or \\\n project1_field_value\n self.assertEquals(value_to_compare, field_key)\n # remove the first element of new-value that is actually used\n new_value_without_first = list(new_value)\n # the first element that we will pop is the one we expect\n self.assertEquals(new_value_without_first[0], {'label': u\"%s 1\" % fieldValue, 'key': field_key})\n new_value_without_first.pop(0)\n with self.assertRaises(Invalid) as raised:\n validator.validate(new_value_without_first)\n self.assertEquals(translate(raised.exception.message),\n translate(ERROR_VALUE_REMOVED_IS_IN_USE,\n mapping={'removed_key': field_key,\n 'used_by_url': 'http://nohost/plone/projectspace/project-1', }))\n\n # now remove project1 using the value, the new_value_without_first will validate correctly then\n self.portal.projectspace.manage_delObjects(ids=['project-1', ])\n validator.validate(new_value_without_first)\n\n # a value that is not used at all can be removed, like last added one 'new-value'\n new_value_without_newvalue = list(new_value)\n self.assertEquals(new_value_without_newvalue[-1], {'label': u\"New value\", 'key': 'new-value'})\n new_value_without_newvalue.pop(-1)\n validator.validate(new_value_without_newvalue)\n","repo_name":"IMIO/imio.project.core","sub_path":"src/imio/project/core/tests/test_projectspace.py","file_name":"test_projectspace.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21541234346","text":"from copy import deepcopy\nfrom basic_ops.helpers.string_helpers import format_transition\n\n\ndef get_accessible(automaton):\n \"\"\"Copies the automaton and prunes off any transitions/states which are not\n accessible from the initial state.\n\n Parameters\n ----------\n automaton : dictionary\n The automaton to remove unnecessary transitions/states\n\n Returns\n -------\n 
dictionary\n The resulting pruned, accessible automaton\n \"\"\"\n automaton = deepcopy(automaton)\n events = automaton[\"events\"][\"all\"]\n transitions = automaton[\"transitions\"][\"all\"]\n queue = automaton[\"states\"][\"initial\"].copy()\n accessible_states = set(queue)\n accessible_trans = dict()\n\n # Visit every state starting from initial\n while len(queue) > 0:\n curr = queue.pop(0)\n # Search for all accessible states from curr\n for event in events:\n trans = format_transition(curr, event)\n # If transition exists, add it and the state\n if trans in transitions:\n to = transitions[trans]\n accessible_trans[trans] = to\n for state in to:\n if state not in accessible_states:\n accessible_states.add(state)\n queue.append(state)\n\n # Update the states\n automaton[\"states\"][\"all\"] = sorted(accessible_states)\n automaton[\"states\"][\"marked\"] = [\n [s for s in x if s in accessible_states]\n for x in automaton[\"states\"][\"marked\"]\n ]\n # Deal with all the various possible types of states\n all_state_types = [\"bad\", \"v1\", \"v2\", \"bad-v1\", \"bad-v2\"]\n state_types = [x for x in all_state_types if x in automaton[\"states\"]]\n for state_type in state_types:\n automaton[\"states\"][state_type] = [\n x for x in automaton[\"states\"][state_type] if x in accessible_states\n ]\n\n # Update the transitions\n automaton[\"transitions\"][\"all\"] = accessible_trans\n # Deal with all various possible types of transitions\n all_trans_types = [\"v1\", \"v2\", \"bad\"]\n trans_types = [x for x in all_trans_types if x in automaton[\"transitions\"]]\n for trans_type in trans_types:\n updated_trans = dict()\n for k, v in automaton[\"transitions\"][trans_type].items():\n if k in accessible_trans:\n updated_trans[k] = v\n automaton[\"transitions\"][trans_type] = updated_trans\n\n return automaton\n","repo_name":"gzinck/des","sub_path":"basic_ops/accessible.py","file_name":"accessible.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"2465906434","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\n\n# system imports\nimport os.path as osp\nimport asyncio\nimport urllib.parse\nfrom typing import Any\n\n# external imports\nimport toga\nfrom toga.style.pack import Pack\nfrom toga.constants import ROW, COLUMN\nfrom maestral.daemon import MaestralProxy\nfrom maestral.models import SyncErrorEntry\nfrom maestral.utils import sanitize_string\n\n# local imports\nfrom .private.widgets import Label, FollowLinkButton, Icon, Window\nfrom .private.constants import WORD_WRAP\n\n\nPADDING = 10\nICON_SIZE = 48\nWINDOW_SIZE = (370, 400)\n\n\nclass SyncIssueView(toga.Box):\n def __init__(self, sync_err: SyncErrorEntry) -> None:\n super().__init__(style=Pack(direction=COLUMN))\n\n self.sync_err = sync_err\n\n icon = Icon(for_path=self.sync_err.local_path)\n\n # noinspection PyTypeChecker\n image_view = toga.ImageView(\n image=icon,\n style=Pack(\n width=ICON_SIZE,\n height=ICON_SIZE,\n padding=(0, 12, 0, 3),\n ),\n )\n\n path_label = Label(\n sanitize_string(osp.basename(self.sync_err.dbx_path)),\n style=Pack(\n padding_bottom=PADDING / 2,\n ),\n )\n error_label = Label(\n f\"{self.sync_err.title}:\\n{self.sync_err.message}\",\n linebreak_mode=WORD_WRAP,\n style=Pack(\n font_size=11,\n width=WINDOW_SIZE[0] - 4 * PADDING - 15 - ICON_SIZE,\n padding_bottom=PADDING / 2,\n ),\n )\n\n link_local = FollowLinkButton(\n \"Show in Finder\",\n url=self.sync_err.local_path,\n locate=True,\n 
style=Pack(\n padding_right=PADDING,\n font_size=11,\n height=12,\n ),\n )\n link_local.enabled = osp.exists(self.sync_err.local_path)\n\n quoted_dbx_path = urllib.parse.quote(self.sync_err.dbx_path)\n dbx_address = f\"https://www.dropbox.com/preview{quoted_dbx_path}\"\n\n link_dbx = FollowLinkButton(\n \"Show Online\",\n url=dbx_address,\n style=Pack(font_size=11, height=12),\n )\n\n link_box = toga.Box(\n children=[link_local, link_dbx],\n style=Pack(direction=ROW),\n )\n info_box = toga.Box(\n children=[path_label, error_label, link_box],\n style=Pack(direction=COLUMN, flex=1),\n )\n content_box = toga.Box(\n children=[image_view, info_box],\n style=Pack(direction=ROW),\n )\n\n hline = toga.Divider(style=Pack(padding=(PADDING, 0, PADDING, 0)))\n\n self.add(content_box, hline)\n\n\nclass SyncIssuesWindow(Window):\n def __init__(self, mdbx: MaestralProxy, app: toga.App) -> None:\n super().__init__(title=\"Maestral Sync Issues\", release_on_close=False, app=app)\n self.on_close = self.on_close_pressed\n\n self.mdbx = mdbx\n\n self._refresh = False\n self._refresh_interval = 1\n self._sync_issue_widgets: dict[str, SyncIssueView] = dict()\n\n self._placeholder = Label(\n \"No sync issues 😊\", style=Pack(padding_bottom=PADDING)\n )\n\n self.size = WINDOW_SIZE\n\n self.sync_errors_box = toga.Box(\n style=Pack(\n direction=COLUMN,\n padding=2 * PADDING,\n ),\n )\n self.scroll_container = toga.ScrollContainer(\n content=self.sync_errors_box,\n horizontal=False,\n )\n\n self.content = self.scroll_container\n self.center()\n\n self.refresh_gui()\n\n async def periodic_refresh_gui(self, sender: Any = None) -> None:\n while self._refresh:\n self.refresh_gui()\n await asyncio.sleep(self._refresh_interval)\n\n def _has_placeholder(self) -> bool:\n return self._placeholder in self.sync_errors_box.children\n\n def refresh_gui(self) -> None:\n new_errors = self.mdbx.sync_errors\n\n # remove placeholder if the error count > 0\n\n if len(new_errors) > 0 and self._has_placeholder():\n self.sync_errors_box.remove(self._placeholder)\n\n # add new errors\n\n new_err_paths: set[str] = set()\n\n for error in new_errors:\n new_err_paths.add(error.dbx_path)\n if error.dbx_path not in self._sync_issue_widgets:\n widget = SyncIssueView(error)\n self.sync_errors_box.add(widget)\n self._sync_issue_widgets[error.dbx_path] = widget\n\n # remove old errors\n\n for dbx_path in self._sync_issue_widgets.copy():\n if dbx_path not in new_err_paths:\n widget = self._sync_issue_widgets.pop(dbx_path)\n self.sync_errors_box.remove(widget)\n\n # add placeholder if we don't have any errors\n if len(new_errors) == 0 and not self._has_placeholder():\n self.sync_errors_box.add(self._placeholder)\n\n def on_close_pressed(self, sender: Any = None) -> bool:\n self._refresh = False\n return True\n\n def show(self) -> None:\n self._refresh = True\n self.app.add_background_task(self.periodic_refresh_gui)\n super().show()\n","repo_name":"samschott/maestral-cocoa","sub_path":"src/maestral_cocoa/syncissues.py","file_name":"syncissues.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"40936965644","text":"import os\nimport subprocess\nimport pytest\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nfrom lib.db import DB\nfrom lib import utils\nfrom lib.global_config import GlobalConfig\nfrom tests import test_functions as Tests\nfrom runner import Runner\n\n\nconfig = 
GlobalConfig(config_name='test-config.yml').config\n\n#pylint: disable=unused-argument # unused argument off for now - because there are no running tests in this file\n@pytest.fixture(name=\"reset_config\")\ndef reset_config_fixture():\n    idle_start_time = config['measurement']['idle-time-start']\n    idle_time_end = config['measurement']['idle-time-end']\n    flow_process_runtime = config['measurement']['flow-process-runtime']\n    yield\n    config['measurement']['idle-time-start'] = idle_start_time\n    config['measurement']['idle-time-end'] = idle_time_end\n    config['measurement']['flow-process-runtime'] = flow_process_runtime\n\n@pytest.fixture(autouse=True, scope=\"module\", name=\"build_image\")\ndef build_image_fixture():\n    uri = os.path.abspath(os.path.join(\n        CURRENT_DIR, 'stress-application/'))\n    subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True)\n\n#pylint: disable=expression-not-assigned\ndef run_runner():\n    uri = os.path.abspath(os.path.join(\n        CURRENT_DIR, 'stress-application/'))\n\n    # Run the application\n    RUN_NAME = 'test_' + utils.randomword(12)\n    runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', verbose_provider_boot=True, dev_repeat_run=True, skip_system_checks=True)\n    return runner.run()\n\n# Rethink how to do this test entirely\ndef wip_test_idle_start_time(reset_config):\n    config['measurement']['idle-time-start'] = 2\n    run_id = run_runner()\n    query = \"\"\"\n            SELECT\n                time, note\n            FROM\n                notes\n            WHERE\n                run_id = %s\n            ORDER BY\n                time\n            \"\"\"\n\n    notes = DB().fetch_all(query, (run_id,))\n\n    timestamp_preidle = [note for note in notes if \"Booting\" in note[1]][0][0]\n    timestamp_start = [note for note in notes if note[1] == 'Start of measurement'][0][0]\n\n    #assert that the difference between the two timestamps is roughly 2 seconds\n    diff = (timestamp_start - timestamp_preidle)/1000000\n    assert 1.9 <= diff <= 2.1, \\\n        Tests.assertion_info('2s apart', f\"timestamp difference of notes: {diff}s\")\n\n# Rethink how to do this test entirely\ndef wip_test_idle_end_time(reset_config):\n    config['measurement']['idle-time-end'] = 2\n    run_id = run_runner()\n    query = \"\"\"\n            SELECT\n                time, note\n            FROM\n                notes\n            WHERE\n                run_id = %s\n            ORDER BY\n                time\n            \"\"\"\n\n    notes = DB().fetch_all(query, (run_id,))\n    timestamp_postidle = [note for note in notes if note[1] == 'End of post-measurement idle'][0][0]\n    timestamp_end = [note for note in notes if note[1] == 'End of measurement'][0][0]\n\n    #assert that the difference between the two timestamps is roughly 2 seconds\n    diff = (timestamp_postidle - timestamp_end)/1000000\n    assert 1.9 <= diff <= 2.1, \\\n        Tests.assertion_info('2s apart', f\"timestamp difference of notes: {diff}s\")\n\ndef wip_test_process_runtime_exceeded(reset_config):\n    config['measurement']['flow-process-runtime'] = .1\n    with pytest.raises(RuntimeError) as err:\n        run_runner()\n    expected_exception = 'Process exceeded runtime of 0.1s: stress-ng -c 1 -t 1 -q'\n    assert expected_exception in str(err.value), \\\n        Tests.assertion_info(expected_exception, str(err.value))\n","repo_name":"green-coding-berlin/green-metrics-tool","sub_path":"tests/test_config_opts.py","file_name":"test_config_opts.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"81"}
+{"seq_id":"40342510525","text":"'''\r\nCreated on 23 feb. 
2016\r\n\r\n@author: brandtp\r\n'''\r\nimport unittest\r\nfrom EdoalParser.alignment import Alignment, removeWS\r\nfrom EdoalParser.corresp import Correspondence\r\n\r\ntry:\r\n    from lxml import etree\r\n    print(\"running with lxml.etree\")\r\nexcept ImportError:\r\n    try:\r\n        # Python 2.5\r\n        import xml.etree.cElementTree as etree\r\n        print(\"running with cElementTree on Python 2.5+\")\r\n    except ImportError:\r\n        try:\r\n            # Python 2.5\r\n            import xml.etree.ElementTree as etree\r\n            print(\"running with ElementTree on Python 2.5+\")\r\n        except ImportError:\r\n            try:\r\n                # normal cElementTree install\r\n                import cElementTree as etree\r\n                print(\"running with cElementTree\")\r\n            except ImportError:\r\n                try:\r\n                    # normal ElementTree install\r\n                    import elementtree.ElementTree as etree\r\n                    print(\"running with ElementTree\")\r\n                except ImportError:\r\n                    print(\"Failed to import ElementTree from any known place\")\r\n\r\nimport json\r\n\r\n\r\nclass Test(unittest.TestCase):\r\n\r\n\r\n    def setUp(self): \r\n\r\n        # Use of testCases can be useful, but not now\r\n        self.testCases = {}\r\n        self.testCases['CaseName'] = {'pass': 'someName-value-or-datastruct',\r\n                                      'fail': 'someName-value-or-datastruct' } \r\n        \r\n        # Define some of the generic stuff\r\n        self.ns = json.load(open(\"../namespaces.json\")) \r\n        with open(\"testResources/test01/align.xml\", 'rt') as f:\r\n            rdf = etree.parse(f)\r\n        self.root = rdf.getroot()\r\n        \r\n#\r\n#\r\n# All Tests\r\n#\r\n#\r\n    \r\n    def testAlignment(self):\r\n        align = Alignment(self.root)\r\n        assert align.xml == 'yes'\r\n        assert align.about == \"http://oms.omwg.org/ontoA-ontoB/\"\r\n        assert align.creator.text == 'PaulBrandt'\r\n        assert align.date.text == '2015/08/25'\r\n        assert align.method.text == 'manual'\r\n        assert align.purpose.text == 'initial example for a simple Alignment'\r\n        assert align.level == '2EDOAL'\r\n        assert align.type.text == '?*' \r\n        \r\n        #TODO: Test for equality of ontology elements in Alignment\r\n#        assert align.onto1 == \r\n#        assert align.onto2 == \r\n        assert len(align.corresp) == 1\r\n        \r\n\r\n    def testCorrespondence(self):\r\n        align = self.root.find('xmlns:Alignment', self.ns)\r\n        element = align.find('xmlns:map/xmlns:Cell', self.ns)\r\n        assert element is not None\r\n        # Create the correspondence from the element\r\n        c = Correspondence(element)\r\n        \r\n        # First test: Establish correctness, i.e., all attributes of the Correspondence object\r\n        assert c.about == \"MappingRule_0\"\r\n        \r\n        ent1 = element.find('xmlns:entity1', self.ns)\r\n        # Remove all the spaces and newlines, and replace with None, otherwise the assertion fails\r\n        ent1 = removeWS(ent1)\r\n        if ent1.text =='' :\r\n            ent1.text = None\r\n        if ent1.tail =='' :\r\n            ent1.tail = None\r\n        assert c.entity1 == ent1\r\n        \r\n        ent2 = element.find('xmlns:entity2', self.ns)\r\n        # Remove all the spaces and newlines, and replace with None, otherwise the assertion fails\r\n        ent2 = removeWS(ent2)\r\n        if ent2.text =='' :\r\n            ent2.text = None\r\n        if ent2.tail =='' :\r\n            ent2.tail = None\r\n        assert c.entity2 == ent2\r\n        \r\n        assert c.relation.text == 'Equivalence'\r\n        assert c.measure.text == '1.0'\r\n        assert not hasattr(c, 'transformation')\r\n        assert not hasattr(c, 'linkkey')\r\n        \r\n        # Second test: must be able to handle None elements\r\n        element = None\r\n        with self.assertRaises(AttributeError):\r\n            d = Correspondence(element)\r\n\r\n    def tearDown(self):\r\n        pass\r\n    \r\nif __name__ == \"__main__\":\r\n    #import sys;sys.argv = ['', 'Test.testCorrespondence']\r\n    
unittest.main()","repo_name":"plbt5/EDOALParser","sub_path":"EDOALParser/TTDtests/unittest.py","file_name":"unittest.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12554867023","text":"from jeec_brain.values.value_composite import ValueComposite\n\n\nclass SquadMembersValue(ValueComposite):\n\tdef __init__(self, members):\n\t\tsuper(SquadMembersValue, self).initialize({})\n\t\tmembers_array = []\n\t\tfor member in members:\n\t\t\tmember_value = {\n\t\t\t\t\"name\": member.user.name,\n\t\t\t\t\"ist_id\": member.user.username,\n\t\t\t\t\"level\": member.level.value,\n\t\t\t\t\"photo\": 'data: ' + member.photo_type + ';base64, ' + member.photo,\n\t\t\t\t\"squad_points\": member.squad_points,\n\t\t\t\t\"is_captain\": member.is_captain()\n\t\t\t}\n\t\t\tmembers_array.append(member_value)\n\t\tself.serialize_with(data=members_array)\n","repo_name":"jose-correia/brain","sub_path":"jeec_brain/values/squad_members_value.py","file_name":"squad_members_value.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35365814073","text":"from sklearn.svm import LinearSVC, SVC\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, r2_score\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n#1. 데이터\nx_data = [0, 0], [0, 1], [1, 0], [1, 1]\ny_data = [0, 1, 1, 0]\n\n# 2. 모델\n# 실습 m02_5파일을 다층 레이어 구성해서 이 파일이 acc=1. 이 나오도록 구성\nmodel = Sequential() \nmodel.add(Dense(10, input_dim=2, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\nmodel.fit(x_data, y_data, batch_size=1, epochs=100)\n# 3. 훈련\n\n# 4. 
evaluation, prediction\n# y_predict = model.predict(x_data)\n# print(x_data, \"prediction result : \", y_predict)\n\nresults = model.evaluate(x_data, y_data)\nprint('model.score : ', results)\ny_predict = model.predict(x_data)\ny_predict = np.round(y_predict)\nr2 = r2_score(y_data, y_predict)\nprint('r2_score : ', r2)\n\n","repo_name":"marattang/ml_basic","sub_path":"m02_6_xor_keras2.py","file_name":"m02_6_xor_keras2.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28820137715","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n        \n        \n        sentinels = ListNode()\n        sentinell = ListNode()\n        tails= sentinels\n        taill=sentinell\n        \n        # pluck the node from the original linked list\n        # add >=x to the taill node and the rest to tails\n        # tails.next = sentinell.next\n        #increment the curr on the original \n        #return sentinels.next \n        \n        curr = head\n        while curr:\n            succ = curr.next\n            curr.next = None\n            #work to be done\n            if curr.val opt.minmaskarea and size>opt.minsize and impro.Q_lapulase(img)>opt.quality:\n                        cnt +=1\n                        if cnt == opt.time:\n                            # print(second)\n                            timestamps.append(util.second2stamp(cut_point*opt.interval))\n            util.writelog(os.path.join(opt.savedir,'opt.txt'),videopath+'\\n'+str(timestamps))\n            #print(timestamps)\n\n            #generate datasets\n            print('Generate datasets...')\n            for timestamp in timestamps:\n                savecnt = '%05d' % result_cnt\n                origindir = os.path.join(opt.savedir,savecnt,'origin_image')\n                maskdir = os.path.join(opt.savedir,savecnt,'mask')\n                util.makedirs(origindir)\n                util.makedirs(maskdir)\n\n                util.clean_tempfiles(opt)\n                ffmpeg.video2image(videopath, opt.temp_dir+'/video2image/%05d.'+opt.tempimage_type,\n                    start_time = timestamp,last_time = util.second2stamp(opt.time))\n                \n                endtime = datetime.datetime.now()\n                print(str(video_cnt)+'/'+str(len(videopaths))+' ',\n                    util.get_bar(100*video_cnt/len(videopaths),35),'',\n                    util.second2stamp((endtime-starttime).seconds)+'/'+util.second2stamp((endtime-starttime).seconds/video_cnt*len(videopaths)))\n\n                imagepaths = util.Traversal(opt.temp_dir+'/video2image')\n                imagepaths = sorted(imagepaths)\n                imgs=[];masks=[]\n                # mask_flag = False\n                # for imagepath in imagepaths:\n                #     img = impro.imread(imagepath)\n                #     mask = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0]\n                #     imgs.append(img)\n                #     masks.append(mask)\n                #     if not mask_flag:\n                #         mask_avg = mask.astype(np.float64)\n                #         mask_flag = True\n                #     else:\n                #         mask_avg += mask.astype(np.float64)\n\n                # mask_avg = np.clip(mask_avg/len(imagepaths),0,255).astype('uint8')\n                # mask_avg = impro.mask_threshold(mask_avg,20,64)\n                # if not opt.all_mosaic_area:\n                #     mask_avg = impro.find_mostlikely_ROI(mask_avg)\n                # x,y,size,area = impro.boundingSquare(mask_avg,Ex_mul=random.uniform(1.1,1.5))\n                \n                # for i in range(len(imagepaths)):\n                #     img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC) \n                #     mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)\n                #     impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img)\n                #     impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask)\n                ex_mul = random.uniform(1.2,1.7)\n                positions = []\n                for imagepath in imagepaths:\n                    img = impro.imread(imagepath)\n                    mask = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0]\n                    
imgs.append(img)\n masks.append(mask)\n x,y,size,area = impro.boundingSquare(mask,Ex_mul=ex_mul)\n positions.append([x,y,size])\n positions =np.array(positions)\n for i in range(3):positions[:,i] = filt.medfilt(positions[:,i],opt.medfilt_num)\n\n for i,imagepath in enumerate(imagepaths):\n x,y,size = positions[i][0],positions[i][1],positions[i][2]\n tmp_cnt = i\n while sizeopt.minsize//4:\n # if not opt.all_mosaic_area:\n # mask_avg = impro.find_mostlikely_ROI(mask_avg)\n # x,y,size,area = impro.boundingSquare(mask_avg,Ex_mul=ex_mul)\n # img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)\n # mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)\n # impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img)\n # impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask)\n\n\n result_cnt+=1\n\n except Exception as e:\n video_cnt +=1\n util.writelog(os.path.join(opt.savedir,'opt.txt'), \n videopath+'\\n'+str(result_cnt)+'\\n'+str(e))\n video_cnt +=1\n if opt.gpu_id != '-1':\n torch.cuda.empty_cache()\n","repo_name":"HypoX64/DeepMosaics","sub_path":"make_datasets/make_video_dataset.py","file_name":"make_video_dataset.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","stars":1768,"dataset":"github-code","pt":"81"} +{"seq_id":"25425863262","text":"import os\nimport subprocess\nimport glob\nimport itertools\n\njobs_to_run = glob.glob('../enumerate_states/validation_set/*_states.json')\n\nfor dir in jobs_to_run:\n name = dir.split('/')[-1]\n name = name[:-12]\n print(name)\n with open('fragment.lsf', 'r') as f:\n filedata = f.read()\n filedata = filedata.replace('JOB_NAME', name)\n\n bsub_file = os.path.join(os.getcwd(), '{}_fragment.lsf'.format(name))\n with open(bsub_file, 'w') as f:\n f.write(filedata)\n\n stdin_file = open(bsub_file, 'r')\n subprocess.Popen(['bsub'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin_file)\n stdin_file.close()\n","repo_name":"choderalab/fragmenter_data","sub_path":"combinatorial_fragmentation/pfizer/submit_fragment.py","file_name":"submit_fragment.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"10775211525","text":"def solution(m, musicinfos):\n answer = dict()\n data = list(map(lambda x: x.split(','), musicinfos))\n for i in data:\n i[0:2] = [(int(i[1][:2]) - int(i[0][:2])) * 60 + int(i[1][3:]) - int(i[0][3:])]\n temp = list()\n for j in range(len(i[2]) - 1):\n if i[2][j] == '#':\n continue\n temp.append(i[2][j]) if i[2][j + 1] != '#' else temp.append(i[2][j:j+2])\n temp.append(i[2][-1]) if i[2][-1] != '#' else None\n i[2] = temp\n if i[0] < len(i[2]):\n i[2] = i[2][:i[0]]\n else:\n r = i[0] // len(i[2])\n l = i[0] % len(i[2])\n i[2] = i[2] * r + i[2][:l]\n for d in data:\n for find in range(len(d[2]) - (len(m) - m.count('#')) + 1):\n if ''.join(d[2][find:find+len(m) - m.count('#')]) == m:\n answer[d[1]] = d[0]\n name, time = '(None)', 0\n for n, t in answer.items():\n if time < t:\n name, time = n, t\n return name\n\n\n\n\n# mm = 'ABCDEFG'\n# mi = ['12:00,12:14,HELLO,CDEFGAB', '13:00,13:05,WORLD,ABCDEF']\n\nmm = 'CC#BCC#BCC#BCC#B'\nmi = ['03:00,03:30,FOO,CC#B', '04:00,04:12,BAR,CC#BCC#BCC#B']\n\n# mm = 'ABC'\n# mi = ['12:00,12:14,HELLO,C#DEFGAB', '13:00,13:05,WORLD,ABCDEF']\n\nprint(solution(mm, 
mi))\n","repo_name":"wnstjr9711/Study","sub_path":"프로그래머스/2018kakao/방금그곡.py","file_name":"방금그곡.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18462943520","text":"import os\nimport smtplib\nfrom email.message import EmailMessage\n\nfrom bs4 import BeautifulSoup\nfrom dotenv import load_dotenv\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef amc_movies_alert(\n amc_theater_link: str, imdb_link: str = r\"https://www.imdb.com/\"\n) -> None:\n \"\"\"Main function to send email of movies, ratings, and descriptions\n\n Web scrape AMC and IMDB links and email\n yourself a list of movies with an IMDB rating of >= 7.0\n\n Parameters\n ------------\n amc_theater_link: str\n The link to your local AMC theater\n imdb_link: str\n The link to IMDB's homepage\n \"\"\"\n # get the driver for chrome browser\n driver = get_chrome_webdriver()\n\n # open AMC link first and scrape a list of all movies from the dropdown\n movies = get_amc_movies(driver, amc_theater_link)\n\n # get the ratings and descriptions of each movie in a dict\n movie_dict = get_imdb_data(movies, driver, imdb_link)\n\n # send email to desired recipient using movie_dict\n send_email_alert(movie_dict)\n\n\ndef get_chrome_webdriver() -> webdriver:\n \"\"\"Create chrome webdriver using selenium to web scrape AMC and IMDB links\n\n Return a webdriver\n \"\"\"\n options = Options()\n options.add_experimental_option(\"detach\", True)\n options.add_argument(\"--start-maximized\")\n driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n return driver\n\n\ndef get_amc_movies(driver: webdriver, amc_theater_link: str) -> list:\n \"\"\"Scrape a list of movies from the 'all movies\" dropdown on AMC's website\n\n Return a list\n \"\"\"\n driver.get(f\"{amc_theater_link}\")\n movie_dropdown = driver.find_element(By.ID, \"showtimes-movie-title-filter\")\n to_remove = [\"All Movies\", \"\"]\n # only grab the movies that don't have the above filter\n movies = [\n movie.text.strip()\n for movie in movie_dropdown.find_elements(By.TAG_NAME, \"option\")\n if movie.text not in to_remove\n ]\n # filter the list of movies further by removing duplicate movie names\n filtered_movies = []\n for i in range(len(movies)):\n # for example, we want 'guardians of the galaxy vol 3'\n # not 'guardians of the galaxy vol 3: private theater rental'\n if i == 0 or not movies[i].startswith(\n movies[i - 1]\n ): # This checks whether the current movie's name does not start\n # with the name of the previous movie in the list\n filtered_movies.append(movies[i])\n return filtered_movies\n\n\ndef get_imdb_data(movies: list, driver: webdriver, imdb_link: str) -> dict:\n \"\"\"Scrape the IMDB rating and description for each movie.\n\n Return a dictionary\n \"\"\"\n # open a new tab for IMDB link\n driver.execute_script(\"window.open('{}');\".format(imdb_link))\n window_after = driver.window_handles[1]\n driver.switch_to.window(window_after)\n\n # create empty dictionary to store\n # movie as key and rating/description as list of values\n movie_dict = {}\n # loop through each movie and scrape the rating and description\n for movie in movies:\n try:\n # click the search bar and enter the name of the movie\n search_bar = driver.find_element(By.ID, 
\"suggestion-search\")\n search_bar.click()\n search_bar.send_keys(f\"{movie}\")\n\n # click the search button\n search_button = driver.find_element(By.ID, \"suggestion-search-button\")\n search_button.click()\n\n # loop through each search result element\n # and check if the title of the movie matches\n for result in driver.find_elements(\n By.CLASS_NAME, \"ipc-metadata-list-summary-item__t\"\n ):\n if result.text.lower() == movie.lower():\n # click on the link to the movie's page\n driver.execute_script(\"arguments[0].click();\", result)\n break\n except NoSuchElementException:\n pass\n\n # get the page_source and parse the HTML using BeautifulSoup\n page_source = driver.page_source\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n # scrape the rating and description and insert to movie_dict\n try:\n rating = soup.find(\"span\", {\"class\": \"sc-bde20123-1 iZlgcd\"}).text.strip()\n description = soup.find(\n \"span\", {\"class\": \"sc-2eb29e65-0 hOntMS\"}\n ).text.strip()\n movie_dict[movie] = [rating, description]\n except AttributeError:\n pass\n driver.quit()\n return movie_dict\n\n\ndef send_email_alert(movie_dict: dict) -> None:\n \"\"\"Send email using movie_dict to construct message\"\"\"\n # Load the environment variables from the .env file\n load_dotenv()\n username = os.getenv(\"ACC_USERNAME\")\n password = os.getenv(\"PASSWORD\")\n recipient_email = os.getenv(\"RECIPIENT_EMAIL\")\n\n # Format the values as a string with line breaks and paragraphs\n movies_text = \"\"\n MOVIE_RATING_THRESHOLD = 7.0\n for movie, values in movie_dict.items():\n rating, description = values\n # only grab movies that have an IMDB rating of 7 or higher\n if float(rating) >= MOVIE_RATING_THRESHOLD:\n movie_text = f\"

    {movie}:

    IMDB Rating: {rating}

    Description: {description}

    \"\n movies_text += movie_text\n\n # construct email\n msg = EmailMessage()\n msg.add_alternative(movies_text, subtype=\"html\")\n msg[\"subject\"] = \"Movie Ratings and Descriptions\"\n msg[\"to\"] = recipient_email\n msg[\"from\"] = username\n\n # send email\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls() # setting gmail requires\n server.login(username, password)\n server.send_message(msg)\n server.quit\n\n\nif __name__ == \"__main__\":\n amc_theater_link: str = rf\"{os.getenv('AMC_LINK')}\"\n amc_movies_alert(amc_theater_link, imdb_link=r\"https://www.imdb.com/\")\n","repo_name":"agbulosk/Movies-Email-Alert","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24683660066","text":"# Author: Isabella Samuelsson\n# Date: 10/7/22\nimport sys\nfrom socket import *\n\n\"\"\"\nAuction client class. If you are the first to connect to the server you will be connected as a seller client, if not you \nwill be connected as a buyer client. Bids must be an integer greater than zero.\n\nSeller Client: Once connected you will be prompted to enter auction information. This includes Auction Type: 1 \nfor a first price auction and 2 for a second price auction, Minimum Bid price: non-negative integer, Number of Bidders: \nnon-negative integer less than 10 and Item Name: string. If you enter invalid auction information you will be prompted for valid information before continuing.\n\nBuyer Client: Once connected you will be prompted to enter a bid. A bid should be a non-negative integer, if an invalid \nbid is given you will be prompted again.\n\nRun example: python3 auc_client.py server_ip_address server_port_number\n\"\"\"\nclass auc_client:\n # default server name and port\n serverName = \"192.168.0.15\"\n serverPort = 12345\n\n \"\"\" Initializes server name and port from run command arguments and starts the main() function.\"\"\"\n def __init__(self):\n self.serverName = sys.argv[1]\n self.serverPort = int(sys.argv[2])\n self.main()\n\n \"\"\" \n Creates a connection to the auction server. \n - If you are the first to connect to the server you will be connected as a seller client, if not you will be \n connected as a buyer client. \n - If the client connects when the sever is busy setting up a seller connection or the server is busy handling \n bidding the client will receive a \"connect again later\" message and the client will close the socket and exit. 
\n \"\"\"\n def main(self):\n clientSocket = socket(AF_INET, SOCK_STREAM)\n clientSocket.connect((self.serverName, self.serverPort)) # client connection\n\n client_status_msg = clientSocket.recv(1024).decode()\n if \"connect again later\" in client_status_msg: # if server sends a busy msg disconnect and exit\n print(client_status_msg)\n clientSocket.close()\n exit()\n if \"Seller\" in client_status_msg: # if client is a buyer prompt for auction info\n auc_info_msg = input(client_status_msg)\n clientSocket.send(auc_info_msg.encode())\n received_auc_info = clientSocket.recv(1024).decode()\n\n while \"Invalid\" in received_auc_info: # if invalid auction info given prompt again\n new_auc_info = input(received_auc_info)\n clientSocket.send(new_auc_info.encode())\n received_auc_info = clientSocket.recv(1024).decode()\n\n print(received_auc_info)\n\n auction_finished = clientSocket.recv(1024).decode()\n print(auction_finished)\n\n else: # if client is a seller wait for bid start\n print(client_status_msg)\n did_bid_start = clientSocket.recv(1024).decode()\n if \"waiting\" in did_bid_start:\n print(did_bid_start)\n did_bid_start = clientSocket.recv(1024).decode()\n\n bid = input(did_bid_start) # at bid start prompt for bid\n clientSocket.send(bid.encode())\n received_bid = clientSocket.recv(1024).decode()\n\n while \"Invalid\" in received_bid: # if bid is invalid prompt again\n new_bid = input(received_bid)\n clientSocket.send(new_bid.encode())\n received_bid = clientSocket.recv(1024).decode()\n\n print(received_bid)\n\n auction_finished = clientSocket.recv(1024).decode() # print auction result and disconnect\n print(auction_finished)\n\n clientSocket.close()\n\n\"\"\" Creates a client object. \"\"\"\nif __name__ == \"__main__\":\n client = auc_client()\n\n","repo_name":"insamuel/Auction_System","sub_path":"auc_client.py","file_name":"auc_client.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19514687792","text":"\n\"\"\"\n Manages Database Connection\n\"\"\"\n\n\nfrom pathlib import Path\nimport sqlite3\nimport textwrap\n\n\ndef create_connection(db_file: Path) -> sqlite3.Connection:\n \"\"\"\n Create a database connection to a SQLite database specified by db_file\n \"\"\"\n return sqlite3.connect(db_file)\n\n\ndef execute_script(db_conn: sqlite3.Connection, sql_script: str) -> sqlite3.Cursor:\n \"\"\"\n Execute an SQL script\n \"\"\"\n sql_script = textwrap.dedent(sql_script)\n return db_conn.executescript(sql_script)\n\n\ndef execute_select(db_conn: sqlite3.Connection, sql_query: str) -> list:\n \"\"\"\n Execute an SQL SELECT statement\n \"\"\"\n sql_query = textwrap.dedent(sql_query)\n db_conn.row_factory = sqlite3.Row\n query_result = db_conn.execute(sql_query)\n field_names = [desc[0] for desc in query_result.description]\n result_table = [\n {field: row[field] for field in field_names}\n for row in query_result\n ]\n return result_table\n","repo_name":"rommat/my-library","sub_path":"db_connection.py","file_name":"db_connection.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20647987569","text":"import requests # para requisições http\r\nimport json # para gerar JSON a partir de objetos do Python\r\nfrom bs4 import BeautifulSoup # BeautifulSoup é uma biblioteca Python de extração de dados de arquivos HTML e XML.\r\nimport re\r\nimport time\r\nfrom sports import 
popular_sports\r\n\r\n# Press the green button in the gutter to run the script.\r\n\r\ndef analisaSite(site_analisado, lista_resultados, headers):\r\n try:\r\n requisicaoDePagina = requests.get(site_analisado, headers=headers)\r\n\r\n conteudo = requisicaoDePagina.content\r\n\r\n site = BeautifulSoup(conteudo, 'html.parser', from_encoding='iso-8859-1')\r\n\r\n esporte = site.find_all('h3')\r\n\r\n\r\n pattern = r'(\\d+)[\\s\\W]*(' + '|'.join(map(re.escape, popular_sports)) + r')\\b'\r\n\r\n comp = re.compile(pattern, re.IGNORECASE)\r\n\r\n lista_sports_do_site = []\r\n\r\n for i in esporte:\r\n matches = re.findall(comp, str(i))\r\n if (len(matches) > 0):\r\n lista_sports_do_site.append(matches[0])\r\n\r\n if len(lista_sports_do_site) > 2:\r\n lista_resultados[site_analisado] = lista_sports_do_site\r\n\r\n sites_filhos = [a.get('href') for a in site.find_all('a') if a.get('href') and a.get('href').startswith('http')]\r\n\r\n j = 0\r\n\r\n while j < len(sites_filhos):\r\n\r\n if sites_filhos[j] in lista_resultados.keys() or sites_filhos[j].find(' ') > 0 or sites_filhos[j].find('.com') < 0:\r\n sites_filhos.pop(j)\r\n j -= 1\r\n j += 1\r\n\r\n return sites_filhos\r\n\r\n except requests.exceptions.RequestException:\r\n return []\r\n\r\n\r\nif __name__ == '__main__':\r\n file1 = open('out.json', 'w')\r\n tempo_inicial = time.time()\r\n lista_sites_mundo = ['https://sportsmonkie.com/most-popular-sports/',\r\n 'https://www.thetealmango.com/sports/most-popular-sport-in-the-world/',\r\n 'https://sportytell.com/sports/most-popular-sports-world/']\r\n\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/118.0'}\r\n lista_resultados = dict()\r\n\r\n sites_pais = lista_sites_mundo\r\n\r\n try:\r\n\r\n while True:\r\n sites_filhos = []\r\n\r\n for site_analisado in sites_pais:\r\n\r\n sites_filhos += analisaSite(site_analisado, lista_resultados, headers)\r\n\r\n sites_pais = sites_filhos\r\n\r\n except:\r\n tempo_final = time.time()\r\n print('O tempo de processamento foi de ' + str((tempo_final - tempo_inicial)//1) + ' segundos')\r\n print(lista_resultados)\r\n file1.write(json.dumps(lista_resultados, indent=4))\r\n\r\n\r\n","repo_name":"Boreias/INE5454-Web_Crawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17613756250","text":"import dbus\nimport logging\nfrom dbus.service import method as dbus_method\nfrom errors import InvalidArgsException, NotSupportedException\nfrom constants.dbus_interfaces import DBUS_OM_IFACE\n\n\nclass Application(dbus.service.Object):\n \"\"\"\n org.bluez.GattApplication1 interface implementation\n \"\"\"\n\n def __init__(self, bus):\n self.path = '/'\n self.services = []\n dbus.service.Object.__init__(self, bus, self.path)\n\n def get_path(self):\n return dbus.ObjectPath(self.path)\n\n def add_service(self, service):\n self.services.append(service)\n\n @dbus_method(DBUS_OM_IFACE, out_signature='a{oa{sa{sv}}}')\n def GetManagedObjects(self):\n response = {}\n logging.info('GetManagedObjects')\n\n for service in self.services:\n response[service.get_path()] = service.get_properties()\n chrcs = service.get_characteristics()\n for chrc in chrcs:\n response[chrc.get_path()] = chrc.get_properties()\n descs = chrc.get_descriptors()\n for desc in descs:\n response[desc.get_path()] = desc.get_properties()\n\n return 
response\n","repo_name":"CESARBR/knot-gateway-netsetup","sub_path":"netsetup/ble_util/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73461058826","text":"test_output = 'C:/Users/Patrick/Downloads/spider_google_play/test_output.txt'\nnew_test = 'C:/Users/Patrick/Downloads/spider_google_play/new_test.txt'\n\nimport random\n\nif __name__ == '__main__':\n test = open(test_output)\n ntest = open(new_test, 'w')\n\n pos = []\n neg = []\n reviews = []\n for line in test.readlines():\n if line[0] == '1' or line[0] == '2':\n neg.append(line)\n elif line[0] != '3':\n pos.append(line)\n n = 0\n if len(pos) > len(neg):\n n = len(neg)\n else:\n n = len(pos)\n\n for i in range(n):\n reviews.append(pos[i])\n reviews.append(neg[i])\n\n random.shuffle(reviews)\n\n ntest.write(str(float(n)) + ' ' + str(float(n)) + '\\n')\n\n for review in reviews:\n ntest.write(review)\n\n test.close()\n ntest.close()","repo_name":"foodvac/TestBird","sub_path":"generate_test.py","file_name":"generate_test.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26997103027","text":"#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/3 17:42\n# @File : conftest.py \n# @Author : 黄权权\n# @Software: PyCharm\n# @Desc : None\nimport pytest\nfrom pylib.APIlib.contractsAPI import ContractsAPI\n\n\n@pytest.fixture(scope=\"session\")\ndef init_contracts(admin_login, init_accounts, init_contractTypes, init_organiz):\n \"\"\"\n 初始化创建一个购房合同\n :param admin_login: 提供cookies信息\n :param init_accounts: 提供签约对象信息\n :param init_contractTypes: 提供合同分类信息\n :param init_organiz: 提供部门信息\n :return:\n \"\"\"\n contracts_api = ContractsAPI(admin_login)\n new_contract = contracts_api.add(name=\"购房合同\",\n amount=50000,\n othercompany=init_accounts[1][\"_id\"],\n contract_type=init_contractTypes[1][\"_id\"],\n company_id=init_organiz[1][\"_id\"])\n yield contracts_api, new_contract\n contracts_api.delete(new_contract[\"_id\"])\n","repo_name":"Hquanquan/Auto_project_testing_framework","sub_path":"testcase/API接口测试用例/D-管理员登录/D-销售部/D-签约对象-VIP客户/D-合同类型-房屋合同/D-购房合同/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31517535045","text":"import random\nimport math\nimport copy\n\nCOMPUTER_VARIABLE = 0\nPLAYER_VARIABLE = 2\nAMBIGUOUS = 1\nINFINITE_VALUE = 100000000000000000000000000000000000000\ndef drawgrid(board, game_size):\n\tprint(\"-\" * ((4 * game_size) + 1))\n\tfor i in range(game_size):\n\t\tprint(\"|\", end = \"\", flush=True)\n\t\tfor j in range(game_size):\n\t\t\tprint(\"\" , board[i][j] , \"|\" , end=\"\" , flush=True)\n\t\tprint(\"\")\n\t\tprint(\"-\" * ((4 * game_size) + 1))\n\n\n#for x in range(game_size):\n# if board[x]\n#print()\n\t\n\ndef check_state(board, game_size):\n\tgame_on = True\n\tcomputer_win = 0\n\tdiagonalup = 0\n\tdiagonaldown = 0\n\tstring_board = str(board)\n\tfor m in range(game_size):\n\t\thorizontal = (sum(board[m]))\n\t\tif horizontal == 0:\n\t\t\tcomputer_win = 1\n\t\t\tgame_on = False\n\t\telif horizontal == game_size*2:\n\t\t\tgame_on = False\n\tfor m in range(game_size):\n\t\tvertical = (sum(i[m] for i in board))\n\t\tif vertical == 0:\n\t\t\tcomputer_win = 1\n\t\t\tgame_on = False\n\t\telif vertical == game_size*2:\n\t\t\tgame_on = 
False\n\t\t\tdiagonal = 0\n\tfor m in range(game_size):\n\t\tdiagonalup += board[m][m]\n\tif diagonalup == 0:\n\t\tcomputer_win = 1\n\t\tgame_on = False\n\telif diagonalup == game_size*2:\n\t\tgame_on = False\n\tfor m in range(game_size):\n\t\tdiagonaldown += board[m][game_size-1-m]\n\tif diagonaldown == 0:\n\t\tcomputer_win = 1\n\t\tgame_on = False\n\telif diagonaldown == game_size*2:\n\t\tgame_on = False\n\tif \"1\" not in string_board:\n\t\tcomputer_win = 2\n\t\tgame_on = False\n\t\tprint(board)\n\treturn game_on, computer_win\n\t\ndef check_game_win_tree(new_state, game_size):\n\tcomputer_win = 2\n\tcomputer_has_won = -3\n\tcomputer_has_lost = 3\n\trows_to_check=[0]*game_size\n\tcolumns_to_check=[0]*game_size\n\tdiagonal_down_to_check_computer = 0\n\tdiagonal_up_to_check_computer = 0\n\tdiagonal_down_to_check_player = 0\n\tdiagonal_up_to_check_player = 0\n\tfor x,y in new_state[0]: #subtracts one from each row/column value per times a player or computer has moved there\n\t\trows_to_check[y] -= 1\n\t\tcolumns_to_check[x] -= 1\n\tfor x,y in new_state[2]:\n\t\trows_to_check[y] += 1\n\t\tcolumns_to_check[x] += 1\n\tif computer_has_lost in rows_to_check:\n\t\tcomputer_win = PLAYER_VARIABLE+1\n\tif computer_has_won in rows_to_check:\n\t\tcomputer_win = COMPUTER_VARIABLE+1\n\tif computer_has_lost in columns_to_check:\n\t\tcomputer_win = PLAYER_VARIABLE+1\n\tif computer_has_won in columns_to_check:\n\t\tcomputer_win = COMPUTER_VARIABLE+1\n\t\t\n\tfor x,y in new_state[0]:\n\t\tif x == y:\n\t\t\tdiagonal_down_to_check_computer+=1\n\t\tif x == 2-y:\n\t\t\tdiagonal_up_to_check_computer+=1\n\tfor x,y in new_state[2]:\n\t\tif x == y:\n\t\t\tdiagonal_down_to_check_player+=1\n\t\tif x == 2-y:\n\t\t\tdiagonal_up_to_check_player+=1\n\t\t\t#######################################################\n\tif diagonal_down_to_check_computer == 3:\n\t\tcomputer_win = COMPUTER_VARIABLE+1\n\tif diagonal_up_to_check_computer == 3:\n\t\tcomputer_win = COMPUTER_VARIABLE+1\n\tif diagonal_down_to_check_player == 3:\n\t\tcomputer_win = PLAYER_VARIABLE+1\n\tif diagonal_up_to_check_player == 3:\n\t\tcomputer_win = PLAYER_VARIABLE+1\n\t\t\n\treturn computer_win\n\n\ndef build_tree(board, game_size):\n\tcomputer_value = COMPUTER_VARIABLE # TODO\n\tplayer_value = PLAYER_VARIABLE\n\t\n\n\tboard_as_dict = {0:[], 1:[], 2:[]}\n\tlinks_dict = {}# shows the possible moves that can be made after this board.\n\tstates_dict = {}# the dictionary for all of the actual boards stored in numbers(nodes)\n\tplayer_turn_to_move= {}\n\tstate_index_counter = 0\n\tstack = []\n\t# make the initial state and add it to states_dict\n\tfor y in range(game_size):\n\t\tfor x in range(game_size):\n\t\t\tcell_value = board[y][x]\n\t\t\tboard_as_dict[cell_value].append((x,y))#board_as_dict is now a dict with the positions of the boxes on the values that they have.\n\tstates_dict[state_index_counter] = board_as_dict\n\tstack.append(state_index_counter)\n\tplayer_turn_to_move[state_index_counter] = computer_value\n\tlinks_dict[state_index_counter] = []\n\n\tstate_index_counter += 1 # same as state_index_counter = state_index_counter + 1\n\n\t# build the tree from the initial state (i.e. 
populate links_dict and states_dict)\n\twhile len(stack) > 0:\n\t\tstate_index = stack.pop()\n\t\tcurr_player = player_turn_to_move[state_index]\n\t\tcurr_state = states_dict[state_index]\n\t\tcomputer_win_in_tree = check_game_win_tree(curr_state, game_size)\n\t\tif computer_win_in_tree == 3 or computer_win_in_tree == 1:\n\t\t\tcontinue\n\t\tfor free_coordinate in curr_state[1]:\n\t\t\tnew_state = copy.deepcopy(curr_state)\n\t\t\t#move free_coordinate from [1] to [curr_player]\n\t\t\tnew_state[1].remove(free_coordinate)\n\t\t\tnew_state[curr_player].append(free_coordinate)\n\t\t\t\n\t\t\t# link to parent\n\t\t\tlinks_dict[state_index].append(state_index_counter)\n\t\t\t\n\t\t\t# initalise all the data for the new state\n\t\t\tstates_dict[state_index_counter] = new_state\n\t\t\tlinks_dict[state_index_counter] = []\n\t\t\tplayer_turn_to_move[state_index_counter] = 2 - curr_player\n\t\t\t\n\t\t\t# tidy up/preparefor next iteration of the 2 enclosing loops\n\t\t\t\n\t\t\tstack.append(state_index_counter)\n\t\t\t\n\t\t\tstate_index_counter += 1\n\t\t\t\n\n\n\treturn links_dict, states_dict, player_turn_to_move\n\n\t\t\n\t\ndef postorder(links_dict, states_dict, player_turn_to_move, minimax_tree, current_node, game_size):\n\tfor child in links_dict[current_node]:\n\t\tpostorder(links_dict, states_dict, player_turn_to_move, minimax_tree, child, game_size)\n\t#append value of current node to the minimax tree\n\tif len(links_dict[current_node]) == 0:\n\t\tgame_winning_state = check_game_win_tree(states_dict[current_node],game_size)\n\t\tminimax_tree[current_node] = 0\n\t\tif game_winning_state == PLAYER_VARIABLE+1:\n\t\t\tminimax_tree[current_node]+=1\n\t\telif game_winning_state == COMPUTER_VARIABLE+1:\n\t\t\tminimax_tree[current_node]-=1\n\telse:\n\t\tif player_turn_to_move[current_node] == COMPUTER_VARIABLE:\n\t\t\tminimum_constituents = links_dict[current_node]\n\t\t\tgroup_of_children=[]\n\t\t\tfor individual_child in minimum_constituents:\n\t\t\t\tgroup_of_children.append(minimax_tree[individual_child])\n\t\t\tminimax_tree[current_node] = min(group_of_children)\n\t\telse:\n\t\t\tmaximum_constituents = links_dict[current_node]\n\t\t\tgroup_of_children = []\n\t\t\tfor individual_child in maximum_constituents:\n\t\t\t\tgroup_of_children.append(minimax_tree[individual_child])\n\t\t\tminimax_tree[current_node] = max(group_of_children)\n####values represent the worth to the player where positive is player win and negative is computer win\n\ndef get_computer_move(board, game_size):\n\t#the players piece will always be two and the computer is 0\n\tcomputer_value = 0\n\tplayer_value = 2\n\t# DFS - build the tree\n\tlinks_dict, states_dict, player_turn_to_move = build_tree(board, game_size)\n\tminimax_tree = get_values(links_dict, states_dict, player_turn_to_move, game_size)\n\tnew_children = links_dict[0]\n\tsmallest_child = INFINITE_VALUE\n\tsmallest_child_value = INFINITE_VALUE\n\tfor child in new_children:\n\t\tif minimax_tree[child]<=smallest_child_value:\n\t\t\tsmallest_child = child\n\t\t\tsmallest_child_value = minimax_tree[smallest_child]\n\tif smallest_child == INFINITE_VALUE:\n\t\tquit()\n\tnew_board_in_states = states_dict[smallest_child]\n\tnew_board = [[1,1,1],[1,1,1],[1,1,1]]\n\tfor player in range(3):\n\t\tfor x,y in new_board_in_states[player]:\n\t\t\tnew_board[y][x] = player\n\treturn new_board\n\n\t\n\t\n\n\n\ndef get_values(links_dict, states_dict, player_turn_to_move, game_size):\n\tcurrent_node = 0\n\tminimax_tree = {}\n\tpostorder(links_dict, states_dict, player_turn_to_move, 
minimax_tree, current_node, game_size)\n\treturn minimax_tree\n\t\t\t\ndef main():\t\n\tgame_on = True\n\tcomputer_win = 0\n\t#0 for computer 1 for player 2 for actual computer ai\n\tplayer_turn = int(input(\"Do you wanna go first? Type 1 for yes, 0 for no\\n\"))\n\tgame_size = int(input(\"What is the size of the grid that you want to play on?\\n\"))\n\tboard = []\n\tfor x in range(game_size):\n\t\tboard.append([1] * game_size)\n\tdrawgrid(board, game_size)\n\twhile game_on:\n\t\tif player_turn == 1:\n\t\t\ta, b = input(\"Your turn! Please type your position in this format x,y\\n\").replace(\" \", \"\").split(\",\")\n\t\t\tx, y = int(a), int(b)\n\t\t\tboard[game_size-y-1][x] = 2\n\t\t\tplayer_turn = 2\n\t\telif player_turn == 0:\n\t\t\tcorrect_turn = 0\n\t\t\tprint(\"My turn!\")\n\t\t\twhile correct_turn == 0: #optimisation to randomise first move\n\t\t\t\ty = random.randint(0,2)\n\t\t\t\tx = random.randint(0,2)\n\t\t\t\tif board[game_size-y-1][x] == 1:\n\t\t\t\t\tboard[game_size-y-1][x] = 0\n\t\t\t\t\tcorrect_turn = 1\n\t\t\tplayer_turn = 1\n\t\telif player_turn == 2: #minimax here\n\t\t\tboard = get_computer_move(board, game_size)\n\t\t\tplayer_turn = 1\n\t\tdrawgrid(board, game_size)\n\t\tgame_on, computer_win = check_state(board, game_size)\n\n\tif computer_win == COMPUTER_VARIABLE+1: #1\n\t\tprint(\"I win!\")\n\telif computer_win == AMBIGUOUS+1: #2\n\t\tprint(\"It's a draw!\")\n\telif computer_win ==PLAYER_VARIABLE+1: #3\n\t\tprint(\"You win!\")\n\nmain()\t\t\t\n\n","repo_name":"tigerater/minimax","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4259714022","text":"from django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom requests.exceptions import HTTPError, Timeout\n\nfrom datahub.core.api_client import APIClient, HawkAuth\nfrom datahub.core.exceptions import APIBadGatewayException\n\n\nclass ExportWinsAPIError(Exception):\n    \"\"\"\n    Base exception class for Export Wins API related errors.\n    \"\"\"\n\n\nclass ExportWinsAPIHTTPError(ExportWinsAPIError):\n    \"\"\"\n    Exception for all HTTP errors.\n    \"\"\"\n\n\nclass ExportWinsAPITimeoutError(ExportWinsAPIError):\n    \"\"\"\n    Exception for when a timeout was encountered when connecting to Export Wins API.\n    \"\"\"\n\n\nclass ExportWinsAPIConnectionError(ExportWinsAPIError):\n    \"\"\"\n    Exception for when an error was encountered when connecting to Export Wins API.\n    \"\"\"\n\n\ndef fetch_export_wins(match_ids, request=None):\n    \"\"\"\n    Queries the Export Wins API with the given list of match ids.\n    Export Wins API takes either a single match id or comma separated\n    list of match ids.\n    \"\"\"\n    if not all([\n        settings.EXPORT_WINS_SERVICE_BASE_URL,\n        settings.EXPORT_WINS_HAWK_ID,\n        settings.EXPORT_WINS_HAWK_KEY,\n    ]):\n        raise ImproperlyConfigured('The all EXPORT_WINS_SERVICE* setting must be set')\n\n    match_ids_str = ','.join(list(map(str, match_ids)))\n\n    api_client = APIClient(\n        api_url=settings.EXPORT_WINS_SERVICE_BASE_URL,\n        auth=HawkAuth(settings.EXPORT_WINS_HAWK_ID,\n                      settings.EXPORT_WINS_HAWK_KEY),\n        raise_for_status=True,\n        default_timeout=settings.DEFAULT_SERVICE_TIMEOUT,\n    )\n\n    return api_client.request(\n        'GET',\n        f'wins/match?match_id={match_ids_str}',\n        timeout=3.0,\n    )\n\n\ndef get_export_wins(match_ids, request=None):\n    \"\"\"\n    Get all export wins for all given company match_ids.\n\n    `match_ids` is a list of match ids from the Company matching service.\n    Raises an exception for requests.exceptions.HTTPError, timeout and connection errors.\n    \"\"\"\n    try:\n        response = fetch_export_wins(match_ids, request)\n    except APIBadGatewayException as exc:\n        error_message = 'Export Wins API service unavailable'\n        raise ExportWinsAPIConnectionError(error_message) from exc\n    except Timeout as exc:\n        error_message = 'Encountered a timeout interacting with Export Wins API'\n        raise ExportWinsAPITimeoutError(error_message) from exc\n    except HTTPError as exc:\n        error_message = (\n            'The Export Wins API returned an error status: '\n            f'{exc.response.status_code}',\n        )\n        raise ExportWinsAPIHTTPError(error_message) from exc\n    return response\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/company/export_wins_api.py","file_name":"export_wins_api.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"37660252260","text":"import logging\n\ndef setup(client):\n\n    # Bot terminal logs:\n    logger = logging.getLogger('discord')\n    logger.setLevel(logging.DEBUG)\n    handler = logging.FileHandler(filename='terminal.log', encoding='utf-8', mode='w')\n    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\n    logger.addHandler(handler)","repo_name":"r47orr/fenix-robotica","sub_path":"terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19186620951","text":"class Solution:\n    def partition(self, s):\n        \"\"\"\n        :type s: str\n        :rtype list[list[str]]\n        \"\"\"\n        answers = []\n        self.backtrack([], s, 0, answers)\n        return answers\n\n    def backtrack(self, so_far, s, start, answers):\n        if start == len(s):\n            answers.append(so_far.copy())\n        else:\n            for i in range(start, len(s)):\n                if self.is_palindrome(s, start, i):\n                    so_far.append(s[start:i+1])\n                    self.backtrack(so_far, s, i + 1, answers)\n                    so_far.pop()\n\n    def is_palindrome(self, s, low, high):\n        while low < high:\n            if s[low] != s[high]:\n                return False\n            low += 1\n            high -= 1\n        return True\n\nif __name__ == '__main__':\n    sol = Solution()\n    print(sol.partition('aab'))","repo_name":"pololee/oj-leetcode","sub_path":"problems/p131/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7216962914","text":"import os\n\nimport seaborn as sns\n\nfrom EnvQ import ACTION_HIGH, STATE_SIZE\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\n\n\ndef plot_combination(dict_, tag=\"\", type=\"scatter\", default_folder=\"images\", scale='normal'):\n    if type == \"scatter\":\n        sns.set_theme(style=\"whitegrid\")\n        sns.set_palette(\"Set2\")\n    else:\n        sns.set_theme(style=\"darkgrid\", font='Latin Modern Roman')\n        sns.set_palette(\"husl\")\n    for key in dict_:\n        y = list(range(len(dict_[key])))\n        if type == \"scatter\":\n            plt.scatter(y, dict_[key], label=key)\n        else:\n            plt.plot(y, dict_[key], label=key)\n    plt.tight_layout() \n    if type == \"scatter\":\n        plt.xticks(fontsize=8, rotation=45)\n        plt.yticks(fontsize=8, rotation=45) \n    plt.legend(prop={'size': 8})\n    plt.title(tag, fontweight=\"bold\")\n    fig1 = plt.gcf()\n    ax = plt.gca()\n    if scale == 'log':\n        ax.set_yscale('log')\n        # ax.set_xscale('log') \n    plt.show()\n    if len(tag) >0:\n        if not os.path.exists(default_folder):\n            os.makedirs(default_folder)\n        
fig1.savefig(default_folder+\"/\"+tag+\"_\"+type+'.png')\n \n \ndef plot_x_y(x, y, type=\"scatter\", tag=\"\", scale='normal', default_folder=\"images\"):\n if type == \"scatter\":\n sns.set_theme(style=\"whitegrid\")\n sns.set_palette(\"Set2\")\n else:\n sns.set_theme(style=\"darkgrid\", font='Latin Modern Roman')\n sns.set_palette(\"husl\")\n if type == \"scatter\":\n plt.scatter(x, y)\n else:\n plt.plot(x, y)\n plt.tight_layout() \n if type == \"scatter\":\n plt.xticks(fontsize=8, rotation=45)\n plt.yticks(fontsize=8, rotation=45) \n # plt.legend(prop={'size': 8})\n plt.title(tag, fontweight=\"bold\")\n fig1 = plt.gcf()\n ax = plt.gca()\n if scale == 'log':\n ax.set_xscale('log')\n plt.show()\n if len(tag) >0:\n if not os.path.exists(default_folder):\n os.makedirs(default_folder)\n fig1.savefig(default_folder+\"/\"+tag+\"_\"+type+'.png')\n\ndef plot_q(q, tag=\"\"):\n action_highs = q[:, 1]\n action_lows = q[:, 0]\n y = list(range(len(action_highs)))\n # plot lines\n plt.plot(y, action_highs, label=\"High Action\")\n plt.plot(y, action_lows, label=\"Low Action\")\n plt.legend()\n plt.title(tag)\n plt.show()\n\n\ndef plot_dict(a, tag=\"\"):\n sns.set_theme()\n od = OrderedDict(sorted(a.items()))\n plt.bar(range(len(od)), od.values())\n plt.title(tag)\n plt.show()\n \n\n\ndef plot_list(a, tag=\"\"):\n plt.bar(range(len(a)), a)\n plt.title(tag)\n plt.show()\n\n\ndef plot_policy(policy, label=\"\", tag=\"\", type=\"A\", default_folder=\"images\"):\n q_high = [row[ACTION_HIGH] for row in policy]\n y_line = list(range(STATE_SIZE))\n if type == \"A\":\n sns.set_theme(style=\"whitegrid\")\n sns.set_palette(\"Set2\")\n else:\n sns.set_theme(style=\"darkgrid\", font='Latin Modern Roman')\n sns.set_palette(\"husl\") \n plt.scatter(y_line, q_high, alpha=0.9, label=label) \n plt.legend()\n plt.title(tag)\n fig1 = plt.gcf()\n plt.show()\n if len(tag) >0:\n if not os.path.exists(default_folder):\n os.makedirs(default_folder)\n fig1.savefig(default_folder+\"/\"+tag+\"_\"+type+'.png')\n \n\n\ndef plot_difference(v1, v2, tag=\"\"):\n zip_object = zip(v1, v2)\n difference = []\n for v_1, v_2 in zip_object:\n difference.append(v_1 - v_2)\n plt.bar(range(len(difference)), difference)\n plt.title(tag)\n plt.show()\n\nif __name__ == '__main__':\n d = {'help': [-663.3323, -663.4832, -1150.8296],\n 'im not ok': [1.e-02, 1.e+00, 1.e+02]\n }\n\n plot_combination(d, tag=\"Ayuda\", type=\"line\", scale=\"log\")\n # plot_combination(d, tag=\"Ayuda\")","repo_name":"aakash94/UPFRL","sub_path":"src/hw3/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73218617545","text":"def fizz_buzz(nums):\n \"\"\"\n Classic FizzBuzz: multiples of 3: Fizz, multiples of 5: Buzz\n \"\"\"\n fizz_buzz = {3: \"Fizz\", 5: \"Buzz\"}\n out = []\n for n in nums:\n ans = \"\"\n for i, v in fizz_buzz.items():\n if n % i == 0:\n ans += v\n out.append(ans if ans else n)\n return out\n\n\ndef decomp(num):\n while num:\n yield num % 10\n num //= 10\n\n\ndef num_dig(r, n):\n \"\"\" Returns count of numbers from 1 to n which contain digit r. 
\"\"\"\n return sum(str(r) in str(i) for i in range(1, n+1))\n\n\n\nprint(num_dig(6, 600000))\n","repo_name":"arcaputo3/algorithms","sub_path":"algos_and_data_structures/fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20991551591","text":"\"\"\"\n给定一个二叉树,判断其是否是一个有效的二叉搜索树。\n\n假设一个二叉搜索树具有如下特征:\n\n节点的左子树只包含小于当前节点的数。\n节点的右子树只包含大于当前节点的数。\n所有左子树和右子树自身必须也是二叉搜索树。\n示例 1:\n\n输入:\n 2\n / \\\n 1 3\n输出: true\n示例 2:\n\n输入:\n 5\n / \\\n 1 4\n  / \\\n  3 6\n输出: false\n解释: 输入为: [5,1,4,null,null,3,6]。\n  根节点的值为 5 ,但是其右子节点值为 4 。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/validate-binary-search-tree\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n # 60ms 16.1MB\n # 递归 注意 根节点的处理以及节点值是0的处理\n if not root:\n return True\n\n return self.vaild_tree(root.left, left_val=None, right_val=root.val) and \\\n self.vaild_tree(root.right, left_val=root.val, right_val=None)\n\n def vaild_tree(self, root, left_val=None, right_val=None):\n if not root:\n return True\n\n if (left_val != None and root.val <= left_val) or (right_val != None and root.val >= right_val):\n return False\n\n return self.vaild_tree(root.left, left_val, root.val) and \\\n self.vaild_tree(root.right, root.val, right_val)\n\n\nroot = TreeNode(0)\n# left = TreeNode(1)\nright = TreeNode(-1)\n# root.left = left\nroot.right = right\n\n\ntest = Solution()\nprint(test.isValidBST(\n root\n))","repo_name":"flashlightli/math_question","sub_path":"leetcode_question/mid_question/98_Validate_Binary_Search_Tree.py","file_name":"98_Validate_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13378647158","text":"M = [[1,0,0],[1,1,1],[1,2,4]]\n#Calculates the inverse for a specified square matrix M.\n\ndef mslice(M, r, c):\n N = []\n for row in M:\n newrow = row[::]\n N.append(newrow)\n N.pop(r)\n for column in N:\n column.pop(c)\n return N\n\ndef is_square(M):\n square = True\n for row in M:\n if len(row) != len(M):\n square = False\n return square\n\ndef det(M):\n result = 0\n if is_square(M):\n if len(M) == 1:\n return M[0][0]\n else:\n for i in range(0,len(M[0])):\n result = result + pow(-1,i)*M[0][i]*det(mslice(M,0,i))\n else:\n print(\"Not a square matrix!\")\n result = None\n return result\n\ndef deepcopy(M):\n N = []\n for i in range(0,len(M)):\n N.append([])\n for j in range(0,len(M[i])):\n N[i].append(M[i][j])\n return N\n\ndef is_rectangle(M):\n rect = True\n row_length = len(M[0])\n for i in range(0, len(M)):\n if (len(M[i]) != row_length):\n rect = False\n return rect\n\ndef transpose(M):\n T = []\n if (is_rectangle(M)):\n column_length = len(M[0])\n row_length = len(M)\n for i in range (0, column_length):\n T.append([])\n for j in range (0, row_length):\n T[i].append(M[j][i])\n else:\n print(\"Non-rectangular matrix!\")\n return T\n\ndef inverse(M):\n N = []\n for i in range(0,len(M)):\n N.append([])\n for j in range(0,len(M[i])):\n N[i].append(det(mslice(M,i,j))/det(M)*pow(-1,(i+j)))\n N = transpose(N)\n return N\n\nprint(inverse(M))\n","repo_name":"ethanjensen/Python-Programs-Linear-Algebra","sub_path":"inverse function.py","file_name":"inverse 
function.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6428346376","text":"'''\nProblem: You are given an unordered array consisting of consecutive integers E [1, 2, 3, ..., n] without any duplicates. You are allowed to\n swap any two elements. You need to find the minimum number of swaps required to sort the array in ascending order.\n For example, given the array arr = [7, 1, 3, 2, 4, 5, 6] we perform the following steps:\n i arr swap (indices)\n 0 [7, 1, 3, 2, 4, 5, 6] swap (0,3)\n 1 [2, 1, 3, 7, 4, 5, 6] swap (0,1)\n 2 [1, 2, 3, 7, 4, 5, 6] swap (3,4)\n 3 [1, 2, 3, 4, 7, 5, 6] swap (4,5)\n 4 [1, 2, 3, 4, 5, 7, 6] swap (5,6)\n 5 [1, 2, 3, 4, 5, 6, 7]\n\n It took 5 swaps to sort the array.\n\n Function Description: Complete the function minimumSwaps in the editor below. It must return an integer representing the minimum number\n of swaps to sort the array.\n minimumSwaps has the following parameter(s):\n -> arr: an unordered array of integers\n\n Input Format: The first line contains an integer, n, the size of arr. The second line contains n space-separated integers arr[i].\n\n Constraints: 1 <= n <= 10^5\n 1 <= arr[i] <= n\n\n Output Format: Return the minimum number of swaps to sort the given array.\n'''\n# Approach: Visualize a graph and find cycles in it. To sort any cycle with n nodes it takes (n-1) swaps.\ndef minimumSwaps(arr):\n # inititalize variables\n swaps = 0\n\n # create a new array with node numbers and values.\n # subtracting 1 from each element to make it easier to compare with the index values.\n arr_graph = [*enumerate([x-1 for x in arr])]\n\n # initialize a boolean array to record node visit\n visited = [False] * len(arr)\n\n # traverse the nodes, until all are: visited = True \n for node, value in arr_graph:\n # If the enumerated index & value are equal, it means that the node is at the right place\n # Set visited to true and continue to the next node\n if node == value or visited[value] == True:\n continue\n\n # If the enumerated index & value are unequal, it means that the node is at the wrong place\n # set the node as visited\n cycle_size = 0\n value = node\n\n while not visited[value]:\n # set visited to true\n visited[value] = True\n\n # set the value of the node as the index and check the next node, until a visited node is found.\n value = arr_graph[value][1]\n\n # Increase the cycle size with each node redirection\n cycle_size += 1\n swaps += (cycle_size - 1)\n\n print(swaps)\n\nif __name__ == \"__main__\":\n # Accept input\n size, arr = int(input().rstrip()), [*map(int, input().rstrip().split())]\n\n # Call minimumSwaps\n minimumSwaps(arr)\n","repo_name":"AkashSiddharth/PythonWorkspace","sub_path":"DataStructure/Array/minimun_swaps.py","file_name":"minimun_swaps.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74133625865","text":"TIPOS = (('P', 'PASAPORTE'), ('I', 'CARTA DE IDENTIDAD EXTRANJERA'), ('N', 'NIE O TARJETA ESPAÑOLA DE EXTRANJEROS'),\n ('P', 'PASAPORTE'), ('I', 'CARTA DE IDENTIDAD EXTRANJERA'), ('N', 'NIE O TARJETA ESPAÑOLA DE EXTRANJEROS'),\n ('X', 'PERMISO DE RESIDENCIA DE ESTADO MIEMBRO DE LA UE'), ('', 'SELECCIONAR...'), ('D', 'DNI'),\n ('P', 'PASAPORTE'), ('C', 'PERMISO CONDUCIR ESPAÑOL'))\n\nTIPO_ESPANA = (('', 'SELECCIONAR...'), ('D', 'DNI'), ('P', 'PASAPORTE'), ('C', 'PERMISO CONDUCIR ESPAÑOL'))\n\nTIPO_UE = (('P', 
'PASAPORTE'), ('I', 'CARTA DE IDENTIDAD EXTRANJERA'), ('N', 'NIE O TARJETA ESPAÑOLA DE EXTRANJEROS'),\n ('X', 'PERMISO DE RESIDENCIA DE ESTADO MIEMBRO DE LA UE'))\n\nTIPO_OTRO = (('P', 'PASAPORTE'), ('I', 'CARTA DE IDENTIDAD EXTRANJERA'), ('N', 'NIE O TARJETA ESPAÑOLA DE EXTRANJEROS'))\n\np = (\n 'Alemania', 'Austria', 'Bulgaria', 'Bélgica', 'Chipre', 'Croacia', 'Dinamarca', 'Eslovaquia', 'Eslovenia', 'España',\n 'Estonia', '', 'Finlandia', 'Francia', 'Grecia', 'Hungria', 'Irlanda', 'Islandia', 'Italia', 'Letonia',\n 'Liechtenstein',\n 'Lituania', 'Luxemburgo', '', 'Malta', 'Noruega', 'Paises Bajos', 'Polonia', 'Portugal', 'Reino Unido',\n 'República Checa', 'Rumania', 'Suecia')\n\nP = (\n 'ALEMANIA', 'AUSTRIA', 'BULGARIA', 'BELGICA', 'CHIPRE', 'CROACIA', 'DINAMARCA', 'ESLOVAQUIA', 'ESLOVENIA', 'ESPAÑA',\n 'ESTONIA', 'FINLANDIA', 'FRANCIA', 'GRECIA', 'HUNGRIA', 'IRLANDA', 'ISLANDIA', 'ITALIA', 'LETONIA', 'LIECHTENSTEIN',\n 'LITUANIA', 'LUXEMBURGO', 'MALTA', 'NORUEGA', 'PAISES BAJOS', 'POLONIA', 'PORTUGAL', 'REINO UNIDO',\n 'REPUBLICA CHECA', 'RUMANIA', 'SUECIA')\n\nPAISES_EU = [('A9103AAAAA', 'ALEMANIA'), ('A9104AAAAA', 'AUSTRIA'), ('A9105AAAAA', 'BELGICA'),\n ('A9134AAAAA', 'BULGARIA'), ('A9107AAAAA', 'CHIPRE'), ('A9140AAAAA', 'CROACIA'),\n ('A9108AAAAA', 'DINAMARCA'), ('A9158AAAAA', 'ESLOVAQUIA'), ('A9141AAAAA', 'ESLOVENIA'),\n ('A9109AAAAA', 'ESPAÑA'), ('A9137AAAAA', 'ESTONIA'), ('A9110AAAAA', 'FINLANDIA'),\n ('A9111AAAAA', 'FRANCIA'), ('A9113AAAAA', 'GRECIA'), ('A9114AAAAA', 'HUNGRIA'), ('A9115AAAAA', 'IRLANDA'),\n ('A9116AAAAA', 'ISLANDIA'), ('A9117AAAAA', 'ITALIA'), ('A9138AAAAA', 'LETONIA'),\n ('A9118AAAAA', 'LIECHTENSTEIN'), ('A9139AAAAA', 'LITUANIA'), ('A9119AAAAA', 'LUXEMBURGO'),\n ('A9120AAAAA', 'MALTA'), ('A9122AAAAA', 'NORUEGA'), ('A9123AAA1A', 'PAISES BAJOS'),\n ('A9124AAAAA', 'POLONIA'), ('A9125AAAAA', 'PORTUGAL'), ('A9112AAA1A', 'REINO UNIDO'),\n ('A9157AAAAA', 'REPUBLICA CHECA'), ('A9127AAAAA', 'RUMANIA'), ('A9128AAAAA', 'SUECIA')]\n\nPAISES_EU_CODES = ['A9103AAAAA', 'A9104AAAAA', 'A9105AAAAA', 'A9134AAAAA', 'A9107AAAAA', 'A9140AAAAA', 'A9108AAAAA',\n 'A9158AAAAA', 'A9141AAAAA', 'A9109AAAAA', 'A9137AAAAA', 'A9110AAAAA', 'A9111AAAAA', 'A9113AAAAA',\n 'A9114AAAAA', 'A9115AAAAA', 'A9116AAAAA', 'A9117AAAAA', 'A9138AAAAA', 'A9118AAAAA', 'A9139AAAAA',\n 'A9119AAAAA', 'A9120AAAAA', 'A9122AAAAA', 'A9123AAA1A', 'A9124AAAAA', 'A9125AAAAA', 'A9112AAA1A',\n 'A9157AAAAA', 'A9127AAAAA', 'A9128AAAAA']\n","repo_name":"jjmartinr01/gauss3","sub_path":"vut/options_tipo_ndi.py","file_name":"options_tipo_ndi.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"635996106","text":"from machine import I2C\n\ni2c = I2C(freq=400000, sda=21, scl=22)\n # create I2C peripheral at frequency of 400kHz\n # depending on the port, extra parameters may be required\n # to select the peripheral and/or pins to use\n\ni2c.scan() # scan for slaves, returning a list of 7-bit addresses\n\ni2c.writeto(42, b'123') # write 3 bytes to slave with 7-bit address 42\ni2c.readfrom(42, 4) # read 4 bytes from slave with 7-bit address 42\n\ni2c.readfrom_mem(42, 8, 3) # read 3 bytes from memory of slave 42,\n # starting at memory-address 8 in the slave\ni2c.writeto_mem(42, 2, b'\\x10') # write 1 byte to memory of slave 42\n # starting at address 2 in the 
slave\n","repo_name":"louiscklaw/m5stack-playlist","sub_path":"micropython-esp32/I2C-tryout/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12525427579","text":"__author__ = 'fuadissa'\n\nimport traceback\nimport logging\nfrom flask import Flask, request, jsonify\nfrom werkzeug.exceptions import BadRequest\n\nfrom atis_classifer.classifier import Classifier\nfrom settings import MODEL_SERVING_DIR\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(level=logging.INFO)\nLOGGER.addHandler(ch)\n\nHOST = '0.0.0.0'\nPORT = 5000\nREQUEST_METHOD = 'classify'\n\n\nAPP = Flask(__name__)\nAPP_NAME = [key for key, val in locals().items() if val is APP][0]\n\nMODEL = None\n\ndef load():\n global MODEL\n MODEL = Classifier(MODEL_SERVING_DIR)\n\n@APP.route(\"/{}\".format(REQUEST_METHOD), methods=[\"POST\"])\ndef classify():\n try:\n json_request = request.get_json()\n except BadRequest:\n trace_back = traceback.format_exc()\n raise Exception(\"JSON is malformed.\\n\\n{}\".format(trace_back))\n except Exception:\n raise Exception(traceback.format_exc())\n\n if not MODEL:\n load()\n\n results = MODEL.run_classifier(json_request['text'])\n\n return jsonify(results)\n\nif __name__ == '__main__':\n APP.run(host=HOST, port=PORT)\n","repo_name":"issafuad/Intent-Detection-and-Slot-Filling","sub_path":"runner_serve.py","file_name":"runner_serve.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39604584265","text":"class Solution:\n def longestIncreasingPath(self, matrix: List[List[int]]) -> int:\n m, n = len(matrix), len(matrix[0])\n maximum = 0\n memo = {}\n def dfs(i, j):\n nonlocal maximum\n if (i, j) in memo:\n return memo[(i, j)]\n memo[(i, j)] = 1\n for di, dj in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\n ni, nj = i + di, j + dj\n if 0 <= ni < m and 0 <= nj < n:\n if matrix[ni][nj] > matrix[i][j]:\n memo[(i, j)] = max(memo[(i, j)], 1 + dfs(ni, nj))\n maximum = max(maximum, memo[(i, j)])\n return memo[(i, j)]\n \n for i in range(m):\n for j in range(n):\n dfs(i, j)\n return max(memo.values())\n","repo_name":"allenhyp/LeetCodePractice","sub_path":"329_Longest_Increasing_Path_in_a_Matrix.py","file_name":"329_Longest_Increasing_Path_in_a_Matrix.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17812810876","text":"def search(w, target, degree):\n global result\n if w == target:\n return 1\n\n for node in adj_lst[w]:\n if not visited[node]:\n visited[node] = 1\n if search(node, target, degree + 1):\n result = degree + 1\n return 0\n\n\n# n : 전체 사람의 수\nn = int(input())\n# t_parent, t_child : 촌수를 계산해야하는 서로 다른 두 사람의 번호\nt_parent, t_child = map(int, input().split())\n# 관계의 개수\nm = int(input())\nadj_lst = [[] for _ in range(n + 1)]\nvisited = [0] * (n + 1)\nresult = -1\nfor _ in range(m):\n parent, child = map(int, input().split())\n adj_lst[parent].append(child)\n adj_lst[child].append(parent)\n\nsearch(t_child, t_parent, 0)\nprint(result)\n\n\n","repo_name":"DailyStudy08/JAEHYEON","sub_path":"DFS/2644_B.py","file_name":"2644_B.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23909284301","text":"# Definition for 
singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass ListOperation:\n def reverseList(self, head):\n if not head or not head.next:\n return head\n\n new_head = self.reverseList(head.next)\n next_node = head.next # head -> next_node\n next_node.next = head # head <- next_node\n head.next = None # [x] <- head <- next_node\n return new_head\n\n\ndef test_middle_node(lists):\n list_operation = ListOperation()\n for i, llist in enumerate(lists):\n curr = head = None\n for j in llist:\n if head is not None:\n curr.next = ListNode(j)\n curr = curr.next\n else:\n head = ListNode(j)\n curr = head\n print(\"TC{}:\\t\".format(i + 1), list_operation.reverseList(head))\n\n\nif __name__ == \"__main__\":\n print(\"__name__:\", \"執行此檔案才會顯示\")\n\n lists = [\n [1, 2, 3],\n ]\n test_middle_node(lists)\n","repo_name":"hyj1116/LeetCode-HYJ","sub_path":"1-Easy/876. Middle of the Linked List/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9219689268","text":"import serial\nfrom pynput.keyboard import Key, Controller\nimport os\nfrom threading import *\nimport serialFind as serialFind\nkeyboard = Controller()\n\nController = serial.Serial(serialFind.serialEnd, 115200)\n\ncomands = [\"f1\", \"f2\", \"f3\", \"f4\", \"f5\", \"f6\", \"f7\", \"f8\", \"f9\",\"f10\", \"f11\", \"f12\", \"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"0\",\"-\",\"=\",\"q\",\"w\",\"e\",\"r\",\"t\",\"ip\",\"u\",\"i\",\"o\",\"p\",\"'\",\"[\",\"a\",\"s\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"ce\",\"ti\", \"]\",\"bi\",\"z\",\"x\",\"c\",\"v\",\"b\",\"n\", \"m\", \",\", \".\", \";\", \"/\", \"up\"]\nkeys = [Key.f1,Key.f2,Key.f3,Key.f4,Key.f5,Key.f6,Key.f7,Key.f8,Key.f9,Key.f10,Key.f11,Key.f12,\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"0\",\"-\",\"=\",\"q\",\"w\",\"e\",\"r\",\"t\",\"y\",\"u\",\"i\",\"o\",\"p\",\"'\",\"[\",\"a\",\"s\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"ç\",Key.down, \"]\",\"\\\\\",\"z\",\"x\",\"c\",\"v\",\"b\",\"n\", \"m\", \",\", \".\", \";\", Key.left, Key.up]\nprint(\"Hello Welcome to Nano Controller\")\nTHREADS = []\nclass outputTecl(Thread):\n def __init__(self, s):\n try:\n Thread.__init__(self)\n self.data = s\n except:\n print(\"Error in Thread outputTecl\")\n\n def run(self):\n count = 0\n for i in comands:\n if(self.data.startswith(\"!\") and self.data[1:] == i ):\n keyboard.release(keys[count])\n print(self.data)\n elif (self.data == i ):\n print(self.data)\n keyboard.press(keys[count])\n count = count + 1\n\n\nwhile(True):\n b = Controller.readline()\n data = b.decode(\"utf-8\")\n dataSplit = data.split(\"\\r\")\n data = dataSplit[0]\n t = outputTecl(data)\n t.start()","repo_name":"Pcpastre/Controller","sub_path":"controllerSerial.py","file_name":"controllerSerial.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39085777589","text":"from setuptools import setup\nfrom ig_scraper import __version__\n\n\n__author__ = 'Sutrisno Efendi '\n\n\nsetup(\n name='ig-scraper',\n packages=['ig_scraper'],\n version=__version__,\n description='Instagram hashtag scraper',\n license='MIT',\n author='Sutrisno Efendi',\n author_email='kangfend@gmail.com',\n url='https://github.com/kangfend/ig-scraper',\n download_url='https://github.com/kangfend/ig-scraper/tarball/' + __version__, # noqa\n 
keywords=['Instagram', 'Scraper'],\n    install_requires=['requests'],\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Intended Audience :: Developers',\n        'Programming Language :: Python',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n        'Topic :: Internet :: WWW/HTTP',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n)\n","repo_name":"kangfend/ig-scraper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"37725713589","text":"\"\"\"\ndjango-introduction-bmi URL Configuration\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.shortcuts import HttpResponseRedirect\n\n\ndef view_main_bmi(request):\n    \"\"\" Function that redirects to the main URL of the bmi application.\"\"\"\n    return HttpResponseRedirect(\"calculators/\")\n\nurlpatterns = [\n\n    # run this view when the project starts\n    url(r'^$', view_main_bmi),\n\n    # admin panel\n    url(r'^admin/', admin.site.urls),\n\n    # include the URLs from the calculators application\n    url(r'^calculators/', include('calculators.urls', namespace='calculators')),\n]\n","repo_name":"PyLadiesPoznanAdvanced/django-introduction-bmi","sub_path":"django-introduction-bmi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2987050805","text":"import json\nfrom haystack.preprocessor.utils import fetch_archive_from_http\n\n\nclass StrategyQAWikiCorpus:\n    def __init__(self):\n        s3_url_dev = 'https://dpr-nlp.s3.amazonaws.com/startqa_corpus_formatted_for_documentstore.zip'\n        fetch_archive_from_http(s3_url_dev, output_dir='corpus/stratCorpus')\n\n    def filepath(self):\n        return 'corpus/stratCorpus/startqa_corpus_formatted_for_documentstore.json'\n\n    def iter_jsons(self, offset=0):\n        with open(self.filepath(), 'r') as corpus:\n            for i, line in enumerate(corpus):\n                if line.startswith('[') or line.startswith(']'):\n                    continue\n                if i < offset:\n                    continue\n                try:\n                    line = line.strip()\n                    d = json.loads(line)\n                except Exception as e:\n                    print('fail parsing to json ', line, e)\n                    continue\n                if len(d['text']) > 1200:\n                    continue\n                if d['meta']['title']:\n                    d['meta']['name'] = d['meta']['title']\n                yield d\n\n    def iter_json_batches(self, batch_size=10_000, offset=0, max_size=999999999999):\n        dicts = []\n        for i, json in enumerate(self.iter_jsons(offset=offset), start=1):\n            dicts.append(json)\n            if i % batch_size == 0:\n                yield dicts\n                dicts = []\n            if i > max_size:\n                break\n        yield dicts\n","repo_name":"omerlevi2/NLP_Project","sub_path":"dpr/retrievers/corpus/StrategyQAWikiCorpus.py","file_name":"StrategyQAWikiCorpus.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26025889281","text":"from typing import List, Optional, Tuple\n\nfrom dstack._internal.backend.base import Backend\nfrom dstack._internal.core.error import DstackError\nfrom dstack._internal.core.tag import TagHead\nfrom dstack._internal.hub.schemas import RunInfo\nfrom dstack.api.hub import HubClient\n\n\nclass RunNotFoundError(DstackError):\n    pass\n\n\nclass TagNotFoundError(DstackError):\n    pass\n\n\ndef list_runs_hub(hub_client: HubClient, run_name: str = \"\", all: bool = False) -> List[RunInfo]:\n    runs = 
hub_client.list_runs(run_name)\n    if not all:\n        active = any(run.run_head.status.is_active() for run in runs)\n        if active:\n            runs = list(filter(lambda r: r.run_head.status.is_active(), runs))\n        else:\n            runs = runs[:1]\n    return runs\n\n\ndef get_tagged_run_name_hub(\n    hub_client: HubClient, run_name_or_tag_name: str\n) -> Tuple[str, Optional[TagHead]]:\n    if run_name_or_tag_name.startswith(\":\"):\n        tag_name = run_name_or_tag_name[1:]\n        tag_head = hub_client.get_tag_head(tag_name)\n        if tag_head is not None:\n            return tag_head.run_name, tag_head\n        else:\n            raise TagNotFoundError(f\"Tag {tag_name} not found\")\n    else:\n        run_name = run_name_or_tag_name\n        job_heads = hub_client.list_job_heads(run_name)\n        if job_heads:\n            return run_name, None\n        else:\n            raise RunNotFoundError(f\"Run {run_name} not found\")\n\n\ndef get_tagged_run_name_backend(\n    backend: Backend, repo_id: str, run_name: Optional[str], tag_name: Optional[str]\n) -> Tuple[str, Optional[TagHead]]:\n    if run_name is None and tag_name is None:\n        raise DstackError(\"Run or tag must be specified\")\n    if run_name is not None:\n        job_heads = backend.list_job_heads(repo_id=repo_id, run_name=run_name)\n        if len(job_heads) == 0:\n            raise RunNotFoundError(f\"Run {run_name} not found\")\n        return run_name, None\n    tag_head = backend.get_tag_head(repo_id=repo_id, tag_name=tag_name)\n    if tag_head is None:\n        raise TagNotFoundError(f\"Tag {tag_name} not found\")\n    return tag_head.run_name, tag_head\n","repo_name":"silvacarl2/dstack","sub_path":"cli/dstack/_internal/api/runs.py","file_name":"runs.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"37302721757","text":"## A program to play Blackjack \n## By BlueHat GURU\n## Written in Python 3.4.1\n\n\n# Assumption: players already know how to play blackjack, and do not require educating.\n\n\n\n\n# This is just defining a deck of cards, returning it as a list in numerical_suit order.\ncard_values = ('ace', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'jack', 'queen', 'king')\ndeck_suits = ('_clubs', '_diamonds', '_hearts', '_spades')\ndeckofcards = [ value+suit for value in card_values for suit in deck_suits]\n\n#build a dictionary of card values for the handvalue calculation\ncard_value_dictionary = {}\ncard_value_counter = 0\nfor card in card_values:\n    card_value_counter = card_value_counter + 1\n    if card_value_counter > 10:\n        card_value_counter = 10\n    card_value_dictionary[card[:3]] = card_value_counter\n\n#import a couple of packages that blackjack won't run without\nimport random \nimport math\n\n#A string which will be used in several places.\nfailuretocomply = \"\"\"\nLocal laws forbid us from taking money from people\nwho can't understand our instructions.\n\nI'm going to have to ask you to leave.\"\"\"\n\n\n\n\n\n\ndef playgame(playerchips=100, quittracker = False):\n    #loops until the player loses all chips or quits\n    \n    print('Welcome to our casino. 
You have ' + str(playerchips) + ' chips to play with.')\n numberdecks, quittracker = get_number_decks(quittracker)\n tabledeck= bigdeckmaker(numberdecks)\n random.shuffle(tabledeck)\n random.shuffle(tabledeck) # seems to work better with two\n useddeck = []\n \n while (playerchips > 0 and quittracker == False):\n playerbet, quittracker = get_number_bet(playerchips, quittracker)\n if quittracker: # make sure the player just leaves if they give a bad answer\n break\n bet_result, tabledeck, useddeck, quittracker = playhand(playerbet, playerchips, tabledeck, useddeck, quittracker)\n playerchips = playerchips + bet_result\n print('You now have ' + str(playerchips) + ' chips.')\n if quittracker == False:\n quittracker = quit_query()\n \n print('You leave the casino with ' + str(playerchips) + ' chips.')\n\n\n\n\n\ndef playhand(bet_valueph, playerchipsph, deckatthetable, usedcards, quittrackerph):\n #function to actually play through a hand\n playerhandph=[]\n dealerhandph=[]\n playerhandvalue = 0\n dealerhandvalue = 0\n playerstand = False\n \n deckatthetable, playerhandph, usedcards = dealto(deckatthetable, playerhandph, usedcards)\n handstatement(playerhandph, 'Your')\n deckatthetable, dealerhandph, usedcards = dealto(deckatthetable, dealerhandph, usedcards)\n handstatement(dealerhandph, 'The dealer\\'s')\n deckatthetable, playerhandph, usedcards = dealto(deckatthetable, playerhandph, usedcards)\n handstatement(playerhandph, 'Your')\n \n while ( (playerhandvalue < 21) and (playerstand != True)): \n bet_valueph, playerstand, playerhandph, deckatthetable, usedcards, quittrackerph = \\\n playerdecision_dialog(bet_valueph, playerchipsph, playerstand, playerhandph, deckatthetable, usedcards, quittrackerph)\n if quittrackerph: # make sure the player just leaves if they give a bad answer\n break\n playerhandvalue = handvalue(playerhandph)\n handstatement(playerhandph, 'Your')\n \n while ( dealerhandvalue < 17) and (quittrackerph == False):\n deckatthetable, dealerhandph, usedcards = dealto(deckatthetable, dealerhandph, usedcards)\n dealerhandvalue = handvalue(dealerhandph)\n handstatement(dealerhandph, 'The dealer\\'s')\n\n #int(bet_valueph)\n bet_resultph = bet_result(bet_valueph, playerhandph, dealerhandph)\n usedcards = usedcards + playerhandph + dealerhandph\n return bet_resultph, deckatthetable, usedcards, quittrackerph\n\n\n\n\n\ndef dealto(deckdealtfrom, deckdealtto, sparedeck):\n #dealing a card, and altering appropriate decks/hands; also to reincorporate used cards if we run out\n if len(deckdealtfrom)<=0:\n deckdealtfrom.extend(sparedeck)\n random.shuffle(deckdealtfrom)\n del sparedeck[:]\n deckdealtto.append(deckdealtfrom.pop())\n return deckdealtfrom, deckdealtto, sparedeck\n\n\n\n\ndef handstatement(handtoprint, userflag):\n handstring = ''\n for card in range(len(handtoprint)):\n handstring = handstring + ' ' + handtoprint[card] + ','#try to incorporate proper english at some point\n handvaluestring = str(handvalue(handtoprint))\n #print(userflag + ' hand is ' + handstring + '.')\n print(userflag + ' hand is ' + handstring + ' worth '+ handvaluestring + '.')\n \n\n\n\n \ndef playerdecision_dialog(bet_valuepd, playerchipspd, playerstandpd, playerhandpd, deckdealtfromph, usedcardspd, quittrackerpd, retries=6, decideflag = False):\n #dialog asking the player what action they want this hand.\n while (retries > 0) and (decideflag == False):\n playeraction = input('Do you want to hit, stand, or double? 
')\n \n if playeraction in ('h', 'hi', 'ht', 'hit'):\n deckdealtfromph, playerhandpd, usedcardspd = dealto(deckdealtfromph, playerhandpd, usedcardspd)\n decideflag = True\n \n elif playeraction in ('s', 'st', 'sta', 'stan', 'stand'):\n playerstandpd = True\n decideflag = True\n \n elif playeraction in ('d', 'do', 'dou', 'doub', 'doubl', 'double' ):\n if 2*bet_valuepd > playerchipspd:\n print('I\\'m sorry, you can\\'t bet more chips than you have.')\n retries = retries - 1\n else:\n bet_valuepd = 2*bet_valuepd\n deckdealtfromph, playerhandpd, usedcardspd = dealto(deckdealtfromph, playerhandpd, usedcardspd)\n playerstandpd = True\n decideflag = True\n \n #will need to add 'surrender' and 'split' here, if implemented\n #elif playeraction in ('surren', 'surrender'): # supposed to only be available on first decision of hand, and results in quit game -> complicated\n # playerstandpd = True\n # bet_valuepd = bet_valuepd - int(bet_valuepd/2)\n # decideflag = True\n #elif playeraction in ('sp', 'spl', 'spli', 'split'):\n # supposed to only be available on first decision of hand, and results in two player hands -> complicated\n #decideflag = True\n \n else:\n retries = retries - 1\n print('I am sorry, I did not understand what you said. Could you repeat it, please?')\n if retries <= 0:\n quittrackerpd = True\n print(failuretocomply)\n bet_valuepd = 0\n return bet_valuepd, playerstandpd, playerhandpd, deckdealtfromph, usedcardspd, quittrackerpd\n \n\n\n\n\ndef handvalue(handlist): # to compute what a hand is worth\n handinteger = 0\n ace_present = False\n for card_in_hand in handlist:\n if card_in_hand[:3] in list(card_value_dictionary.keys()):\n handinteger = handinteger + card_value_dictionary[card_in_hand[:3]]\n if card_in_hand[:3] == 'ace':\n ace_present = True\n\n #The player will never wish to count more than one ace as an 11\n if (ace_present == True) and (handinteger + 10 <= 21):\n handinteger = handinteger + 10\n return handinteger\n \n\n\n\n\n\ndef bet_result(betvaluebr, playerhandbr, dealerhandbr):\n \n playerblackjackbr = black_jack_check(playerhandbr)\n playerhandvalue = handvalue(playerhandbr)\n dealerblackjackbr = black_jack_check(dealerhandbr)\n dealerhandvalue = handvalue(dealerhandbr)\n \n if playerhandvalue > 21:\n betmodifier = -1\n \n elif dealerhandvalue > 21 and playerhandvalue <= 21:\n betmodifier = 1\n \n elif dealerhandvalue <= 21 and playerhandvalue <= 21:\n if playerhandvalue > dealerhandvalue:\n betmodifier = 1\n elif playerhandvalue < dealerhandvalue:\n betmodifier = -1\n elif playerhandvalue == dealerhandvalue:\n if (playerblackjackbr == True) and (dealerblackjackbr == False):\n betmodifier = 1\n elif (playerblackjackbr == False) and (dealerblackjackbr == True):\n betmodifier = -1\n else:\n betmodifier = 0\n\n if playerblackjackbr == True:\n betmodifier = (3/2)*betmodifier\n \n betresultbr = int(betmodifier * betvaluebr)\n return betresultbr\n\n\n\n\n\ndef black_jack_check(handtocheckbjc, isblackjack = False):\n tenfacelist = []\n for cardvaluebjc in card_values[8:12]:\n tenfacelist = tenfacelist + [cardvaluebjc[:3]]\n if len(handtocheckbjc) == 2:\n if (handtocheckbjc[0][:3] in ['ace']) and (handtocheckbjc[1][:3] in tenfacelist):\n isblackjack = True\n elif (handtocheckbjc[1][:3] in ['ace']) and (handtocheckbjc[0][:3] in tenfacelist):\n isblackjack = True \n return isblackjack\n\n\n\n\n\ndef bigdeckmaker(numberofdecks, fulldeck=deckofcards):\n #takes an integer number of decks and combines them into one big deck\n loopvar = numberofdecks\n makedeck = []\n loopdeck = 
fulldeck\n while loopvar > 0:\n makedeck.extend(loopdeck[:])\n loopvar = loopvar -1\n return makedeck\n\n\n\n\n\n\ndef get_number_from_player(playermaxchoice, maxstring, inputstring, minstring, quittrackergnfp, retries=6):\n #dialog asking player to choose a number, used for both making bets and picking tables.\n while (retries > 0) :\n playerchoice = input(inputstring)\n if len(playerchoice) < 1:\n playerchoice='user input error'\n elif playerchoice[0] in [ str(range(10)[i]) for i in range(10)]:\n playerchoice_int = int(playerchoice)\n if (playerchoice_int <= playermaxchoice) and (playerchoice_int >0):\n return playerchoice_int, quittrackergnfp\n elif playerchoice_int < 1:\n print(minstring+' Try again.')\n else:\n print(maxstring + str(playermaxchoice) + '. Try again.')\n else: \n print('Please enter an integer.')\n retries = retries - 1\n if retries <= 0: \n print(failuretocomply)\n quittrackergnfp = True\n return 0, quittrackergnfp\n \n\ndef get_number_bet(totalplayerchips, quittrackergnb):\n # written like this for convenience\n betmaxstring = 'You may bet at most '\n betinputstring = 'Please type how many chips would you like to bet: '\n betminstring = 'You must bet at least one.'\n numberofchips, quittrackergnb = get_number_from_player(totalplayerchips, betmaxstring, betinputstring, betminstring, quittrackergnb)\n return numberofchips, quittrackergnb \n\ndef get_number_decks(quittrackergnd):\n # written like this for convenience\n deckmaxstring = 'You may choose at most '\n deckinputstring = 'Please choose how many decks your table is using: '\n deckminstring = 'You can\\'t play with less than one deck of cards.'\n numberofdecks, quittrackergnd = get_number_from_player(8, deckmaxstring, deckinputstring, deckminstring, quittrackergnd)\n return numberofdecks, quittrackergnd\n\n\n\n\n\ndef quit_query(retries=4):\n while (retries > 0):\n ok = input('Do you want to keep playing, Yes or No? 
')\n        if ok in ('y', 'ye', 'yes'):\n            return False\n        if ok in ('n', 'no', 'nop', 'nope'):\n            return True\n        retries = retries - 1\n        print('Yes or no, please!')\n    #ran out of retries, so the player has to leave, as in the other dialogs\n    print(failuretocomply)\n    return True\n\n\n\n\n\nif __name__ == \"__main__\":\n    playgame()\n\n\n\n\n\n\n\n","repo_name":"BlueHat-GURU/python-blackjack","sub_path":"blackjacktwod.py","file_name":"blackjacktwod.py","file_ext":"py","file_size_in_byte":11579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"1863916626","text":"import math\nfrom enum import Enum\nfrom te import platform as tbe_platform\nimport te.lang.cce\nimport te.platform.cce_params as cce_params\nfrom te import tvm\nfrom topi.cce import util\nimport topi\n# pylint: disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\n# pylint: disable=too-many-lines\nBLOCK_SIZE = cce_params.BLOCK_REDUCE\n\n# shape's dim of input and output must be 4\nFEATURE_MAP_DIM = 4\n\n# shape's dim of filter must be 4\nFILTER_DIM = 4\n\n# shape's dim of strides must be 4\nSTRIDES_DIM = 4\n\n# shape's dim of dilations must be 4\nDILATIONS_DIM = 4\n\n#General limitation of the size for input shape\nSHAPE_SIZE_LIMIT = 1 << 30\n\nconst_dtype = \"int32\"\n\n# intrinsic value\npad_mode_call = tvm.call_pure_intrin(\"int32\", \"tvm_cce_string_print\",\n                                     'PAD_NONE')\ncsize_call = tvm.call_pure_intrin(\"int32\", \"tvm_cce_string_print\", 'CSIZE0')\n\n# vector_dup only can support max repeat 255\nvector_dump_max = 255\n\n\n# pylint: disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\n# pylint: disable=redefined-builtin\ndef check_params(shape, dtype, format):\n    \"\"\"\n    check the parameters including shape, dtype, format\n\n    Parameters\n    ----------\n    shape : shape of tensor\n\n    dtype : data type\n\n    format : tensor format\n\n    Returns\n    -------\n    None\n    \"\"\"\n    if format == \"NCHW\":\n        util.check_shape_rule(shape, FEATURE_MAP_DIM, FEATURE_MAP_DIM)\n\n    if format == \"HWCK\" or format == \"HWCN\":\n        util.check_shape_rule(shape, FILTER_DIM, FILTER_DIM)\n    check_list = [\"float16\"]\n    util.check_dtype_rule(dtype, check_list)\n\n\nclass BlockTilingType(Enum):\n    \"\"\"\n    The type of block tiling.\n    INVALID: tiling param is invalid.\n    DIVISIBLE: Block tiling that can be exactly divided.\n    FUSED: Uneven block tiling is split. 
Therefore, the block tiling is merged\n into one axis tiling.\n \"\"\"\n INVALID = 0\n DIVISIBLE = 1\n FUSED = 2\n\n\n# pylint: disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\ndef new_alloc(tvm_ir, dtype, shape, name, scope, double_buffer=False):\n \"\"\"\n decl new buffer\n\n Parameters\n ----------\n tvm_ir : developer API of IR node builder make function.\n\n dtype : buffer date type.\n\n shape : buffer shape.\n\n name : buffer name.\n\n scope : buffer memory scope.\n\n double_buffer : whether need double buffer\n\n Returns\n -------\n buffer : tvm.schedule.Buffer\n Symbolic data buffer.\n\n \"\"\"\n buf_var = tvm_ir.allocate(dtype, shape, name=name, scope=scope)\n if double_buffer:\n tvm_ir.scope_attr(buf_var.asnode(), \"double_buffer_scope\", 1)\n new_buffer = tvm.decl_buffer(shape,\n buf_var.dtype,\n name=name,\n scope=scope,\n data=buf_var)\n\n return new_buffer\n\n\n# pylint: disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\ndef get_tiling(m, k, mini_dx_width):\n \"\"\"\n get tiling size according to m and k\n In the case that L0A can be stored, it is preferred to ensure that\n the m value of the left matrix is larger and k >= 2*BLOCK_SIZE\n\n Parameters\n ----------\n m : number of left matrix rows.\n\n k : number of left matrix columns.\n\n mini_dx_width : dx width of mini kernel.\n\n Returns\n -------\n tile_m : tiling size of left matrix rows.\n\n tile_k : tiling size of left matrix columns.\n\n is_mul : whether split based on mini_dx_width multiples\n \"\"\"\n l0a_size_bytes = tbe_platform.cce_conf.get_soc_spec(\n tbe_platform.cce_conf.L0A_SIZE)\n data_size = tbe_platform.cce_intrin.get_bit_len(\"float16\") // 8\n # tile m according to mini_dx_width multiples\n lcm_m = BLOCK_SIZE // math.gcd(mini_dx_width, BLOCK_SIZE) * mini_dx_width\n\n def _compute_tile_m(piece_k):\n one_block_size = piece_k * BLOCK_SIZE * data_size\n # half the size of the space because of double buffer\n space_m = l0a_size_bytes // one_block_size // 2\n if space_m >= lcm_m:\n floor_m = space_m // lcm_m * lcm_m\n is_mul = True\n else:\n floor_m = space_m // BLOCK_SIZE * BLOCK_SIZE\n is_mul = False\n return floor_m, is_mul\n\n if (k // BLOCK_SIZE) == 1:\n tile_k = 1\n else:\n floor_m, _ = _compute_tile_m(k // BLOCK_SIZE)\n if floor_m == 0:\n tile_k = 2\n else:\n cnt_m = (m + floor_m - 1) // floor_m\n cnt_k = min(cnt_m, k // BLOCK_SIZE)\n tile_k = max(k // BLOCK_SIZE // cnt_k, 2)\n max_m, is_mul = _compute_tile_m(tile_k)\n tile_m = max_m // BLOCK_SIZE\n\n return tile_m, tile_k, is_mul\n\n\n# pylint: disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\n# pylint: disable=too-many-nested-blocks\ndef depthwise_conv2d_backprop_input_kernel(out, src, input_shape, strides,\n pads, dilations):\n \"\"\"\n algorithm:\n\n calculating depthwise conv2d backprop input in IR build method\n\n Parameters\n ----------\n out : input_grad\n\n src : out_backprop and filter\n\n input_shape : input tensor shape\n\n strides : the stride of the sliding window for height and width of the input\n\n pads : padding added to each dimension of the input\n\n dilations : the dilation factor for height and width of input\n\n Returns\n -------\n tvm_ir.get() : the ir_builder created\n\n Developer API of IR node builder make function.\n\n \"\"\"\n tvm_ir = tvm.ir_builder.create()\n dout = src[0]\n 
filter_init = src[1]\n dx = out[0]\n\n batch, input_c1, input_height, input_width, _ = input_shape\n filter_shape = (int(i.value) for i in filter_init.shape)\n filter_height, filter_width, _, multiplier = filter_shape\n dout_shape = (int(i.value) for i in dout.shape)\n batch, output_c1, output_height, output_width, _ = dout_shape\n stride_h, stride_w = strides\n pad_top, _, pad_left, _ = pads\n dilation_h, dilation_w = dilations\n\n # dilation parameters\n dilated_filter_height = (filter_height - 1) * dilation_h + 1\n dilated_filter_width = (filter_width - 1) * dilation_w + 1\n full_height = input_height + dilated_filter_height - 1\n full_width = input_width + dilated_filter_width - 1\n dilated_height = (output_height - 1) * stride_h + 1\n dilated_width = (output_width - 1) * stride_w + 1\n dilated_pad_top = dilated_filter_height - pad_top - 1\n dilated_pad_left = dilated_filter_width - pad_left - 1\n dilated_pad_bottom = full_height - dilated_height - dilated_pad_top\n dilated_pad_right = full_width - dilated_width - dilated_pad_left\n\n # split kernel\n virtual_pad_top = dilated_pad_top - dilated_pad_top // \\\n stride_h * stride_h\n virtual_pad_left = dilated_pad_left - dilated_pad_left // \\\n stride_w * stride_w\n\n # compute max mini kernel size and tiling plan\n max_kernel_height = (filter_height + stride_h - 1) // stride_h\n max_kernel_width = (filter_width + stride_w - 1) // stride_w\n max_output_pad_height = output_height + dilated_pad_top // stride_h + \\\n dilated_pad_bottom // stride_h\n max_output_pad_width = output_width + dilated_pad_left // stride_w + \\\n dilated_pad_right // stride_w\n max_dx_height = max_output_pad_height - max_kernel_height + 1\n max_dx_width = max_output_pad_width - max_kernel_width + 1\n max_tile_m, max_tile_k, _ = get_tiling(max_dx_height * max_dx_width,\n max_kernel_height * \\\n max_kernel_width * \\\n BLOCK_SIZE, max_dx_width)\n\n def _get_mad_out_dtype():\n if te.platform.CceProductParams().cce_product == \"5.10\":\n mad_out_dtype = \"float16\"\n else:\n mad_out_dtype = \"float32\"\n return mad_out_dtype\n\n def _get_crmode_call():\n if te.platform.CceProductParams().cce_product == \"5.10\":\n return tvm.call_pure_intrin(\"int32\", \"tvm_cce_string_print\",\n 'CRMODE_NONE')\n return tvm.call_pure_intrin(\"int32\", \"tvm_cce_string_print\",\n 'CRMODE_F32toF16_NONE')\n\n def _ceil_to(value, ceil_value):\n if ceil_value <= 0:\n return value\n return ((value + ceil_value - 1) // ceil_value) * ceil_value\n\n def _get_block_tiling(block_axis_value):\n device_core_num = tbe_platform.cce_conf.get_soc_spec(\n tbe_platform.cce_conf.CORE_NUM)\n tiling = {}\n tiling[\"shape\"] = {}\n\n all_block_value = 1\n for value in block_axis_value:\n all_block_value *= value\n tiling[\"fuse\"] = all_block_value\n\n if all_block_value % device_core_num == 0:\n unblock_core_num = device_core_num\n cur_device_core_num = device_core_num // unblock_core_num\n block_tiling = []\n for value in block_axis_value:\n cur_axis_core_num = math.gcd(unblock_core_num, value)\n cur_device_core_num *= cur_axis_core_num\n unblock_core_num = device_core_num // cur_device_core_num\n block_tiling.append(value // cur_axis_core_num)\n tiling[\"shape\"][\"n\"] = block_tiling[0]\n tiling[\"shape\"][\"c1\"] = block_tiling[1]\n tiling[\"block_dim\"] = device_core_num\n tiling[\"type\"] = BlockTilingType.DIVISIBLE\n else:\n tiling[\"fuse_factor\"] = _ceil_to(all_block_value, device_core_num) \\\n // device_core_num\n tiling[\"block_dim\"] = _ceil_to(\n all_block_value,\n tiling[\"fuse_factor\"]) 
// tiling[\"fuse_factor\"]\n tiling[\"type\"] = BlockTilingType.FUSED\n tiling[\"result\"] = True\n return tiling\n\n block_tiling = _get_block_tiling((batch, input_c1))\n\n def _calculation_block(n_index, c1_index):\n def _dump0(tile_left):\n uint64_all = tvm.const(2**64 - 1, dtype=\"uint64\")\n dump_value = tvm.const(0.0, dtype=\"float16\")\n dump_len = 8 # one repeat can process 8 blocks\n tvm_ir.emit(\n tvm.call_extern(\"uint64\", 'set_vector_mask', uint64_all,\n uint64_all))\n repeat = (tile_left + dump_len - 1) // dump_len\n repeat_loop = repeat // vector_dump_max\n with tvm_ir.if_scope(repeat >= vector_dump_max):\n with tvm_ir.for_range(0, repeat_loop) as i:\n tvm_ir.emit(\n tvm.call_extern(\n dx.dtype, 'vector_dup',\n dx_ub.access_ptr(\n \"rw\",\n offset=(vector_dump_max * dump_len * 16) * i),\n dump_value, vector_dump_max, 1, 1, 8, 8))\n\n with tvm_ir.if_scope(repeat % vector_dump_max > 0):\n tvm_ir.emit(\n tvm.call_extern(\n dx.dtype, \"vector_dup\",\n dx_ub.access_ptr(\"rw\",\n offset=(vector_dump_max * dump_len) *\n repeat_loop), dump_value,\n repeat % vector_dump_max, 1, 1, 8, 8))\n\n tvm_ir.emit(\n tvm.call_extern(\"uint64\", 'set_vector_mask', uint64_all,\n uint64_all))\n\n def _compute_mini_kernel_padding():\n # padding size for the mini kernel convolution\n margin_top = idx_h\n margin_bottom = ((full_height - 1) -\n (idx_h + dilated_filter_height - 1)) % stride_h\n margin_left = idx_w\n margin_right = ((full_width - 1) -\n (idx_w + dilated_filter_width - 1)) % stride_w\n pad_mini_top = (dilated_pad_top - \\\n margin_top) // stride_h\n pad_mini_bottom = (dilated_pad_bottom - \\\n margin_bottom) // stride_h\n pad_mini_left = (dilated_pad_left - \\\n margin_left) // stride_w\n pad_mini_right = (dilated_pad_right - \\\n margin_right) // stride_w\n return pad_mini_top, pad_mini_bottom, pad_mini_left, pad_mini_right\n\n def _load_out_backprop_once():\n lenBurstA = output_height * output_width\n fmap_offset = (n_index * output_c1 +\n c1_index) * lenBurstA * BLOCK_SIZE\n tvm_ir.emit(\n tvm.call_extern(out_backprop_l1.dtype, \"copy_gm_to_cbuf\",\n out_backprop_l1.access_ptr(\"w\"),\n dout.access_ptr(\"r\", offset=fmap_offset), 0, 1,\n lenBurstA, 0, 0, pad_mode_call))\n\n def _load_out_backprop_repeatedly():\n with tvm_ir.for_range(0, output_pad_height, name=\"h\") as h:\n out_backprop_idx = ((-pad_mini_top + h) * output_width + \\\n (-pad_mini_left)) * BLOCK_SIZE\n out_backprop_offset = (n_index * output_c1 +\n c1_index) * BLOCK_SIZE * \\\n output_width * \\\n output_height + \\\n out_backprop_idx\n out_backprop_l1_offset = h * BLOCK_SIZE * \\\n output_pad_width\n tvm_ir.emit(\n tvm.call_extern(\n out_backprop_l1.dtype, \"copy_gm_to_cbuf\",\n out_backprop_l1.access_ptr(\n \"w\", offset=out_backprop_l1_offset),\n dout.access_ptr(\"r\", offset=out_backprop_offset), 0, 1,\n output_pad_width, 0, 0, pad_mode_call))\n\n def _load_mini_kernel(mini_kernel_height, mini_kernel_width):\n stride_offset_h = (first_pos_h + stride_h -\n 1) // stride_h * stride_h\n stride_offset_w = (first_pos_w + stride_w -\n 1) // stride_w * stride_w\n with tvm_ir.for_range(0, mini_kernel_height, name=\"m\") as m:\n with tvm_ir.for_range(0, mini_kernel_width, name=\"n\") as n:\n pos_rot = (mini_kernel_width * m +\n n) * BLOCK_SIZE * BLOCK_SIZE * multiplier\n origin_rot_h = m * stride_h + virtual_pad_top - \\\n idx_h + stride_offset_h\n origin_rot_w = n * stride_w + virtual_pad_left - \\\n idx_w + stride_offset_w\n origin_h = dilated_filter_height - origin_rot_h - 1\n origin_w = dilated_filter_width - 
origin_rot_w - 1\n filter_offset = (c1_index *\n (filter_height * filter_width) +\n (filter_width * origin_h + origin_w)) * (\n BLOCK_SIZE * BLOCK_SIZE * multiplier)\n\n with tvm_ir.if_scope(\n tvm.all((origin_rot_h % dilation_h) == 0,\n (origin_rot_w % dilation_w) == 0)):\n # load effective filter to l1\n lenBurstB = BLOCK_SIZE * BLOCK_SIZE * multiplier // \\\n BLOCK_SIZE\n tvm_ir.emit(\n tvm.call_extern(\n mini_kernel_l1.dtype, \"copy_gm_to_cbuf\",\n mini_kernel_l1.access_ptr(\"w\", offset=pos_rot),\n filter_init.access_ptr(\"r\",\n offset=filter_offset),\n 0, 1, lenBurstB, 0, 0, pad_mode_call))\n with tvm_ir.else_scope():\n # index map to the dilated zeros\n dump0size = BLOCK_SIZE * BLOCK_SIZE * multiplier // \\\n BLOCK_SIZE\n dump_ub = new_alloc(tvm_ir,\n filter_init.dtype,\n dump0size,\n \"dump_ub\",\n scope=tbe_platform.scope_ubuf)\n tvm_ir.emit(\n tvm.call_extern(\n mini_kernel_l1.dtype, \"set_vector_mask\",\n tvm.const(0, dtype=\"uint64\"),\n tvm.const((2**16 - 1), dtype=\"uint64\")))\n tvm_ir.emit(\n tvm.call_extern(dump_ub.dtype, \"vector_dup\",\n dump_ub.access_ptr(\"rw\"),\n tvm.const(0, dtype=\"float16\"), 1,\n 0, 0, 0, 0))\n tvm_ir.emit(\n tvm.call_extern(\n mini_kernel_l1.dtype, \"set_vector_mask\",\n tvm.const((2**64 - 1), dtype=\"uint64\"),\n tvm.const((2**64 - 1), dtype=\"uint64\")))\n tvm_ir.emit(\n tvm.call_extern(\n mini_kernel_l1.dtype, \"copy_ubuf_to_cbuf\",\n mini_kernel_l1.access_ptr(\"w\", offset=pos_rot),\n dump_ub.access_ptr(\"r\"), 0, 1, dump0size, 0,\n 0))\n\n def _load3d_and_load2d(v, mini_kernel_height, mini_kernel_width,\n tile_m, tile_k):\n # tile M and K according to memory size\n repeat_m = (mini_dx_height * mini_dx_width + tile_m * BLOCK_SIZE -\n 1) // (tile_m * BLOCK_SIZE)\n repeat_k = (mini_kernel_height * mini_kernel_width + tile_k -\n 1) // tile_k\n last_m = mini_dx_height * mini_dx_width - (repeat_m -\n 1) * tile_m * BLOCK_SIZE\n tile_piece = tvm_ir.allocate(const_dtype, (1, ),\n name='tile_piece',\n scope=tbe_platform.scope_reg)\n tile_piece_k = tvm_ir.allocate(const_dtype, (1, ),\n name='tile_piece_k',\n scope=tbe_platform.scope_reg)\n tile_left = tvm_ir.allocate(const_dtype, (1, ),\n name='tile_left',\n scope=tbe_platform.scope_reg)\n # tile M\n with tvm_ir.for_range(0, repeat_m, name=\"loop_m\") as loop_m:\n index_m = loop_m * tile_m * BLOCK_SIZE\n loop_var = tvm.max(loop_m - repeat_m + 2, 0)\n tile_left[0] = loop_var * last_m + (\n 1 - loop_var) * tile_m * BLOCK_SIZE\n tile_piece[0] = (tile_left[0] + BLOCK_SIZE - 1) // BLOCK_SIZE\n\n # tile K\n with tvm_ir.for_range(0, repeat_k, name=\"loop_k\") as loop_k:\n out_backprop_l0a = new_alloc(tvm_ir,\n dout.dtype,\n img2col_buffer_size,\n \"out_backprop_l0a\",\n scope=tbe_platform.scope_ca,\n double_buffer=True)\n mini_kernel_l0b = new_alloc(tvm_ir,\n filter_init.dtype,\n filter_buffer_size,\n \"mini_kernel_l0b\",\n scope=tbe_platform.scope_cb,\n double_buffer=True)\n index_k = loop_k * tile_k * BLOCK_SIZE\n tile_top = tvm_ir.allocate(const_dtype, (1, ),\n name='tile_top',\n scope=tbe_platform.scope_reg)\n with tvm_ir.if_scope(loop_k == (repeat_k - 1)):\n tile_top[0] = mini_kernel_height * mini_kernel_width * \\\n BLOCK_SIZE - index_k\n with tvm_ir.else_scope():\n tile_top[0] = tile_k * BLOCK_SIZE\n tile_piece_k[0] = tile_top[0] // BLOCK_SIZE\n\n def _img2col_repeat_mode0():\n with tvm_ir.for_range(0, tile_piece[0], name=\"i\") as i:\n index_inner = i * BLOCK_SIZE\n first_h = (index_inner //\n mini_dx_width) - set_pad_top\n first_w = (index_inner %\n mini_dx_width) - set_pad_left\n l0a_offset = i * 
BLOCK_SIZE * BLOCK_SIZE * \\\n mini_kernel_height * mini_kernel_width\n tvm_ir.emit(\n tvm.call_extern(\n out_backprop_l0a.dtype,\n \"img2col_cbuf_to_ca\",\n out_backprop_l0a.access_ptr(\n \"w\", offset=l0a_offset),\n out_backprop_l1.access_ptr(\"r\"), 0, 0,\n first_w, first_h, 0, 1, 1,\n mini_kernel_width, mini_kernel_height, 1,\n 1, 0, 0,\n mini_kernel_width * mini_kernel_height,\n csize_call))\n\n def _img2col_repeat_mode1():\n pos_wk = tvm_ir.allocate(const_dtype, (1, ),\n name='pos_wk',\n scope=tbe_platform.scope_reg)\n pos_hk = tvm_ir.allocate(const_dtype, (1, ),\n name='pos_hk',\n scope=tbe_platform.scope_reg)\n pos_wk[0] = (index_k // BLOCK_SIZE % mini_kernel_width)\n pos_hk[0] = (index_k // BLOCK_SIZE //\n mini_kernel_width)\n with tvm_ir.for_range(0, tile_piece_k[0],\n name=\"i\") as i:\n index_inner = index_m\n first_h = (index_inner //\n mini_dx_width) - set_pad_top\n first_w = (index_inner %\n mini_dx_width) - set_pad_left\n l0a_offset = i * BLOCK_SIZE * BLOCK_SIZE\n tvm_ir.emit(\n tvm.call_extern(\n out_backprop_l0a.dtype,\n \"img2col_cbuf_to_ca\",\n out_backprop_l0a.access_ptr(\n \"w\", offset=l0a_offset),\n out_backprop_l1.access_ptr(\"r\"), pos_wk[0],\n pos_hk[0], first_w, first_h, 0, 1, 1,\n mini_kernel_width, mini_kernel_height, 1,\n 1, tile_piece_k[0], 1, tile_piece[0],\n csize_call))\n pos_wk[0] += tvm.const(1, dtype=const_dtype)\n with tvm_ir.if_scope(\n pos_wk[0] == mini_kernel_width):\n pos_wk[0] = tvm.const(0, dtype=const_dtype)\n pos_hk[0] += tvm.const(1, dtype=const_dtype)\n\n if v == 1:\n if repeat_m == 1 and repeat_k == 1:\n _img2col_repeat_mode0()\n else:\n _img2col_repeat_mode1()\n else:\n with tvm_ir.if_scope(\n tvm.all(repeat_m == 1, repeat_k == 1)):\n _img2col_repeat_mode0()\n with tvm_ir.else_scope():\n _img2col_repeat_mode1()\n\n # load filter from l1 to l0b\n tvm_ir.emit(\n tvm.call_extern(\n mini_kernel_l0b.dtype, \"load_cbuf_to_cb\",\n mini_kernel_l0b.access_ptr(\"w\"),\n mini_kernel_l1.access_ptr(\"r\",\n offset=index_k *\n BLOCK_SIZE), 0,\n tile_piece_k[0] * multiplier, 1, 0, 0))\n\n # accumulate when tile k\n is_cover = tvm_ir.allocate(const_dtype, (1, ),\n name='is_cover',\n scope=tbe_platform.scope_reg)\n with tvm_ir.if_scope(\n loop_k == tvm.const(0, dtype=const_dtype)):\n is_cover[0] = tvm.const(1, dtype=const_dtype)\n with tvm_ir.else_scope():\n is_cover[0] = tvm.const(0, dtype=const_dtype)\n\n # GEMV mode when M=1\n cube_m = tvm_ir.allocate(const_dtype, (1, ),\n name='cube_m',\n scope=tbe_platform.scope_reg)\n with tvm_ir.if_scope(\n tile_left[0] == tvm.const(1, dtype=const_dtype)):\n cube_m[0] = BLOCK_SIZE\n with tvm_ir.else_scope():\n cube_m[0] = tile_left[0]\n\n tvm_ir.emit(\n tvm.call_extern(_get_mad_out_dtype(), \"mad\",\n dx_l0c.access_ptr(\"w\"),\n out_backprop_l0a.access_ptr(\"r\"),\n mini_kernel_l0b.access_ptr(\"r\"),\n cube_m[0],\n tile_piece_k[0] * BLOCK_SIZE,\n BLOCK_SIZE, is_cover[0]))\n\n tvm_ir.emit(\n tvm.call_extern(dx_ub.dtype, \"copy_matrix_cc_to_ubuf\",\n dx_ub.access_ptr(\"w\"),\n dx_l0c.access_ptr(\"r\"), 0, 1,\n tile_piece[0], 0, 0, _get_crmode_call()))\n\n if stride_h == 1 and stride_w == 1:\n offset_gm = (n_index * input_c1 + c1_index) * (\n input_height * input_width *\n BLOCK_SIZE) + index_m * BLOCK_SIZE\n tvm_ir.emit(\n tvm.call_extern(dx.dtype, \"copy_ubuf_to_gm\",\n dx.access_ptr(\"w\", offset=offset_gm),\n dx_ub.access_ptr(\"r\"), 0, 1,\n tile_left[0], 0, 0))\n else:\n loop_cnt = tvm_ir.allocate(const_dtype, (1, ),\n name='loop_cnt',\n scope=tbe_platform.scope_reg)\n\n if is_mul:\n mini_h_end = (index_m + 
tile_left[0] -\n 1) // mini_dx_width\n mini_w_end = (index_m + tile_left[0] -\n 1) % mini_dx_width\n\n def _emit_copy_ubuf_to_gm_v1(var, end):\n offset_gm = (n_index * input_c1 + c1_index) * \\\n (input_height * input_width * \\\n BLOCK_SIZE) + ((idx_h + var * \\\n stride_h) * input_width + idx_w) * \\\n BLOCK_SIZE\n offset_ub = (mini_dx_width +\n (var - index_m // mini_dx_width - 1) *\n mini_dx_width) * BLOCK_SIZE\n nBurst = end + 1\n tvm_ir.emit(\n tvm.call_extern(\n dx.dtype, \"copy_ubuf_to_gm\",\n dx.access_ptr(\"w\", offset=offset_gm),\n dx_ub.access_ptr(\"r\", offset=offset_ub), 0,\n nBurst, 1, 0, stride_w - 1))\n\n with tvm_ir.if_scope(loop_m == (repeat_m - 1)):\n loop_cnt[0] = (tile_left[0] -\n 1) // mini_dx_width + 1\n with tvm_ir.else_scope():\n loop_cnt[0] = tile_m * BLOCK_SIZE // mini_dx_width\n with tvm_ir.for_range(0, loop_cnt[0], name=\"j\") as j:\n _emit_copy_ubuf_to_gm_v1(\n j + index_m // mini_dx_width,\n (mini_dx_width - 1))\n _emit_copy_ubuf_to_gm_v1(mini_h_end, mini_w_end)\n else:\n mini_h_start = index_m // mini_dx_width\n mini_w_start = index_m % mini_dx_width\n mini_h_end = (index_m + tile_left[0] -\n 1) // mini_dx_width\n mini_w_end = (index_m + tile_left[0] -\n 1) % mini_dx_width\n if v == 2:\n offset_ub_temp = tvm_ir.allocate(\n const_dtype, (1, ),\n name='offset_ub_temp',\n scope=tbe_platform.scope_reg)\n offset_gm_temp = tvm_ir.allocate(\n const_dtype, (1, ),\n name='offset_gm_temp',\n scope=tbe_platform.scope_reg)\n\n def _emit_copy_ubuf_to_gm_v2(var, start, end):\n if v == 1:\n offset_gm = (n_index * input_c1 +\n c1_index) * (input_height * \\\n input_width * BLOCK_SIZE) + \\\n ((idx_h + var * stride_h) * \\\n input_width + (idx_w + \\\n start * stride_w)) * BLOCK_SIZE\n if var == mini_h_start:\n offset_ub = 0\n else:\n offset_ub = (mini_dx_width - \\\n mini_w_start + (var - \\\n mini_h_start - 1) * \\\n mini_dx_width) * BLOCK_SIZE\n nBurst = end - start + 1\n tvm_ir.emit(\n tvm.call_extern(\n dx.dtype, \"copy_ubuf_to_gm\",\n dx.access_ptr(\"w\", offset=offset_gm),\n dx_ub.access_ptr(\"r\",\n offset=offset_ub), 0,\n nBurst, 1, 0, stride_w - 1))\n if v == 2:\n offset_gm_temp[0] = (n_index * input_c1 + \\\n c1_index) * (input_height *\n input_width * \\\n BLOCK_SIZE) + ((idx_h + var * \\\n stride_h) * input_width + (idx_w + \\\n start * stride_w)) * BLOCK_SIZE\n with tvm_ir.if_scope(var == mini_h_start):\n offset_ub_temp[0] = tvm.const(\n 0, dtype=const_dtype)\n with tvm_ir.else_scope():\n offset_ub_temp[0] = (mini_dx_width - \\\n mini_w_start + (var - \\\n mini_h_start - 1) * \\\n mini_dx_width) * \\\n BLOCK_SIZE\n nBurst = end - start + 1\n tvm_ir.emit(\n tvm.call_extern(\n dx.dtype, \"copy_ubuf_to_gm\",\n dx.access_ptr(\n \"w\", offset=offset_gm_temp[0]),\n dx_ub.access_ptr(\n \"r\", offset=offset_ub_temp[0]), 0,\n nBurst, 1, 0, stride_w - 1))\n\n _emit_copy_ubuf_to_gm_v2(mini_h_start, mini_w_start,\n (mini_dx_width - 1))\n loop_cnt[0] = mini_h_end - mini_h_start - 1\n with tvm_ir.for_range(0, loop_cnt[0], name=\"j\") as j:\n ori_j = j + mini_h_start + 1\n _emit_copy_ubuf_to_gm_v2(ori_j, 0,\n (mini_dx_width - 1))\n with tvm_ir.if_scope(mini_h_end - mini_h_start > 0):\n _emit_copy_ubuf_to_gm_v2(mini_h_end, 0, mini_w_end)\n\n if stride_h > filter_height or stride_w > filter_width:\n total_dx_height = input_height\n total_dx_width = input_width\n total_tile_m, _, _ = get_tiling(total_dx_height * total_dx_width,\n BLOCK_SIZE, total_dx_width)\n dx_buffer_size = total_tile_m * BLOCK_SIZE * BLOCK_SIZE\n dx_ub = new_alloc(tvm_ir,\n dx.dtype,\n dx_buffer_size,\n 
\"dx_ub\",\n scope=tbe_platform.scope_ubuf)\n total_m = (total_dx_height * total_dx_width + total_tile_m *\n BLOCK_SIZE - 1) // (total_tile_m * BLOCK_SIZE)\n _dump0(total_tile_m * BLOCK_SIZE)\n total_left = tvm_ir.allocate(const_dtype, (1, ),\n name='total_left',\n scope=tbe_platform.scope_reg)\n with tvm_ir.for_range(0, total_m, name=\"loop_m\") as loop_m:\n index_m = loop_m * total_tile_m * BLOCK_SIZE\n with tvm_ir.if_scope(loop_m == (total_m - 1)):\n total_left[0] = total_dx_height * total_dx_width - index_m\n with tvm_ir.else_scope():\n total_left[0] = total_tile_m * BLOCK_SIZE\n\n total_offset = (n_index * input_c1 +\n c1_index) * (input_height * input_width *\n BLOCK_SIZE) + index_m * BLOCK_SIZE\n tvm_ir.emit(\n tvm.call_extern(dx.dtype, \"copy_ubuf_to_gm\",\n dx.access_ptr(\"w\", offset=total_offset),\n dx_ub.access_ptr(\"r\"), 0, 1, total_left[0],\n 0, 0))\n\n # stride_h = 4 and stride_w = 4\n if stride_h * stride_w > 16:\n # for loop sink\n mini_kernel_height = tvm_ir.allocate(const_dtype,\n (stride_h * stride_w, ),\n name='mini_kernel_height',\n scope=tbe_platform.scope_reg)\n mini_kernel_width = tvm_ir.allocate(const_dtype,\n (stride_h * stride_w, ),\n name='mini_kernel_width',\n scope=tbe_platform.scope_reg)\n tile_m = tvm_ir.allocate(const_dtype, (stride_h * stride_w, ),\n name='tile_m',\n scope=tbe_platform.scope_reg)\n tile_k = tvm_ir.allocate(const_dtype, (stride_h * stride_w, ),\n name='tile_k',\n scope=tbe_platform.scope_reg)\n for idx_h in range(stride_h):\n for idx_w in range(stride_w):\n break_flag = False\n for m in range(dilated_filter_height):\n for n in range(dilated_filter_width):\n index_h = idx_h + m\n index_w = idx_w + n\n # get one effective filter point\n if ((index_h - virtual_pad_top) % stride_h) == 0 \\\n and ((index_w - virtual_pad_left)\n % stride_w) == 0:\n kernel_h = (dilated_filter_height - m -\n 1) // stride_h + 1\n kernel_w = (dilated_filter_width - n -\n 1) // stride_w + 1\n mini_kernel_height[idx_h * stride_w + idx_w] = \\\n tvm.const(kernel_h, dtype=const_dtype)\n mini_kernel_width[idx_h * stride_w + idx_w] = \\\n tvm.const(kernel_w, dtype=const_dtype)\n break_flag = True\n break\n if break_flag:\n break\n if kernel_h * kernel_w != 0:\n pad_mini_top, pad_mini_bottom, pad_mini_left, \\\n pad_mini_right = _compute_mini_kernel_padding()\n output_pad_height = output_height + pad_mini_top + \\\n pad_mini_bottom\n output_pad_width = output_width + pad_mini_left + \\\n pad_mini_right\n mini_dx_height = output_pad_height - \\\n kernel_h + 1\n mini_dx_width = output_pad_width - \\\n kernel_w + 1\n tile_m[idx_h * stride_w + idx_w], \\\n tile_k[idx_h * stride_w + idx_w], _ = get_tiling(\n mini_dx_height * mini_dx_width,\n kernel_h * kernel_w * \\\n BLOCK_SIZE, mini_dx_width)\n\n with tvm_ir.for_range(0, stride_h, name=\"idx_h\") as idx_h:\n with tvm_ir.for_range(0, stride_w, name=\"idx_w\") as idx_w:\n kernel_area = mini_kernel_height[idx_h*stride_w+idx_w] * \\\n mini_kernel_width[idx_h*stride_w+idx_w]\n\n # dump zeros to gm when mini kernel size = 0\n with tvm_ir.if_scope(kernel_area != 0):\n pad_mini_top, pad_mini_bottom, \\\n pad_mini_left, \\\n pad_mini_right = _compute_mini_kernel_padding()\n output_pad_height = output_height + pad_mini_top + \\\n pad_mini_bottom\n output_pad_width = output_width + pad_mini_left + \\\n pad_mini_right\n mini_dx_height = output_pad_height - \\\n mini_kernel_height[\n idx_h*stride_w+idx_w] + 1\n mini_dx_width = output_pad_width - \\\n mini_kernel_width[\n idx_h*stride_w+idx_w] + 1\n # set load3d config according to 
padding\n reg_pad_params = tvm_ir.allocate(\n 'uint64', (6, ),\n name='pad_params',\n scope=tbe_platform.scope_reg)\n set_pad_left = tvm.max(pad_mini_left, 0)\n set_pad_right = tvm.max(pad_mini_right, 0)\n set_pad_top = tvm.max(pad_mini_top, 0)\n set_pad_bottom = tvm.max(pad_mini_bottom, 0)\n set_output_width = tvm.min(output_pad_width,\n output_width)\n set_output_height = tvm.min(output_pad_height,\n output_height)\n reg_pad_params[0] = topi.cast(set_pad_left,\n dtype='uint64')\n reg_pad_params[1] = topi.cast(set_pad_right,\n dtype='uint64')\n reg_pad_params[2] = topi.cast(set_pad_top,\n dtype='uint64')\n reg_pad_params[3] = topi.cast(set_pad_bottom,\n dtype='uint64')\n reg_pad_params[4] = topi.cast(set_output_width,\n dtype='uint64')\n reg_pad_params[5] = topi.cast(set_output_height,\n dtype='uint64')\n\n # malloc storage space for the computation\n filter_size = max_kernel_height * max_kernel_width * \\\n BLOCK_SIZE * BLOCK_SIZE * multiplier\n dout_size = output_height * output_width * \\\n BLOCK_SIZE\n dx_buffer_size = max_tile_m * BLOCK_SIZE * BLOCK_SIZE\n dx_ub = new_alloc(tvm_ir,\n dx.dtype,\n dx_buffer_size,\n \"dx_ub\",\n scope=tbe_platform.scope_ubuf)\n\n # load3d register configuration\n # place output parameter in corresponding bit\n fmatrixConfig = reg_pad_params[4] \\\n | reg_pad_params[5] << 16 \\\n | reg_pad_params[0] << 32 \\\n | reg_pad_params[1] << 40 \\\n | reg_pad_params[2] << 48 \\\n | reg_pad_params[3] << 56\n tvm_ir.emit(\n tvm.call_extern(dout.dtype, \"set_fmatrix\",\n fmatrixConfig))\n\n out_backprop_l1 = new_alloc(tvm_ir,\n dout.dtype,\n dout_size,\n \"out_backprop_l1\",\n scope=\n tbe_platform.scope_cbuf,\n double_buffer=True)\n\n # move feature map from out to l1\n with tvm_ir.if_scope(\n tvm.all(pad_mini_left >= 0, pad_mini_top >= 0,\n pad_mini_right >= 0,\n pad_mini_bottom >= 0)):\n _load_out_backprop_once()\n with tvm_ir.else_scope():\n _load_out_backprop_repeatedly()\n\n # move filter from out to l1\n mini_kernel_l1 = new_alloc(tvm_ir,\n filter_init.dtype,\n filter_size,\n \"mini_kernel_l1\",\n scope=\n tbe_platform.scope_cbuf,\n double_buffer=True)\n\n first_pos_h = tvm.max(idx_h - virtual_pad_top, 0)\n first_pos_w = tvm.max(idx_w - virtual_pad_left, 0)\n _load_mini_kernel(\n mini_kernel_height[idx_h * stride_w + idx_w],\n mini_kernel_width[idx_h * stride_w + idx_w])\n\n img2col_buffer_size = max_tile_m * BLOCK_SIZE * \\\n max_tile_k * BLOCK_SIZE\n filter_buffer_size = max_tile_k * BLOCK_SIZE * \\\n BLOCK_SIZE * multiplier\n is_mul = False\n dx_l0c = new_alloc(tvm_ir,\n _get_mad_out_dtype(),\n dx_buffer_size,\n \"dx_l0c\",\n scope=tbe_platform.scope_cc)\n _load3d_and_load2d(\n 2, mini_kernel_height[idx_h * stride_w + idx_w],\n mini_kernel_width[idx_h * stride_w + idx_w],\n tile_m[idx_h * stride_w + idx_w],\n tile_k[idx_h * stride_w + idx_w])\n else:\n # unroll for loop\n for idx_h in range(stride_h):\n for idx_w in range(stride_w):\n mini_kernel_height = 0\n mini_kernel_width = 0\n break_flag = False\n for m in range(dilated_filter_height):\n for n in range(dilated_filter_width):\n index_h = idx_h + m\n index_w = idx_w + n\n # get one effective filter point\n if ((index_h - virtual_pad_top) % stride_h) == 0 \\\n and ((index_w - virtual_pad_left)\n % stride_w) == 0:\n mini_kernel_height = (dilated_filter_height -\n m - 1) // stride_h + 1\n mini_kernel_width = (dilated_filter_width - n -\n 1) // stride_w + 1\n break_flag = True\n break\n if break_flag:\n break\n kernel_area = mini_kernel_height * mini_kernel_width\n\n if kernel_area != 0:\n pad_mini_top, 
pad_mini_bottom, \\\n pad_mini_left, pad_mini_right = \\\n _compute_mini_kernel_padding()\n output_pad_height = output_height + pad_mini_top + \\\n pad_mini_bottom\n output_pad_width = output_width + pad_mini_left + \\\n pad_mini_right\n mini_dx_height = output_pad_height - \\\n mini_kernel_height + 1\n mini_dx_width = output_pad_width - \\\n mini_kernel_width + 1\n tile_m, tile_k, is_mul = get_tiling(\n mini_dx_height * mini_dx_width,\n mini_kernel_height * mini_kernel_width * \\\n BLOCK_SIZE, mini_dx_width)\n # set load3d config according to padding\n set_pad_left = max(pad_mini_left, 0)\n set_pad_right = max(pad_mini_right, 0)\n set_pad_top = max(pad_mini_top, 0)\n set_pad_bottom = max(pad_mini_bottom, 0)\n set_output_width = min(output_pad_width, output_width)\n set_output_height = min(output_pad_height,\n output_height)\n # malloc storage space for the computation\n filter_size = kernel_area * BLOCK_SIZE * \\\n BLOCK_SIZE * multiplier\n dout_size = set_output_height * set_output_width * \\\n BLOCK_SIZE\n dx_buffer_size = tile_m * BLOCK_SIZE * BLOCK_SIZE\n dx_ub = new_alloc(tvm_ir,\n dx.dtype,\n dx_buffer_size,\n \"dx_ub\",\n scope=tbe_platform.scope_ubuf)\n\n # load3d register configuration\n # place output parameter in corresponding bit\n fmatrixConfig = set_output_width \\\n | set_output_height << 16 \\\n | set_pad_left << 32 \\\n | set_pad_right << 40 \\\n | set_pad_top << 48 \\\n | set_pad_bottom << 56\n tvm_ir.emit(\n tvm.call_extern(\n dout.dtype, \"set_fmatrix\",\n tvm.const(fmatrixConfig, dtype=\"uint64\")))\n\n out_backprop_l1 = new_alloc(tvm_ir,\n dout.dtype,\n dout_size,\n \"out_backprop_l1\",\n scope=\n tbe_platform.scope_cbuf,\n double_buffer=True)\n\n # move feature map from out to l1\n if pad_mini_left >= 0 and pad_mini_top >= 0 \\\n and pad_mini_right >= 0 \\\n and pad_mini_bottom >= 0:\n _load_out_backprop_once()\n else:\n _load_out_backprop_repeatedly()\n\n mini_kernel_l1 = new_alloc(tvm_ir,\n filter_init.dtype,\n filter_size,\n \"mini_kernel_l1\",\n scope=\n tbe_platform.scope_cbuf,\n double_buffer=True)\n\n first_pos_h = max(idx_h - virtual_pad_top, 0)\n first_pos_w = max(idx_w - virtual_pad_left, 0)\n _load_mini_kernel(mini_kernel_height,\n mini_kernel_width)\n\n img2col_buffer_size = tile_m * BLOCK_SIZE * \\\n tile_k * BLOCK_SIZE\n filter_buffer_size = tile_k * BLOCK_SIZE * \\\n BLOCK_SIZE * multiplier\n dx_l0c = new_alloc(tvm_ir,\n _get_mad_out_dtype(),\n dx_buffer_size,\n \"dx_l0c\",\n scope=tbe_platform.scope_cc)\n _load3d_and_load2d(1, mini_kernel_height,\n mini_kernel_width, tile_m, tile_k)\n\n block_index = tvm.thread_axis(\"blockIdx.x\")\n tvm_ir.scope_attr(block_index, \"thread_extent\", block_tiling[\"block_dim\"])\n\n if block_tiling[\"type\"] == BlockTilingType.DIVISIBLE:\n with tvm_ir.for_range(0, block_tiling[\"shape\"][\"n\"],\n name=\"n_i\") as n_i:\n with tvm_ir.for_range(0, block_tiling[\"shape\"][\"c1\"],\n name=\"loop_c1\") as loop_c1:\n n_0 = block_index // (input_c1 // block_tiling[\"shape\"][\"c1\"])\n n = n_0 * block_tiling[\"shape\"][\"n\"] + n_i\n c1_0 = block_index - n_0 * (input_c1 //\n block_tiling[\"shape\"][\"c1\"])\n c1 = c1_0 * block_tiling[\"shape\"][\"c1\"] + loop_c1\n _calculation_block(n, c1)\n else:\n with tvm_ir.for_range(0, block_tiling[\"fuse_factor\"],\n name=\"fuse_factor\") as fuse_factor:\n nc1 = block_index * block_tiling[\"fuse_factor\"] + fuse_factor\n with tvm_ir.if_scope(nc1 < block_tiling[\"fuse\"]):\n n = nc1 // input_c1\n c1 = nc1 % input_c1\n _calculation_block(n, c1)\n\n return tvm_ir.get()\n\n\n# pylint: 
disable=locally-disabled, too-many-locals, too-many-statements\n# pylint: disable=locally-disabled, too-many-arguments, invalid-name\n# pylint: disable=too-many-branches, redefined-builtin\n@util.check_input_type(dict, dict, dict, (list, tuple), (list, tuple),\n                       (list, tuple), (list, tuple), str, str)\ndef depthwise_conv2d_backprop_input_d(\n        filter,\n        out_backprop,\n        input_grad,\n        input_size,\n        strides,\n        dilations=(1, 1, 1, 1),\n        pads=(0, 0, 0, 0),\n        data_format='NHWC',\n        kernel_name=\"depthwise_conv2d_backprop_input\"):\n    \"\"\"\n    algorithm: depthwise conv2d backprop input\n\n    computes the gradients of depthwise convolution with respect to the input\n\n    Parameters\n    ----------\n    filter: dict\n        4-D origin shape and dtype of filter tensor\n        support [H, W, C, K], K is channel_multiplier\n\n    out_backprop: dict\n        4-D origin shape and dtype of out_backprop tensor,\n        support [N, Co, Ho, Wo] or [N, Ho, Wo, Co],\n        gradients w.r.t. the output of the convolution\n\n    input_grad: dict\n        4-D origin shape and dtype of input tensor,\n        support [N, C, H, W] or [N, H, W, C]\n\n    input_size: a list or tuple of four ints\n        shape of input tensor, support [N, C, H, W] or [N, H, W, C]\n\n    strides: a list or tuple of four ints\n        the stride of the sliding window for height and width of the input of\n        the convolution, support [1, 1, stride_height, stride_width] or\n        [1, stride_height, stride_width, 1]\n\n    dilations: an optional list or tuple of four ints\n        the dilation factor for each dimension of input\n        if set to k > 1, there will be k-1 skipped cells between each\n        filter element on that dimension, support [1, 1, dilation_height,\n        dilation_width] or [1, dilation_height, dilation_width, 1]\n\n    pads: a list or tuple of four ints\n        padding added to each dimension of the input\n\n    data_format : str\n        format of the origin shape of the feature map, [N, C, H, W] or [N, H, W, C]\n\n    kernel_name: str\n        cce kernel name, default value is \"depthwise_conv2d_backprop_input\"\n\n    Returns\n    -------\n    None\n    \"\"\"\n    def _ceil(x):\n        \"\"\"\n        Return the least integer multiple of 16\n        which is greater than or equal to x.\n        \"\"\"\n        return ((x + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE\n\n    input_shape = input_grad.get(\"ori_shape\")\n    if input_size != input_shape:\n        raise RuntimeError(\n            \"the output shape of depthwise_conv2d_backprop_input must be \"\n            \"the same as input_size.\")\n    input_dtype = input_grad.get(\"dtype\").lower()\n    filter_shape = filter.get(\"ori_shape\")\n    filter_dtype = filter.get(\"dtype\").lower()\n\n    output_shape = out_backprop.get(\"ori_shape\")\n    output_dtype = out_backprop.get(\"dtype\").lower()\n\n    input_ori_format = input_grad.get('ori_format')\n    if input_ori_format != 'NCHW' and input_ori_format != 'NHWC':\n        raise RuntimeError(\n            \"The format of input_grad in depthwise_conv2d_backprop_input only \"\n            \"supports NCHW or NHWC.\")\n    filter_ori_format = filter.get('ori_format')\n    if filter_ori_format not in ('HWCK', 'HWCN', 'NCHW'):\n        raise RuntimeError(\n            \"The format of filter in depthwise_conv2d_backprop_input \"\n            \"only supports HWCK(HWCN)/NCHW.\")\n    dout_ori_format = out_backprop.get('ori_format')\n    if dout_ori_format != 'NCHW' and dout_ori_format != 'NHWC':\n        raise RuntimeError(\n            \"The format of out_backprop in depthwise_conv2d_backprop_input \"\n            \"only supports NCHW or NHWC.\")\n\n    # index of the strides dimension\n    DIM_S_N, DIM_S_C, DIM_S_H, DIM_S_W = 0, 1, 2, 3\n    # index of the dilations dimension\n    DIM_D_N, DIM_D_C, DIM_D_H, DIM_D_W = 0, 1, 2, 3\n    # index of the out_backprop dimension\n    
DIM_N, DIM_C, _, _ = 0, 1, 2, 3\n    # index of the filter dimension\n    _, _, DIM_W_C, DIM_W_K = 0, 1, 2, 3\n\n    if input_ori_format == 'NHWC':\n        DIM_S_N, DIM_S_H, DIM_S_W, DIM_S_C = 0, 1, 2, 3\n        DIM_D_N, DIM_D_H, DIM_D_W, DIM_D_C = 0, 1, 2, 3\n        input_shape = [\n            input_shape[0], input_shape[3], input_shape[1], input_shape[2]\n        ]\n    if dout_ori_format == 'NHWC':\n        output_shape = [\n            output_shape[0], output_shape[3], output_shape[1], output_shape[2]\n        ]\n    if filter_ori_format == \"NCHW\":\n        filter_shape = [\n            filter_shape[2], filter_shape[3], filter_shape[1], filter_shape[0]\n        ]\n    if data_format != 'NCHW' and data_format != 'NHWC':\n        raise RuntimeError(\n            \"The format of input in depthwise_conv2d_backprop_input only \"\n            \"supports NCHW and NHWC.\")\n\n    # check if the parameter is valid\n    check_params(filter_shape, filter_dtype, \"HWCK\")\n    check_params(output_shape, output_dtype, \"NCHW\")\n    check_params(input_shape, input_dtype, \"NCHW\")\n    util.check_kernel_name(kernel_name)\n    util.check_shape_rule(output_shape, FEATURE_MAP_DIM, FEATURE_MAP_DIM)\n    util.check_shape_rule(filter_shape, FILTER_DIM, FILTER_DIM)\n    util.check_shape_rule(input_shape, FEATURE_MAP_DIM, FEATURE_MAP_DIM)\n    util.check_shape_rule(strides, STRIDES_DIM, STRIDES_DIM)\n    util.check_shape_rule(dilations, DILATIONS_DIM, DILATIONS_DIM)\n    util.check_shape_size(input_shape, SHAPE_SIZE_LIMIT)\n    util.check_shape_size(filter_shape, SHAPE_SIZE_LIMIT)\n    util.check_shape_size(output_shape, SHAPE_SIZE_LIMIT)\n\n    if strides[DIM_S_H] != strides[DIM_S_W]:\n        raise RuntimeError(\n            \"current implementation only supports equal length strides in \"\n            \"the row and column dimensions.\")\n\n    if (strides[DIM_S_N] != 1) or (strides[DIM_S_C] != 1):\n        raise RuntimeError(\"the N-dim and C-dim of stride must be equal to 1.\")\n\n    if (dilations[DIM_D_N] != 1) or (dilations[DIM_D_C] != 1):\n        raise RuntimeError(\n            \"the N-dim and C-dim of dilation must be equal to 1.\")\n\n    if input_shape[DIM_N] != output_shape[DIM_N]:\n        raise RuntimeError(\n            \"feature map N-dim must be equal to out_backprop N-dim.\")\n\n    if filter_shape[DIM_W_K] != 1:\n        raise RuntimeError(\"the K(N)-dim of filter must be equal to 1.\")\n\n    if input_shape[DIM_C] != output_shape[DIM_C]:\n        raise RuntimeError(\n            \"feature map C-dim must be equal to out_backprop C-dim.\")\n\n    if (_ceil(input_shape[DIM_C]) //\n            BLOCK_SIZE) != (_ceil(filter_shape[DIM_W_C]) // BLOCK_SIZE):\n        raise RuntimeError(\n            \"only multiplier = 1 is supported, feature map C-dim must be \"\n            \"equal to filter C-dim.\")\n\n    # check pad parameter\n    if len(pads) != 4:\n        raise RuntimeError(\"pads shape should be 4d.\")\n\n    # input parameters\n    batch, input_channel, input_height, input_width = input_shape\n    filter_height, filter_width, filter_channel, _ = filter_shape\n    input_c1 = (input_channel + BLOCK_SIZE - 1) // BLOCK_SIZE\n    stride_h, stride_w = strides[DIM_S_H], strides[DIM_S_W]\n    dilation_h, dilation_w = dilations[DIM_D_H], dilations[DIM_D_W]\n    strides = (stride_h, stride_w)\n    dilations = (dilation_h, dilation_w)\n\n    # output parameters\n    batch, output_channel, output_height, output_width = output_shape\n    output_c1 = (output_channel + BLOCK_SIZE - 1) // BLOCK_SIZE\n\n    l1_size = tbe_platform.cce_conf.get_soc_spec(tbe_platform.cce_conf.L1_SIZE)\n    data_size = tbe_platform.cce_intrin.get_bit_len(output_dtype) // 8\n    dilated_filter_height = (filter_height - 1) * dilation_h + 1\n    dilated_filter_width = (filter_width - 1) * dilation_w + 1\n    max_hw_in_l1 = (l1_size - dilated_filter_height * dilated_filter_width *\n                    BLOCK_SIZE * BLOCK_SIZE * 
data_size) // (\n data_size * output_width * output_height)\n dilated_output_w = output_width * stride_w - (stride_w - 1)\n max_dh_in_l1 = (l1_size - filter_height * filter_width * BLOCK_SIZE *\n BLOCK_SIZE * data_size) // (data_size * dilated_output_w *\n BLOCK_SIZE) - (filter_height -\n 1)\n\n pad_top, pad_bottom, pad_left, pad_right = pads\n full_height = input_height + pad_top + pad_bottom\n full_width = input_width + pad_left + pad_right\n out_backprop_height = (full_height - dilated_filter_height) // stride_h + 1\n out_backprop_width = (full_width - dilated_filter_width) // stride_w + 1\n\n if output_height != out_backprop_height:\n raise RuntimeError(\n \"Row number of out_backprop in depthwise_conv2d_backprop_input\"\n \" is wrong!\")\n if output_width != out_backprop_width:\n raise RuntimeError(\"Column number of out_backprop in\"\n \" depthwise_conv2d_backprop_input is wrong!\")\n\n if max_hw_in_l1 >= BLOCK_SIZE and output_height != 1 and output_width != 1:\n input_shape = [batch, input_c1, input_height, input_width, BLOCK_SIZE]\n output_shape = [batch, output_c1, output_height, output_width,\n BLOCK_SIZE]\n dout = tvm.placeholder(output_shape, dtype=output_dtype, name='dout')\n filter_init = tvm.placeholder(filter_shape,\n dtype=filter_dtype,\n name='filter')\n res = tvm.extern(\n [output_shape, filter_shape], [dout, filter_init],\n lambda ins, outs: depthwise_conv2d_backprop_input_kernel(\n outs, ins, input_shape, strides, pads, dilations),\n name=\"dx\",\n dtype=input_dtype)\n sch = tvm.create_schedule(res.op)\n elif max_dh_in_l1 >= BLOCK_SIZE and dilation_h == 1 and dilation_w == 1:\n filter_shape = [_ceil(filter_channel) // BLOCK_SIZE,\n filter_height * filter_width, 1, BLOCK_SIZE,\n BLOCK_SIZE]\n filter_init = tvm.placeholder(filter_shape,\n dtype=filter_dtype,\n name='filter')\n\n output_shape = [batch, output_c1, 1, output_height, output_width,\n BLOCK_SIZE]\n dout = tvm.placeholder(output_shape, dtype=output_dtype, name='dout')\n\n input_shape = [batch, input_c1, 1, input_height, input_width,\n BLOCK_SIZE]\n res = te.lang.cce.depthwise_conv2d_backprop_input_d_compute(\n input_shape, filter_init, dout, [filter_height, filter_width],\n strides, pads)\n\n sch = te.lang.cce.te_schedule. 
\\\n depthwise_conv2d_backprop_input_d_schedule(res)\n else:\n raise RuntimeError(\"L1's memory space is not enough!\")\n\n with tbe_platform.build_config:\n tvm.build(sch, [filter_init, dout, res], \"cce\", name=kernel_name)\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/impl/depthwise_conv2d_backprop_input_d.py","file_name":"depthwise_conv2d_backprop_input_d.py","file_ext":"py","file_size_in_byte":66041,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"31412127482","text":"import abc\nimport datetime\nimport aiohttp\nimport asyncio\nimport pokeretriever.PokedexObject as Poke\nfrom pokedex import Request\n\n\nasync def get_pokedex_data(key, url, session) -> dict:\n \"\"\"\n Gets the pokedex data from the PokeAPI\n :param key: input data/request\n :param url: reference url of the API\n :param session:\n :return:\n \"\"\"\n try:\n target_url = url.format(key)\n response = await session.request(method=\"GET\", url=target_url)\n json_dict = await response.json()\n return json_dict\n except aiohttp.ContentTypeError:\n return {'error': \"error\"}\n\n\nclass BaseHandler(abc.ABC):\n \"\"\"\n Base handler for the three types of requests\n \"\"\"\n\n def __init__(self, next_handler=None):\n self.next_handler = next_handler\n\n @abc.abstractmethod\n def handle_request(self, r: Request):\n pass\n\n def set_handler(self, handler):\n self.next_handler = handler\n\n\nclass InputHandler(BaseHandler):\n \"\"\"\n Handle input mode for files or raw data\n \"\"\"\n def handle_request(self, r: Request):\n \"\"\"\n Handle first chain\n :param r:\n :return:\n \"\"\"\n if r.input_data is None:\n with open(r.input_file, mode='r') as f:\n r.raw_data = f.read().splitlines()\n else:\n r.raw_data = r.input_data\n r.number_of_requests = len(r.raw_data)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.next_handler.handle_request(r))\n\n\nclass PokemonRequestHandler(BaseHandler):\n \"\"\"\n Handles pokemon requests\n \"\"\"\n\n async def handle_request(self, r: Request):\n \"\"\"\n Creates the Pokemon object(s) requested\n :param r:\n :return:\n \"\"\"\n url = \"https://pokeapi.co/api/v2/pokemon/{}/\"\n # print(r.raw_data)\n async with aiohttp.ClientSession() as session:\n async_coroutines = [get_pokedex_data(key, url, session)\n for key in r.raw_data]\n responses = await asyncio.gather(*async_coroutines)\n for res in responses:\n try:\n r.result.append(Poke.Pokemon(**res))\n except TypeError:\n r.result.append(\"An error occurred. Skipping this request.\")\n pass\n\n self.next_handler.handle_request(r)\n\n\nclass PokemonExpandedHandler(BaseHandler):\n \"\"\"\n Handles pokemon requests when the expanded flag is active\n \"\"\"\n async def handle_request(self, r: Request):\n \"\"\"\n Gets information from the Pokemon API.\n :param r: a request.\n :return: None.\n \"\"\"\n # Get each pokemon\n url = \"https://pokeapi.co/api/v2/pokemon/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines = [get_pokedex_data(key, url, session)\n for key in r.raw_data]\n\n responses = await asyncio.gather(*async_coroutines)\n\n list_pokemon = []\n\n for res in responses:\n try:\n list_pokemon.append(Poke.Pokemon(**res))\n except TypeError:\n list_pokemon.append(\"An error occurred. 
Skipping this request.\")\n pass\n\n for pokemon in list_pokemon:\n # Get each ability from a pokemon\n url = \"https://pokeapi.co/api/v2/ability/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines2 = [get_pokedex_data(key, url, session)\n for key in pokemon.ability_list()]\n\n responses2 = await asyncio.gather(*async_coroutines2)\n ability_list = []\n for ability in responses2:\n ability_list.append(Poke.PokemonAbility(**ability))\n pokemon.abilities = ability_list\n # Get each move from a pokemon\n url = \"https://pokeapi.co/api/v2/move/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines3 = [get_pokedex_data(key, url, session)\n for key in pokemon.move_list()]\n\n responses3 = await asyncio.gather(*async_coroutines3)\n move_list = []\n for move in responses3:\n move_list.append(Poke.PokemonMove(**move))\n pokemon.moves = move_list\n # Get each stat from a pokemon\n url = \"https://pokeapi.co/api/v2/stat/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines4 = [get_pokedex_data(key, url, session)\n for key in pokemon.stat_list()]\n\n responses4 = await asyncio.gather(*async_coroutines4)\n stat_list = []\n for stat in responses4:\n stat_list.append(Poke.PokemonStat(**stat))\n pokemon.stats = stat_list\n\n r.result = list_pokemon\n\n self.next_handler.handle_request(r)\n\n\nclass AbilityRequestHandler(BaseHandler):\n \"\"\"\n Handle ability requests\n \"\"\"\n\n async def handle_request(self, r: Request):\n \"\"\"\n Creates PokeAbility objects\n :param r:\n :return:\n \"\"\"\n url = \"https://pokeapi.co/api/v2/ability/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines = [get_pokedex_data(key, url, session)\n for key in r.raw_data]\n responses = await asyncio.gather(*async_coroutines)\n for res in responses:\n try:\n r.result.append(Poke.PokemonAbility(**res))\n except TypeError:\n r.result.append(\"An error occurred. Skipping this request.\")\n pass\n\n self.next_handler.handle_request(r)\n\n\nclass MoveRequestHandler(BaseHandler):\n \"\"\"\n Handle move requests\n \"\"\"\n\n async def handle_request(self, r: Request):\n \"\"\"\n Create PokeMove objects\n :param r:\n :return:\n \"\"\"\n\n url = \"https://pokeapi.co/api/v2/move/{}/\"\n async with aiohttp.ClientSession() as session:\n async_coroutines = [get_pokedex_data(key, url, session)\n for key in r.raw_data]\n responses = await asyncio.gather(*async_coroutines)\n for res in responses:\n try:\n r.result.append(Poke.PokemonMove(**res))\n except TypeError:\n r.result.append(\"An error occurred. 
Skipping this request.\")\n pass\n\n self.next_handler.handle_request(r)\n\n\nclass OutputHandler(BaseHandler):\n \"\"\"\n Handles the output of the PokeDex\n \"\"\"\n def handle_request(self, r: Request):\n \"\"\"\n Prints out the result to console or saves them to a specified .txt file\n :param r:\n :return:\n \"\"\"\n if r.output == 'print':\n for response in r.result:\n print(response, \"\\n\")\n else:\n with open(r.output, mode='w') as my_text_file:\n date = datetime.datetime.now()\n string_date = date.strftime(\"%d/%m/%Y %H:%M\")\n my_text_file.write(f\"Timestamp: {string_date}\\n\"\n f\"Number of requests: {r.number_of_requests}\\n\")\n for response in r.result:\n my_text_file.write(f\"{response}\\n\")\n","repo_name":"tan-jacob/COMP3522_Assignment3_A01206825_A00912481","sub_path":"pokeretriever/RequestHandlers.py","file_name":"RequestHandlers.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24326812656","text":"import xlrd \nimport pandas as pd\nimport xlwt \nfrom xlwt import Workbook \n\nloc = (\"C:/Users/Lenovo/Desktop/Bitirme/Word2Vec.v3/files/test_sonuc2.xlsx\")\nf = open(\"C:/Users/Lenovo/Desktop/Bitirme/Word2Vec.v3/files/class_data.csv\",\"a\",encoding=\"utf-8\")\n#f.close()\n#fwrite = open(\"C:/Users/Lenovo/Desktop/new.txt\", \"a\",encoding=\"utf-8\")\n#wp = open(\"C:/Users/Lenovo/Desktop/Bitirme/Word2Vec.v3/files/excelProducts.txt\", \"r\",encoding=\"utf-8\")\n#df = pd.read_csv('C:/Users/Lenovo/Desktop/product_catalog (1).csv')\n#df_clean = df.drop_duplicates(subset=['ProductName', 'ManufacturerName'])\n\"\"\"\"\nwbook = Workbook() \nsheet1 = wbook.add_sheet('Sheet 1', cell_overwrite_ok=True) \n \nsheet1.write(0, 0, 'Ürün adı')\nsheet1.write(0, 1, 'Arama Kelimeleri') \nj=1\nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \nsheet.cell_value(0, 0) \nfor i in range(1,sheet.nrows):\n\türün =sheet.cell_value(i,0)\n\taramakelimesi = sheet.cell_value(i,1)\n\tif ürün!=\"NULL\" and aramakelimesi != \"NULL\" :\n\n\t\tsheet1.write(j, 0, ürün)\n\t\tsheet1.write(j, 1, aramakelimesi)\n\t\tj+=1\n\nwbook.save('C:/Users/Lenovo/Desktop/tam_eslesme.xls')\n\"\"\"\nresult = dict()\nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \nsheet.cell_value(0, 0) \nfor i in range(0,sheet.nrows): \n\tif sheet.cell_value(i,2) is not None:\n\t\tif sheet.cell_value(i,2) in result:\n\t\t\tresult[str(sheet.cell_value(i,2))]+=1\n\t\telse:\n\t\t\tresult[str(sheet.cell_value(i,2))]=1\n\n\nprint(sheet.nrows)\nprint(result)\n\"\"\"\n\nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \nsheet.cell_value(0, 0) \nfor i in range(1,sheet.nrows):\n\ttext =sheet.cell_value(i,0)\n\tkeys = sheet.cell_value(i,1).replace(',', '')\n\ttext += ','+keys+'\\n'\n\tprint(text)\n\tf.write(text)\n\nlist(df_clean['ProductName'])\ncount =0\nfor sentence in wp:\n sentence = str(sentence).rstrip()\n #sentence = sentence.split(',')[0]\n if sentence not in list(df_clean['ProductName']):\n \tprint(sentence)\n \tsentence=sentence+'\\n'\n \tfwrite.write(sentence)\n \tcount+=1\nprint(count)\n\t\n\"\"\"","repo_name":"simgesaricayir/Machine-Learning-Supported-Semantic-Search-Engine","sub_path":"Tests/productRead.py","file_name":"productRead.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"25989281076","text":"import torch\nfrom sklearn.cluster import KMeans\nfrom tqdm import tqdm\nimport random\n\nfrom 
.setup import device\nfrom .config import n_classes, n_features\nfrom .utils import show_tensor_images\nfrom .phase2 import dataloader2, encoder, generator2\n\n# Encode features by class label\nfeatures = {}\nfor (x, _, inst, _) in tqdm(dataloader2):\n x = x.to(device)\n inst = inst.to(device)\n area = inst.size(2) * inst.size(3)\n\n # Get pooled feature map\n with torch.no_grad():\n feature_map = encoder(x, inst)\n\n for i in torch.unique(inst):\n label = i if i < 1000 else i // 1000\n label = int(label.flatten(0).item())\n\n # All indices should have same feature per class from pooling\n idx = torch.nonzero(inst == i, as_tuple=False)\n n_inst = idx.size(0)\n idx = idx[0, :]\n\n # Retrieve corresponding encoded feature\n feature = feature_map[idx[0], :, idx[2], idx[3]].unsqueeze(0)\n\n # Compute rate of feature appearance (in official code, they compute per block)\n block_size = 32\n rate_per_block = 32 * n_inst / area\n rate = torch.ones((1, 1), device=device).to(feature.dtype) * rate_per_block\n\n feature = torch.cat((feature, rate), dim=1)\n if label in features.keys():\n features[label] = torch.cat((features[label], feature), dim=0)\n else:\n features[label] = feature\n\n\n# Cluster features by class label\nk = 10\ncentroids = {}\nfor label in range(n_classes):\n if label not in features.keys():\n continue\n feature = features[label]\n\n # Thresholding by 0.5 isn't mentioned in the paper, but is present in the\n # official code repository, probably so that only frequent features are clustered\n feature = feature[feature[:, -1] > 0.5, :-1].cpu().numpy()\n\n if feature.shape[0]:\n n_clusters = min(feature.shape[0], k)\n kmeans = KMeans(n_clusters=n_clusters).fit(feature)\n centroids[label] = kmeans.cluster_centers_\n\ndef infer(label_map, instance_map, boundary_map):\n # Sample feature vector centroids\n b, _, h, w = label_map.shape\n feature_map = torch.zeros((b, n_features, h, w), device=device).to(label_map.dtype)\n\n for i in torch.unique(instance_map):\n label = i if i < 1000 else i // 1000\n label = int(label.flatten(0).item())\n\n if label in centroids.keys():\n centroid_idx = random.randint(0, centroids[label].shape[0] - 1)\n idx = torch.nonzero(instance_map == int(i), as_tuple=False)\n\n feature = torch.from_numpy(centroids[label][centroid_idx, :]).to(device)\n feature_map[idx[:, 0], :, idx[:, 2], idx[:, 3]] = feature\n\n with torch.no_grad():\n x_fake = generator2(torch.cat((label_map, boundary_map, feature_map), dim=1))\n return x_fake\n\nfor x, labels, insts, bounds in dataloader2:\n x_fake = infer(labels.to(device), insts.to(device), bounds.to(device))\n show_tensor_images(x_fake.to(x.dtype))\n show_tensor_images(x)\n break","repo_name":"hydrousme2/pix2pixhd","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"72245585866","text":"#dit is het project voor vandaag\n#code van Jordy\n\nimport math\n\nm2 = float(input(\"Hoeveel vierkante meter? \"))\nkeuze = input(\"Welke klinkers? 10x10, 12x12 of 14x14? \")\nkilometers = float(input(\"hoeveel km wordt afgelegd? \"))\nopbreek = input(\"Moeten er opgebroken worden eerst? 
ja of nee\")\n\nprijsTotaal = 0\n\nif keuze == \"10x10\":\n prijsKlinkers = m2 * 14\nelif keuze == \"12x12\":\n prijsKlinkers = m2 * 16\nelse:\n prijsKlinkers = m2 * 16.5\n\nif opbreek == \"ja\":\n werkUren = math.ceil((m2 / 12) + (m2 / 15))\n prijsWerk = werkUren * 40\nelse:\n werkUren = math.ceil(m2 / 12)\n prijsWerk = werkUren * 40\n\nif kilometers > 10:\n reisKosten = 5 + 0.30 * kilometers\nelse:\n reisKosten = 5\n\nprijsTotaal = prijsWerk + prijsKlinkers + reisKosten\n\nprint(\"Klinkers kosten\", prijsKlinkers, \"euro\")\nprint(\"Werk kosten zijn\", prijsWerk, \"euro\")\nprint(\"Totaal prijs bedraagt\", prijsTotaal, \"euro\")\n","repo_name":"bjornlecis/PychamnaarGithub","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70145045706","text":"# coding=utf-8\n#################\n# Utilities\n#################\nimport uuid\nfrom typing import Union\n\n\nclass Singleton(type):\n \"\"\"\n Only allows one instantiation. On subsequent __init__ calls, returns the first instance\n \"\"\"\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\ndef clamp(value: Union[int, float], min_: Union[int, float], max_: Union[int, float]) -> Union[int, float]:\n \"\"\"\n Clamps the value between minimum and maximum values.\n :param value: number to clamp\n :param min_: minimum value\n :param max_: maximum value\n :return: clamped number\n \"\"\"\n # If inside the boundaries, return the actual value\n if min_ <= value <= max_:\n return value\n # When going over the boundary, return min/max\n elif value < min_:\n return min_\n else:\n return max_\n\n\ndef resolve_time(delta: int, sep: str = \"\") -> str:\n \"\"\"\n Converts an int to its human-friendly representation\n :param delta: time in seconds\n :param sep: string separator\n :return: string\n \"\"\"\n if type(delta) is not int:\n delta = int(delta)\n\n years, days, hours, minutes = 0, 0, 0, 0\n\n # Calculate best representations of the number\n while True:\n if delta >= 60 * 60 * 24 * 365: # 1 Year\n years += 1\n delta -= 31556926\n\n if delta >= 60 * 60 * 24: # 1 Day\n days += 1\n delta -= 86400\n\n elif delta >= 60 * 60: # 1 hour\n hours += 1\n delta -= 3600\n\n elif delta >= 60: # 1 minute\n minutes += 1\n delta -= 60\n\n else:\n break\n\n # Form calculations into a string\n fields = []\n if years:\n fields.append(f\"{years}y\")\n if days:\n fields.append(f\"{days}d\")\n if hours:\n fields.append(f\"{hours}h\")\n if minutes:\n fields.append(f\"{minutes}m\")\n fields.append(f\"{delta}s\")\n\n # If tm is less than a minute, do not add \"and\".\n return sep.join(fields)\n\n\ndef make_random_song_id(length=12) -> int:\n return int(str(uuid.uuid4().int)[:length])\n","repo_name":"DefaultSimon/Soundcube","sub_path":"soundcube/core/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10484134526","text":"\"\"\"\nModels for \"webcast invitees\" package.\n\"\"\"\n\nfrom sqlalchemy import CheckConstraint, UniqueConstraint\n\nfrom app import db\nfrom app.base.models import BaseModel\nfrom app.base.model_fields import LCString, ChoiceString\nfrom app.webcast_resources.webcast_invitees import constants as WEBCASTINVITEE\nfrom app.base import 
constants as APP\n# related model imports done in webcasts/__init__\n\n\nclass WebcastInvitee(BaseModel):\n\n __tablename__ = 'webcast_invitee'\n\n created_by = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_invitee_created_by_fkey', ondelete='CASCADE'),\n nullable=False)\n updated_by = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_invitee_updated_by_fkey', ondelete='CASCADE'),\n nullable=False)\n webcast_id = db.Column(db.BigInteger, db.ForeignKey(\n 'webcast.id', name='webcast_invitee_webcast_id_fkey',\n ondelete='CASCADE'), nullable=False)\n\n invitee_id = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_invitee_invitee_id_fkey', ondelete='CASCADE'))\n invitee_email = db.Column(LCString(128))\n is_mail_sent = db.Column(db.Boolean, default=False)\n email_status = db.Column(ChoiceString(APP.EMAIL_STATUS_CHOICES),\n nullable=False, default=APP.EMAIL_NOT_SENT)\n # if email is provided, then first_name, last_name and designation\n # is expected (not required)\n invitee_first_name = db.Column(db.String(128))\n invitee_last_name = db.Column(db.String(128))\n invitee_designation = db.Column(db.String(128))\n\n status = db.Column(ChoiceString(WEBCASTINVITEE.WBCT_INV_STATUS_CHOICES),\n nullable=False, default=WEBCASTINVITEE.INVITED)\n\n conference_url = db.Column(db.String(256))\n\n # the actual final user, i.e after creating guest account, or copy of\n # user_id, for already existing system users who directly join, or users\n # from contacts\n user_id = db.Column(db.BigInteger, db.ForeignKey(\n 'user.id', name='webcast_invitee_user_id_fkey', ondelete='CASCADE'))\n\n # multi column\n __table_args__ = (\n CheckConstraint('((invitee_id IS NOT NULL) OR '\n '(invitee_email IS NOT NULL))',\n name='c_check_wbcinv_invitee_id_invitee_email_'\n 'not_all_null_key'),\n UniqueConstraint('webcast_id', 'invitee_email',\n name='c_webcast_id_invitee_email_key'),\n UniqueConstraint('webcast_id', 'invitee_id',\n name='c_webcast_id_invitee_id_key'),\n )\n\n # relationships\n webcast = db.relationship('Webcast', backref=db.backref(\n 'webcast_invitees', lazy='dynamic', passive_deletes=True))\n invitee = db.relationship('User', backref=db.backref(\n 'webcasts_invited', lazy='dynamic'),\n foreign_keys='WebcastInvitee.invitee_id')\n crm_group = db.relationship('User', backref=db.backref(\n 'webcast_crm_group', lazy='dynamic'),\n foreign_keys='WebcastInvitee.user_id')\n webcast_external_invitee = db.relationship('Webcast', backref=db.backref(\n 'external_invitees', lazy='dynamic', passive_deletes=True))\n invitee_j = db.relationship(\n 'Webcast', secondary='user',\n backref=db.backref('invited', uselist=False),\n foreign_keys='[WebcastInvitee.webcast_id, WebcastInvitee.invitee_id, '\n 'WebcastInvitee.invitee_email]',\n primaryjoin='Webcast.row_id == WebcastInvitee.webcast_id',\n secondaryjoin='or_(WebcastInvitee.invitee_id == User.row_id, '\n 'WebcastInvitee.invitee_email == User.email)',\n viewonly=True)\n\n def __init__(self, created_by=None, updated_by=None,\n invitee_id=None, *args, **kwargs):\n self.created_by = created_by\n self.updated_by = updated_by\n self.invitee_id = invitee_id\n super(WebcastInvitee, self).__init__(*args, **kwargs)\n\n def __repr__(self):\n return '' % (self.row_id)\n","repo_name":"Witzcode0/Exchange-connect","sub_path":"app/webcast_resources/webcast_invitees/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"72807739465","text":"\nfrom collections import deque\nimport sys\n\n\ndef solution(p, n, arr):\n if arr[0] == '':\n arr = deque()\n else:\n arr = deque(arr)\n check = 1\n for i in range(len(p)):\n if p[i] == 'R':\n check = check*-1\n else:\n if len(arr) < 1:\n return 'error'\n else:\n if check == 1:\n arr.popleft()\n else:\n arr.pop()\n arr = list(arr)\n if check == 1:\n return f'[{\",\".join(arr)}]'\n\n return f'[{\",\".join(arr[::-1])}]'\n\n\ndef main():\n T = int(sys.stdin.readline())\n answer = []\n for i in range(T):\n p = list(sys.stdin.readline().split()[0])\n n = int(sys.stdin.readline())\n arr = sys.stdin.readline().rstrip()[1:-1].split(\",\")\n answer.append(solution(p, n, arr))\n\n for ele in answer:\n print(ele)\n\nif __name__ == \"__main__\":\n main()","repo_name":"fineman999/Algorithm","sub_path":"BaekJoon/Gold/AC.py","file_name":"AC.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71172233225","text":"import argparse\n\nimport cv2\nimport einops\nimport numpy as np\nimport polyscope as ps\nimport polyscope.imgui as psim\nimport scipy\nimport torch\nfrom kornia.filters import median_blur\nfrom torch.distributions.utils import clamp_probs\n\nfrom gans.coords import CoordBridge\nfrom gans.models.builder import build_generator\nfrom gans.models.ops import GumbelSigmoid\nfrom gans.pretrained import autoload_ckpt\nfrom gans.utils import colorize, cycle, init_random_seed, tanh_to_sigmoid\n\n\ndef visualize_2d(G, coord, args, steps, interp_fn):\n def generate():\n imgs = G(\n z=torch.from_numpy(interp_fn(next(steps))).float().to(args.device),\n angle=coord.angle,\n truncation_psi=args.truncation_psi,\n input_w=True,\n )\n grid = [tanh_to_sigmoid(imgs[\"image\"])]\n if \"image_orig\" in imgs:\n grid = [imgs[\"raydrop_logit\"].sigmoid()] + grid\n grid = [tanh_to_sigmoid(imgs[\"image_orig\"])] + grid\n grid = torch.cat(grid, dim=2)\n grid = colorize(grid)\n return grid[0].cpu().numpy().transpose(1, 2, 0)\n\n print('press \"q\" to quit')\n while True:\n cv2.imshow(\"image\", generate()[..., ::-1])\n if cv2.waitKey(10) == ord(\"q\"):\n break\n\n\ndef visualize_3d(G, coord, args, steps, interp_fn):\n # Polyscope setting\n ps.set_program_name(\"Interpolating point clouds\")\n ps.set_SSAA_factor(3)\n ps.set_build_gui(False)\n ps.init()\n ps.set_ground_plane_mode(\"shadow_only\")\n ps.set_up_dir(\"z_up\")\n ps.set_ground_plane_height_factor(0.1)\n ps.set_shadow_darkness(0.1)\n ps.look_at((-1, -1, 1), (0, 0, 0))\n pts_kwargs = dict(radius=0.0005, color=(0, 0, 0))\n in_updating = True\n z = None\n psi = args.truncation_psi\n\n def render():\n nonlocal z, psi, in_updating\n\n # GUIs\n psim.PushItemWidth(150)\n if in_updating:\n if psim.Button(\"Stop\"):\n in_updating = False\n else:\n if psim.Button(\"Resume\"):\n in_updating = True\n _, psi = psim.SliderFloat(\"Truncation trick\", psi, v_min=-1, v_max=1)\n psim.PopItemWidth()\n\n # Generation\n if in_updating:\n z = torch.from_numpy(interp_fn(next(steps))).float().to(args.device)\n imgs = G(z=z, angle=coord.angle, truncation_psi=psi, input_w=True)\n\n # Convert depth to point cloud\n inv_depth = tanh_to_sigmoid(imgs[\"image\"])\n points = coord.convert(inv_depth, \"inv_depth_norm\", \"point_map\")\n points = median_blur(points, (3, 3))\n normal = coord.convert(points, \"point_map\", \"normal_map\")\n normal = tanh_to_sigmoid(normal)\n points = points / coord.max_depth\n points = einops.rearrange(points, \"b c h w -> b (h w) 
c\")\n colors = einops.rearrange(normal, \"b c h w -> b (h w) c\")\n points = points[0].cpu().numpy()\n colors = colors[0].cpu().numpy()\n\n if not ps.has_point_cloud(\"lidar\"):\n ps.register_point_cloud(\"lidar\", points, **pts_kwargs)\n else:\n ps.get_point_cloud(\"lidar\").update_point_positions(points)\n ps.get_point_cloud(\"lidar\").add_color_quantity(\"n\", colors, enabled=True)\n\n ps.set_user_callback(render)\n ps.show()\n\n\nif __name__ == \"__main__\":\n # setting\n torch.set_grad_enabled(False)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ckpt_path\", type=str, required=True)\n parser.add_argument(\"--mode\", choices=[\"2d\", \"3d\"], default=\"2d\")\n parser.add_argument(\"--num_anchors\", type=int, default=10)\n parser.add_argument(\"--truncation_psi\", type=float, default=0.7)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--device\", choices=[\"cuda\", \"cpu\"], default=\"cuda\")\n args = parser.parse_args()\n\n init_random_seed(args.seed)\n\n # config\n ckpt = autoload_ckpt(args.ckpt_path)\n cfg = ckpt[\"cfg\"]\n\n # coord converter\n H, W = cfg.model.generator.synthesis_kwargs.resolution\n coord = CoordBridge(\n num_ring=H,\n num_points=W,\n min_depth=cfg.dataset.min_depth,\n max_depth=cfg.dataset.max_depth,\n angle_file=f\"data/coords/{cfg.dataset.name}.npy\",\n )\n coord.to(args.device)\n\n # generator\n G = build_generator(cfg.model.generator)\n G.load_state_dict(ckpt[\"G_ema\"])\n G.eval().to(args.device)\n\n # make deterministic\n uniforms = clamp_probs(torch.rand(1, H, W, device=args.device))\n noise = uniforms.log() - (-uniforms).log1p()\n for n, m in G.named_modules():\n if isinstance(m, GumbelSigmoid):\n m.register_forward_hook(lambda _m, i, _o: ((i[0] + noise) > 0.0).float())\n if hasattr(m, \"use_fp16\"):\n m.use_fp16 = False\n\n # setup latent codes\n zs = []\n z_dim = cfg.model.generator.mapping_kwargs.in_ch\n for _ in range(args.num_anchors):\n noise = torch.randn(z_dim, device=args.device)\n noise /= noise.pow(2).mean(dim=0, keepdim=True).add(1e-8).sqrt()\n zs.append(noise)\n zs = G.forward_mapping(torch.stack(zs))\n\n # build an interpolation path between the anchors\n num_frames = int(90 * args.num_anchors)\n interp_fn = scipy.interpolate.interp1d(\n x=np.arange(-args.num_anchors * 2, args.num_anchors * 3),\n y=np.tile(zs.cpu().numpy(), [5] + [1] * (zs.ndim - 1)),\n kind=\"cubic\",\n axis=0,\n )\n steps = np.linspace(0, args.num_anchors, num_frames, endpoint=False)\n steps = cycle(list(steps[:, None]))\n\n if args.mode == \"2d\":\n visualize_2d(G, coord, args, steps, interp_fn)\n elif args.mode == \"3d\":\n visualize_3d(G, coord, args, steps, interp_fn)\n else:\n pass\n","repo_name":"kazuto1011/dusty-gan-v2","sub_path":"demo_interpolation.py","file_name":"demo_interpolation.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"71649260744","text":"\nn = int(input())\ns = list(map(int, input().split()))\n\nplayer_1 = 0\nplayer_2 = 0\nmove = 0\n\nwhile s:\n move += 1\n if s[0] > s[-1]:\n if move % 2 != 0:\n player_1 += s.pop(0)\n \n else:\n player_2 += s.pop(0)\n else:\n if move % 2 != 0:\n player_1 += s.pop(-1)\n else:\n player_2 += s.pop(-1)\n\nprint(player_1, 
player_2)","repo_name":"thevadimspivak/codeforces_solutions_in_python","sub_path":"problems/A/381A.py","file_name":"381A.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72878366025","text":"from random import randint\nfrom random import random\nfrom math import exp\nfrom math import log\nimport pygame\nimport sys\n\nswitch_time = 0\nframes_per_sec = 60 # 1 (fps) = 1 (sec)\nblock_size = (50, 50)\nbs_size = (20, 20)\nroad_width = 10\nwindow_size = ((block_size[0] * 10 + road_width * 9), (block_size[1] * 10 + road_width * 9))\nunit = 590 / 25\nalgo = ['Best Effort', 'Entropy', 'Threshold', 'My Algo']\np_min = 15\nentropy = 20\n\n \nspeed = 0.02 * unit * 10# km / sec\nforward_prob = 1 / 2\nuturn_prob = 1 / 16\nleft_prob = 7 / 32\nright_prob = 7 / 32\ntransmission_power = 120 # dB\narrival_rate = 2 / 3600 # calls / sec\nservice_time = 3 * 60 # sec\nfreq_table = [i * 100 for i in range(1, 11)]\ncar_entering_rate = (1/12) * exp(-(1/12))\n\n\nalice_blue = (240, 248, 255) # for bs\nchartreuse1 = (127,255,0)\ncadetblue1 = (152,245,255)\ncyan2 = (0,238,238)\ndarkolivegreen1 = (202,255,112)\ndarkorchid = (153,50,204)\ndodgerblue2 = (28,134,238)\nfirebrick2 = (238,44,44)\nhotpink = (255,105,180)\nlightsalmon3 = (205,129,98)\norangered1 = (255,69,0)\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\ntop_border_y = 0\nleft_border_x = 0\nbottom_border_y = window_size[1]\nright_border_x = block_size[0] * 10 + road_width * 9\n\ncolor_arr = (chartreuse1, cadetblue1,cyan2, darkolivegreen1, darkorchid, dodgerblue2, firebrick2, hotpink, lightsalmon3, orangered1)\nbs_arr = []\ncar_arr = []\n\nclock = pygame.time.Clock()\npygame.init()\nwindow = pygame.display.set_mode(window_size)\n\ncar_group = pygame.sprite.Group()\nblock_group = pygame.sprite.Group()\nbase_station_group = pygame.sprite.Group()\nfont = pygame.font.match_font('arial')\n\n\ndef calculate_receiving_power(fc, dis):\n return transmission_power - (32.45 + 20 * (log(fc, 10)) + 20 * (log(dis, 10)))\n\ndef set_base_station_and_block():\n for i in range(10):\n for j in range(10):\n obj = block(i, j)\n block_group.add(obj)\n rand = randint(1, 10)\n if rand == 1:\n index = randint(0, 9)\n obj = base_station(i, j, index)\n bs_arr.append(obj)\n base_station_group.add(obj)\n \ndef best_effort_find_the_base_station(car):\n max_receiving_power = -(sys.maxsize)\n max_index = -1\n for i, bs in enumerate(bs_arr):\n dis = ((bs.rect.centerx - car.rect.centerx) ** 2 + (bs.rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n receiving_power = calculate_receiving_power(bs.freq, dis_in_km)\n if receiving_power > max_receiving_power:\n max_index = i\n max_receiving_power = receiving_power\n \n car.color = bs_arr[max_index].color\n car.received_power = round(max_receiving_power, 2)\n car.connected_bs = max_index\n \ndef entropy_find_the_base_station(car):\n max_receiving_power = -(sys.maxsize)\n max_index = -1\n for i, bs in enumerate(bs_arr):\n dis = ((bs.rect.centerx - car.rect.centerx) ** 2 + (bs.rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n receiving_power = calculate_receiving_power(bs.freq, dis_in_km)\n if receiving_power > max_receiving_power:\n max_index = i\n max_receiving_power = receiving_power\n # calculate current receiving power\n if car.connected_bs != -1: \n dis = ((bs_arr[car.connected_bs].rect.centerx - car.rect.centerx) ** 2 + (bs_arr[car.connected_bs].rect.centery - car.rect.centery) ** 2) ** 0.5\n 
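# re-evaluate the currently connected link with the same free-space path-loss\n # model used in the scan above (FSPL(dB) = 32.45 + 20*log10(f_MHz) + 20*log10(d_km),\n # see calculate_receiving_power), so the entropy comparison below compares like with like\n 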
dis_in_km = dis / unit\n car.received_power = calculate_receiving_power(bs_arr[car.connected_bs].freq, dis_in_km)\n car.received_power = round(car.received_power, 2)\n \n if (max_receiving_power - car.received_power) > entropy:\n car.color = bs_arr[max_index].color\n car.received_power = round(max_receiving_power, 2)\n car.connected_bs = max_index\n\ndef threshold_find_the_base_station(car):\n if car.connected_bs != -1: \n dis = ((bs_arr[car.connected_bs].rect.centerx - car.rect.centerx) ** 2 + (bs_arr[car.connected_bs].rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n car.received_power = calculate_receiving_power(bs_arr[car.connected_bs].freq, dis_in_km)\n car.received_power = round(car.received_power, 2)\n if car.received_power > p_min :\n return\n \n max_receiving_power = -(sys.maxsize)\n max_index = -1\n for i, bs in enumerate(bs_arr):\n dis = ((bs.rect.centerx - car.rect.centerx) ** 2 + (bs.rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n receiving_power = calculate_receiving_power(bs.freq, dis_in_km)\n if receiving_power > max_receiving_power:\n max_index = i\n max_receiving_power = receiving_power\n \n car.color = bs_arr[max_index].color\n car.received_power = round(max_receiving_power, 2)\n car.connected_bs = max_index\n \ndef entropy_modified_find_the_base_station(car): \n max_receiving_power = -(sys.maxsize)\n max_index = -1\n \n shortest_index = -1\n shortest_dis_in_km = sys.maxsize\n \n for i, bs in enumerate(bs_arr):\n dis = ((bs.rect.centerx - car.rect.centerx) ** 2 + (bs.rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n receiving_power = calculate_receiving_power(bs.freq, dis_in_km)\n if receiving_power > max_receiving_power:\n max_index = i\n max_receiving_power = receiving_power\n if shortest_dis_in_km > dis_in_km:\n shortest_index = i\n shortest_dis_in_km = dis_in_km\n shortest_receiving_power = receiving_power\n # calculate current receiving power\n if car.connected_bs != -1: \n dis = ((bs_arr[car.connected_bs].rect.centerx - car.rect.centerx) ** 2 + (bs_arr[car.connected_bs].rect.centery - car.rect.centery) ** 2) ** 0.5\n dis_in_km = dis / unit\n car.received_power = calculate_receiving_power(bs_arr[car.connected_bs].freq, dis_in_km)\n car.received_power = round(car.received_power, 2)\n \n if (max_receiving_power - car.received_power) > entropy:\n if car.connected_bs == -1:\n car.color = bs_arr[max_index].color\n car.received_power = round(max_receiving_power, 2)\n car.connected_bs = max_index\n else: # find the nearest base station\n car.color = bs_arr[shortest_index].color\n car.received_power = round(shortest_receiving_power, 2)\n car.connected_bs = shortest_index\n \ndef create_car(i, j):\n if i == 0:\n x, y = block_size[0] * j + road_width * (j - 1), bottom_border_y\n elif i == 1:\n x, y = left_border_x, block_size[1] * j + road_width * (j - 1)\n elif i == 2:\n x, y = block_size[0] * j + road_width * (j - 1), top_border_y\n elif i == 3:\n x, y = right_border_x, block_size[1] * j + road_width * (j - 1)\n \n obj = car(x, y, i)\n if algorithm_select == 1:\n best_effort_find_the_base_station(obj)\n elif algorithm_select == 2:\n entropy_find_the_base_station(obj)\n elif algorithm_select == 3:\n threshold_find_the_base_station(obj)\n elif algorithm_select == 4:\n entropy_modified_find_the_base_station(obj)\n car_arr.append(obj)\n car_group.add(obj)\n \n \ndef if_needed_creating_car():\n for i in range(4): # 4 dirs.\n for j in range(1, 10):\n rand = random() # rand a floating number between (0, 1)\n if 
rand <= car_entering_rate: # create cars\n create_car(i, j)\n\nclass block(pygame.sprite.Sprite):\n def __init__(self, i, j):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface(block_size)\n self.image.fill(alice_blue)\n \n self.rect = self.image.get_rect()\n self.rect.x = (block_size[0] + road_width) * i\n self.rect.y = (block_size[1] + road_width) * j\n \n def update(self):\n return\n \nclass base_station(pygame.sprite.Sprite):\n def __init__(self, i, j, index):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface(bs_size)\n self.color = color_arr[index]\n self.image.fill(color_arr[index])\n self.freq = freq_table[index]\n \n self.rect = self.image.get_rect()\n self.rect.x = (block_size[0] + road_width) * i + ((block_size[0] - bs_size[0]) / 2)\n self.rect.y = (block_size[1] + road_width) * j + ((block_size[1] - bs_size[1]) / 2)\n \n p = randint(1, 4)\n if p == 1:\n self.rect.y = (block_size[1] + road_width) * j + ((block_size[1] - bs_size[1]) / 2) - (unit * 0.1)\n elif p == 2:\n self.rect.x = (block_size[0] + road_width) * i + ((block_size[0] - bs_size[0]) / 2) + (unit * 0.1)\n elif p == 3:\n self.rect.y = (block_size[1] + road_width) * j + ((block_size[1] - bs_size[1]) / 2) + (unit * 0.1)\n else:\n self.rect.x = (block_size[0] + road_width) * i + ((block_size[0] - bs_size[0]) / 2) - (unit * 0.1)\n \n def update(self):\n return\n\nclass car(pygame.sprite.Sprite):\n def __init__(self, x, y, dir):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((road_width,road_width))\n self.connected_bs = -1\n self.color = white\n self.image.fill(self.color)\n self.rect = self.image.get_rect() \n self.position_x = float(x) \n self.position_y = float(y)\n self.rect.x = x \n self.rect.y = y\n self.dir = dir \n self.received_power = -(sys.maxsize) \n \n def occur_intersection_check(self):\n x, y = self.rect.x, self.rect.y\n for i in range(9):\n for j in range(9):\n if x == (block_size[0] + (block_size[0] + road_width) * i) and y == (block_size[0] + (block_size[0] + road_width) * j):\n return True\n return False\n \n def move_car(self):\n if self.dir == 0:\n self.position_y -= speed\n elif self.dir == 1:\n self.position_x += speed\n elif self.dir == 2:\n self.position_y += speed\n else:\n self.position_x -= speed\n \n self.rect.x = round(self.position_x)\n self.rect.y = round(self.position_y)\n \n def update(self):\n if self.occur_intersection_check():\n rand = randint(1, 32)\n if 17 <= rand and rand <= 18:\n self.dir += 2\n elif 19 <= rand and rand <= 25:\n self.dir += 1\n elif rand > 25:\n self.dir += 3\n self.dir %= 4\n self.move_car() \n self.image.fill(self.color) \n return \n\ndef check_if_any_car_needs_to_be_removed():\n for car in car_arr:\n rect = car.rect\n if rect.left > right_border_x or rect.right < left_border_x or rect.top > bottom_border_y or rect.bottom < top_border_y:\n car.kill()\n car_arr.remove(car)\n \ndef display_bs_carrier_freq():\n for bs in bs_arr:\n carrier_freq = str(bs.freq) + ' MHz'\n bs_font = pygame.font.Font(font, 12)\n text = bs_font.render(carrier_freq, True, black)\n rect = text.get_rect()\n rect.centerx = bs.rect.centerx\n rect.centery = bs.rect.centery\n window.blit(text, rect)\n \ndef calculate_switch_times_and_draw_line(algorithm_select):\n for i, car in enumerate(car_arr):\n old_bs = car.connected_bs\n if algorithm_select == 1:\n best_effort_find_the_base_station(car)\n elif algorithm_select == 2:\n entropy_find_the_base_station(car)\n elif algorithm_select == 3:\n threshold_find_the_base_station(car)\n elif 
algorithm_select == 4:\n entropy_modified_find_the_base_station(car)\n received_power = str(car.received_power) + ' dB'\n pygame.draw.line(window , car.color, (bs_arr[car.connected_bs].rect.centerx, bs_arr[car.connected_bs].rect.centery), (car.rect.centerx, car.rect.centery), 1)\n car_font = pygame.font.Font(font, 14)\n text = car_font.render(received_power, True, car.color)\n rect = text.get_rect()\n rect.centerx = car.rect.x+10\n rect.centery = car.rect.y-10\n window.blit(text, rect)\n if car.connected_bs != old_bs:\n global switch_time\n switch_time += 1\n \n \ndef update(algorithm_select):\n check_if_any_car_needs_to_be_removed()\n display_bs_carrier_freq()\n calculate_switch_times_and_draw_line(algorithm_select)\n\ndef Restart():\n global switch_time\n switch_time = 0\n for car in car_arr:\n car.kill()\n car_arr.clear()\n \nif __name__ == '__main__':\n algorithm_select = 1\n set_base_station_and_block()\n end_of_game = False\n while True:\n clock.tick(frames_per_sec)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n end_of_game = True\n if event.type == pygame.KEYDOWN:\n if(event.key == pygame.K_1):\n Restart()\n algorithm_select = 1\n elif(event.key == pygame.K_2):\n Restart()\n algorithm_select = 2\n elif(event.key == pygame.K_3):\n Restart()\n algorithm_select = 3\n elif(event.key == pygame.K_4):\n Restart()\n algorithm_select = 4\n \n if_needed_creating_car()\n \n window.fill(white)\n block_group.draw(window)\n base_station_group.draw(window)\n car_group.draw(window)\n\n block_group.update()\n base_station_group.update()\n car_group.update()\n update(algorithm_select)\n pygame.display.update()\n \n print(\"Switch times : {:d}, Car number : {:d}\".format(switch_time, len(car_arr)))\n print(\"Algorithm : {}\".format(algo[algorithm_select - 1]))\n if end_of_game:\n break\n pygame.quit()","repo_name":"KyleShao1016/Wireless-Network-Handoff-Algorithm","sub_path":"always_oncall.py","file_name":"always_oncall.py","file_ext":"py","file_size_in_byte":14091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37138795846","text":"import sys\ninput = sys.stdin.readline\nsecret = list(input().rstrip())\n# print(secret)\ndp = [0 for _ in range(len(secret) + 1)]\ndp[0] = 1\ndp[1] = 1\nif secret[0] == \"0\":\n print(0)\nelse:\n for i in range(2, len(secret) + 1):\n if int(secret[i-1]) > 0:\n dp[i] += dp[i-1]\n if 10 <= int(secret[i-2] + secret[i-1]) <= 26:\n dp[i] += dp[i-2]\n# print(dp)\n print(dp[-1] % 1000000)\n\n","repo_name":"wony5248/Daily_Study","sub_path":"Coding_Study/06-09/백준 2011 암호코드.py","file_name":"백준 2011 암호코드.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21349370017","text":"#!/usr/bin/env python3\nimport os\n\n\nPLAYS = [93,35,66,15,6,51,49,67,16,77,80,8,1,57,99,92,14,9,13,23,33,11,43,50,60,96,40,25,22,39,56,18,2,7,34,68,26,90,75,41,4,95,71,30,42,5,46,55,27,98,79,12,65,73,29,28,17,48,81,32,59,63,85,91,52,21,38,31,61,83,97,62,44,70,19,69,36,47,74,58,78,24,72,0,10,88,37,87,3,45,82,76,54,84,20,94,86,53,64,89] # noqa\n\n\ndef read(relative_filepath):\n with open(relative_filepath, 'r+') as f:\n data = f.read()\n clean_data = data.strip()\n boards = clean_data.split('\\n\\n')\n\n all_boards = []\n for string_board in boards:\n all_boards.append(BingoBoard(string_board))\n\n return all_boards\n\n\nclass BingoBoard:\n size = 5\n \"\"\"\n A board represents a bingo board.\n It can be initialized, and 
checked for a win.\n It also has a method to sum up all unmarked numbers\n\n Data representation. A board is a 2D nested array.\n Index 0.\n Values are dicts, that look like this:\n {\n \"val\": 22,\n \"marked\": True\n }\n \"\"\"\n def __init__(self, text_input):\n \"\"\"\n Initializes a board from the string input\n \"\"\"\n self.board = []\n clean_input = text_input.strip()\n self.has_won = False\n for line in clean_input.split('\\n'):\n current_row = []\n for number in line.split(): # no parameter to split handles any run of whitespaces!\n current_row.append({\"val\": int(number), \"marked\": False})\n self.board.append(current_row)\n\n def mark(self, value):\n for line in self.board:\n for elt in line:\n if elt['val'] == int(value):\n elt['marked'] = True\n\n def is_solved(self) -> bool:\n # Check horizontal\n for row in self.board:\n marked_count = len([elt for elt in row if elt[\"marked\"] is True])\n if marked_count == self.size:\n self.has_won = True\n return True\n\n # Check vertical\n for j in range(self.size):\n marked_count = len([self.board[i][j] for i in range(self.size) if self.board[i][j][\"marked\"] is True])\n if marked_count == self.size:\n self.has_won = True\n return True\n return False\n\n def calc_unmarked_sum(self):\n total = 0\n for i in range(self.size):\n for j in range(self.size):\n current_elt = self.board[i][j]\n if current_elt['marked'] is False:\n total += current_elt['val']\n return total\n\n def __str__(self) -> str:\n return_str = \"\"\n for row in self.board:\n for elt in row:\n return_str += str(elt['val']).rjust(2) # pad for double digits\n if elt['marked'] is True:\n return_str += '(*) '\n else:\n return_str += '( ) '\n return_str += '\\n'\n return return_str\n\n def __repr__(self) -> str:\n return self.__str__()\n\n\ndef test():\n mock_board = \"\"\"3 82 68 26 93\n61 90 29 69 92\n60 94 99 6 83\n77 80 2 58 55\n59 65 95 38 62\n\"\"\"\n bingo_board = BingoBoard(mock_board)\n print(bingo_board)\n bingo_board.mark(77)\n bingo_board.mark(60)\n bingo_board.mark(61)\n bingo_board.mark(3)\n bingo_board.mark(59)\n print(bingo_board)\n print(bingo_board.is_solved())\n\n\nif __name__ == '__main__':\n name = os.path.basename(__file__).split('.py')[0]\n test()\n print(f\"Solving {name} for advent of code\")\n all_boards = read('inputs/day4.txt')\n\n over = False\n for play in PLAYS:\n for board in all_boards:\n if board.has_won is False:\n board.mark(play)\n if board.is_solved():\n print(\"The following board has won!\")\n print(board)\n total = board.calc_unmarked_sum()\n print(\"Play:\", play, \"Sum:\", total, \"play * sum\", play * total)\n","repo_name":"nichochar/advent-of-code-2021","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21639174477","text":"#!/usr/bin/env python3\nimport LCD1602\nimport time\n\ndef setup():\n\tLCD1602.init(0x27, 1)\t# init(slave address, background light)\n\tLCD1602.write(0, 0, 'Greetings!!')\n\tLCD1602.write(1, 1, 'from SunFounder')\n\ttime.sleep(2)\n\ndef loop():\n\tspace = ' '\n\tgreetings = 'Thank you for buying SunFounder Sensor Kit for Raspberry! ^_^'\n\tgreetings = space + greetings\n\twhile True:\n\t\ttmp = greetings\n\t\tfor i in range(0, len(greetings)):\n\t\t\tLCD1602.write(0, 0, tmp)\n\t\t\ttmp = tmp[1:]\n\t\t\ttime.sleep(0.8)\n\t\t\tLCD1602.clear()\n\ndef loop2():\n\tgreetings = 'Thank you for buying SunFounder Sensor Kit for Raspberry! 
^_^ '\n\ti = 0\n\twhile True:\n\t\ti = (i + 1) % len(greetings)\n\n\t\tif i+32 < len(greetings):\n\t\t\ttmp = greetings[i:i+32]\n\t\telse:\n\t\t\ttmp = greetings[i:]\n\t\t\ttmp = tmp + greetings[:32-len(tmp)]\n\n\t\tLCD1602.write(0, 0, tmp)\n\t\ttime.sleep(0.8)\n\t\tLCD1602.clear()\n\ndef loop3():\n\tgreetings = 'Actions speak louder than words.'\n\ti = 0\n\twhile True:\n\t\ti = (i + 1) % len(greetings)\n\n\t\tif i+32 < len(greetings):\n\t\t\ttmp = greetings[i:i+32]\n\t\telse:\n\t\t\ttmp = greetings[i:]\n\t\t\ttmp = tmp + greetings[:32-len(tmp)]\n\n\t\tLCD1602.write(0, 0, tmp[0:16])\n\t\tLCD1602.write(0, 1, tmp[16:][::-1])\n\t\ttime.sleep(0.8)\n\t\tLCD1602.clear()\n\ndef destroy():\n\tpass\t\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tsetup()\n\t\ttime.sleep(2)\n\t\t# loop()\n\t\t# loop2()\n\t\tloop3()\n\t\twhile True:\n\t\t\tpass\n\texcept KeyboardInterrupt:\n\t\tdestroy()\n","repo_name":"greenflute/SunFounder_SensorKit_for_RPi2","sub_path":"Python/30_i2c_lcd1602.py","file_name":"30_i2c_lcd1602.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"72615346504","text":"#!/usr/bin/env python3\n\n#For all VEX Talon Motor control publishers 0=OFF, 1=FWD, -1=REV\n\n#import needed libraries\nimport rospy\nimport time\nfrom std_msgs.msg import Int16, Bool\n\n\n#class object for easier main node flow programming: stuck_fault message ROS subscriber\nclass stuck_fault_sub:\n #class level variable accessable in main program node\n stuck_fault_flag = Bool()\n #object initialize as subscriber\n def __init__(self):\n self.stuck_fault_subscriber = rospy.Subscriber('stuck_fault_state', Bool, self.stuck_fault_flag_callback)\n #callback function assigning msg recieved to class level varibale\n def stuck_fault_flag_callback(self, msg):\n self.stuck_fault_flag.data = msg.data\n\n\n#class object for easier main node flow programming: run_mining message ROS subscriber\nclass run_mining_sub:\n #class level variable accessable in main program node\n run_mining_flag = Bool()\n #object initialize as subscriber\n def __init__(self):\n self.run_mining_subscriber = rospy.Subscriber('run_mining_state', Bool, self.run_mining_flag_callback)\n #callback function assigning msg recieved to class level varibale\n def run_mining_flag_callback(self, msg):\n self.run_mining_flag.data = msg.data\n\n\n#class object for easier main node flow programming: run_hopper message ROS subscriber\nclass run_hopper_sub:\n #class level variable accessable in main program node\n run_hopper_flag = Bool()\n #object initialize as subscriber\n def __init__(self):\n self.run_hopper_subscriber = rospy.Subscriber('run_hopper_state', Bool, self.run_hopper_flag_callback)\n #callback function assigning msg recieved to class level varibale\n def run_hopper_flag_callback(self, msg):\n self.run_hopper_flag.data = msg.data\n\n#class object for easier main node flow programming: mining_motors command ROS publisher\nclass mining_motors_pub:\n #object initialize as publisher and pass command args from main program node\n def __init__(self, extensor = 0, collector = 0):\n self.extensor = extensor\n self.collector = collector\n #setup ROS publishing & publish passed command args\n self.pub_extensor = rospy.Publisher('extensor_cmd_signal', Int16, queue_size = 10)\n self.pub_collector = rospy.Publisher('collector_cmd_signal', Int16, queue_size = 10)\n self.pub_extensor.publish(self.extensor)\n self.pub_collector.publish(self.collector)\n\n#class object for easier main 
node flow programming: hopper_motors command ROS publisher\nclass hopper_motor_pub:\n #object initialize as publisher and pass command arg from main program node\n def __init__(self, hopper = 0):\n self.hopper = hopper\n #setup ROS publishing & publish passed command arg\n self.pub_hopper = rospy.Publisher('hopper_cmd_signal', Int16, queue_size = 10)\n self.pub_hopper.publish(self.hopper)\n\n\nif __name__ == '__main__':\n #initialize node\n rospy.init_node('mining_and_hopper_ctrl_node')\n #publish rate (Hz)\n rate = rospy.Rate(50)\n #null val for motor signals on startup / while waiting for execution\n non_cycle_null_val = 0\n\n #keep node running unless CTRL C pressed\n while not rospy.is_shutdown():\n #initialize subscribers\n stuck_fault = stuck_fault_sub()\n run_mining = run_mining_sub()\n run_hopper = run_hopper_sub()\n \n #if command to run mining is received activate system\n if run_mining.run_mining_flag.data == True:\n #start deploy timer\n deploy_timer = time.time() + 30\n while time.time() < deploy_timer:\n #publish extensor=EXT, collector=FWD\n mining_motors_pub(1, 1)\n #stuck_fault signal received?\n if stuck_fault.stuck_fault_flag.data == True:\n #shutdown mining, publish extensor=OFF, collector=OFF\n mining_motors_pub(0, 0)\n #add stuck_fault recovery cycle time to deployement timer\n deploy_timer += 10\n #start stuck recovery timer\n stuck_recovery_timer = time.time() + 10\n while time.time() < stuck_recovery_timer:\n #publish extensor=RTCT, collector=OFF\n mining_motors_pub(-1, 0)\n #print(\"stuck fault recovery retract\")\n rate.sleep()\n #publish extensor=OFF, collector=OFF for transition back to mining\n mining_motors_pub(0, 0)\n #print(\"deploying mining\")\n rate.sleep()\n #publish extensor=OFF, collector=OFF for transition to retraction cycle\n mining_motors_pub(0, 0)\n #start retract timer\n retract_timer = time.time() + 30\n while time.time() < retract_timer:\n #publish extensor=RTCT, collector=OFF\n mining_motors_pub(-1, 0)\n #print(\"retracting mining\")\n rate.sleep()\n\n \n #if command to run hopper is received activate system\n if run_hopper.run_hopper_flag.data == True:\n #set timer to run hopper for 30s\n hopper_timer = time.time() + 30\n while time.time() < hopper_timer:\n #publish hopper run FWD signal (1)\n hopper_motor_pub(1)\n #print(\"Running hopper\")\n rate.sleep()\n\n #when not actively cycling the mining or hopper systems publish OFF commands to motors\n mining_motors_pub(non_cycle_null_val, non_cycle_null_val)\n hopper_motor_pub(non_cycle_null_val)\n #print(\"listening\")\n\n rate.sleep()","repo_name":"KevinJordanENG/lunabotics23","sub_path":"ros_framework/install/lib/mining_and_hopper/mining_and_hopper_ctrl_node.py","file_name":"mining_and_hopper_ctrl_node.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6586658088","text":"from PyQt5 import QtCore, QtWidgets, QtGui\nimport ctypes\nfrom PyQt5.QtGui import QTextCursor\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QColorDialog, QFormLayout, QLineEdit, QPushButton, QInputDialog, \\\n QWidget, QHBoxLayout, QDialog\n\nimport Utils\nfrom Restore import Restore\nfrom Settings import Settings\n\nuser32 = ctypes.windll.user32\nwidth = user32.GetSystemMetrics(0)\nheight = user32.GetSystemMetrics(1)\n\nclass Ui_EditText(object):\n\n restore = None\n originalText = \"\"\n settings = None\n Text = None\n\n def setupUi(self, EditText):\n EditText.setObjectName(Utils.Constants.EDIT_TEXT)\n 
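# size the editor window relative to the primary display resolution obtained\n # through ctypes/user32.GetSystemMetrics at import time (module-level width/height)\n 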
EditText.resize(width-500, height-500)\n self.settings = Settings()\n self.centralwidget = QtWidgets.QWidget(EditText)\n self.centralwidget.setObjectName(Utils.Constants.CENTRAL_WIDGET)\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.setObjectName(Utils.Constants.GRID_LAYOUT)\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)\n self.verticalLayout.setObjectName(Utils.Constants.VERTICAL_LAYOUT)\n self.Text = QtWidgets.QPlainTextEdit(self.centralwidget)\n self.Text.setObjectName(Utils.Constants.TEXT)\n self.Text.textChanged.connect(self.changed_Text)\n self.Text.setStyleSheet(\"color: \" + self.settings.getColorText() + \";\"+\"background-color: \"+self.settings.getBackgroundColor()+\";\"+\"font: \"+self.settings.getSizeFont()+\"pt Comic Sans MS\")\n self.verticalLayout.addWidget(self.Text)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(Utils.Constants.HORIZONTAL_LAYOUT)\n self.words = QtWidgets.QLabel(self.centralwidget)\n self.words.setObjectName(Utils.Constants.WORDS)\n self.horizontalLayout.addWidget(self.words)\n self.letters = QtWidgets.QLabel(self.centralwidget)\n self.letters.setObjectName(Utils.Constants.LETTERS)\n self.horizontalLayout.addWidget(self.letters)\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setObjectName(Utils.Constants.LABEL)\n self.horizontalLayout.addWidget(self.label)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.gridLayout.addLayout(self.verticalLayout, 0, 0, 2, 2)\n EditText.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(EditText)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 877, 21))\n self.menubar.setObjectName(Utils.Constants.MENU_BAR)\n self.menuFile = QtWidgets.QMenu(self.menubar)\n self.menuFile.setObjectName(Utils.Constants.MENU_FILE)\n self.menuTool = QtWidgets.QMenu(self.menubar)\n self.menuTool.setObjectName(Utils.Constants.MENU_TOOL)\n EditText.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(EditText)\n self.statusbar.setObjectName(Utils.Constants.STATUSBAR)\n EditText.setStatusBar(self.statusbar)\n self.actionNew = QtWidgets.QAction(EditText)\n self.actionNew.setObjectName(Utils.Constants.ACTION_NEW)\n self.actionNew.triggered.connect(self.new_Listener)\n self.actionOpen = QtWidgets.QAction(EditText)\n self.actionOpen.setObjectName(Utils.Constants.ACTION_OPEN)\n self.actionOpen.triggered.connect(self.file_open)\n self.actionClose = QtWidgets.QAction(EditText)\n self.actionClose.setObjectName(Utils.Constants.ACTION_CLOSE)\n self.actionClose.triggered.connect(self.close_Listener)\n self.actionSave = QtWidgets.QAction(EditText)\n self.actionSave.setObjectName(Utils.Constants.ACTION_SAVE)\n self.actionSave.triggered.connect(self.file_save)\n self.actionTextColor = QtWidgets.QAction(EditText)\n self.actionTextColor.setObjectName(Utils.Constants.ACTION_TEXT_COLOR)\n self.actionTextColor.triggered.connect(self.textColor_Listener)\n self.actionBackgroundColor = QtWidgets.QAction(EditText)\n self.actionBackgroundColor.setObjectName(Utils.Constants.ACTION_BACKGROUND_COLOR)\n self.actionBackgroundColor.triggered.connect(self.backgroundColor_Listener)\n self.actionSizeFont = QtWidgets.QAction(EditText)\n self.actionSizeFont.setObjectName(Utils.Constants.ACTION_FONT_SIZE)\n self.actionSizeFont.triggered.connect(self.actionSizeFont_Listener)\n self.menuFile.addAction(self.actionNew)\n self.menuFile.addAction(self.actionOpen)\n 
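# the remaining File-menu entries (Save, Close), then the Tools-menu actions\n 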
self.menuFile.addAction(self.actionSave)\n self.menuFile.addAction(self.actionClose)\n self.menuTool.addAction(self.actionTextColor)\n self.menuTool.addAction(self.actionBackgroundColor)\n self.menuTool.addAction(self.actionSizeFont)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuTool.menuAction())\n self.retranslateUi(EditText)\n QtCore.QMetaObject.connectSlotsByName(EditText)\n\n self.restore = Restore()\n file = open(\"temp\", \"r\")\n lines = file.readlines()\n if len(lines) != 0:\n restore = QMessageBox()\n restore.setWindowTitle(Utils.Constants.EDIT_TEXT)\n restore.setText(Utils.Constants.ASK_RESTORE_DATA)\n restore.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)\n restore = restore.exec()\n text = \"\"\n if restore == QMessageBox.Yes:\n for x in lines:\n text = text + x\n self.Text.setPlainText(text)\n elif restore == QMessageBox.Cancel:\n self.restore.delete()\n\n\n def retranslateUi(self, EditText):\n _translate = QtCore.QCoreApplication.translate\n EditText.setWindowTitle(_translate(Utils.Constants.EDIT_TEXT, Utils.Constants.EDIT_TEXT))\n self.words.setText(_translate(Utils.Constants.EDIT_TEXT, \"Words: 0\"))\n self.letters.setText(_translate(Utils.Constants.EDIT_TEXT, \"Letters: 0\"))\n self.label.setText(_translate(Utils.Constants.EDIT_TEXT, \"UTF-8\"))\n self.menuFile.setTitle(_translate(Utils.Constants.EDIT_TEXT, \"File\"))\n self.menuTool.setTitle(_translate(Utils.Constants.EDIT_TEXT, \"Tools\"))\n self.actionNew.setText(_translate(Utils.Constants.EDIT_TEXT, \"New\"))\n self.actionOpen.setText(_translate(Utils.Constants.EDIT_TEXT, \"Open\"))\n self.actionClose.setText(_translate(Utils.Constants.EDIT_TEXT, \"Close\"))\n self.actionSave.setText(_translate(Utils.Constants.EDIT_TEXT, \"Save\"))\n self.actionTextColor.setText(_translate(Utils.Constants.EDIT_TEXT, \"Text Color\"))\n self.actionBackgroundColor.setText(_translate(Utils.Constants.EDIT_TEXT, \"Background Color\"))\n self.actionSizeFont.setText(_translate(Utils.Constants.EDIT_TEXT, \"Size Font\"))\n\n def changed_Text(self):\n self.words.setText(\"Words: \"+str(self.count_Words(self.Text.toPlainText())))\n self.letters.setText(\"Letters: \"+str(self.count_Letters(self.Text.toPlainText())))\n self.restore.push(self.Text.toPlainText())\n\n def count_Words(self, text):\n words = text.split()\n return len(words)\n\n def count_Letters(self, text):\n characters = list(text)\n count = 0\n for x in characters:\n if not (x == \"\\n\" or x == \" \"):\n count += 1\n return count\n\n def close_Listener(self):\n close = QMessageBox()\n close.setWindowTitle(Utils.Constants.EDIT_TEXT)\n close.setText(Utils.Constants.ASK_YOU_SURE)\n close.setStandardButtons(QMessageBox.Yes | QMessageBox.Save | QMessageBox.Cancel)\n close = close.exec()\n\n if close == QMessageBox.Yes:\n exit()\n elif close == QMessageBox.Save:\n self.file_save(True)\n elif close == QMessageBox.Cancel:\n pass\n\n def file_open(self):\n dlg = QFileDialog()\n dlg.setFileMode(QFileDialog.AnyFile)\n if dlg.exec_():\n filename = dlg.selectedFiles()\n text = \"\"\n try:\n file = open(filename[0], \"r\")\n for x in file.readlines():\n text = text + x\n self.Text.setPlainText(text)\n self.Text.moveCursor(QTextCursor.End)\n self.originalText = text\n file.close()\n except:\n print(Utils.Constants.ERROR_CAN_NOT_OPEN_FILE)\n\n def file_save(self, wants_exit):\n dlg = QFileDialog()\n try:\n filenames = dlg.getSaveFileName()\n file = open(filenames[0], \"w\")\n file.write(self.Text.toPlainText())\n self.originalText = 
self.Text.toPlainText()\n self.restore.saved()\n file.close()\n except:\n print(Utils.Constants.CANCELED)\n if wants_exit:\n exit()\n\n def new_Listener(self):\n if self.originalText == self.Text.toPlainText():\n self.Text.setPlainText(\"\")\n else:\n close = QMessageBox()\n close.setWindowTitle(Utils.Constants.EDIT_TEXT)\n close.setText(Utils.Constants.ASK_YOU_SURE)\n close.setStandardButtons(QMessageBox.Yes | QMessageBox.Save | QMessageBox.Cancel)\n close = close.exec()\n\n if close == QMessageBox.Yes:\n self.Text.setPlainText(\"\")\n elif close == QMessageBox.Save:\n self.file_save(True)\n elif close == QMessageBox.Cancel:\n pass\n\n def textColor_Listener(self):\n color = QColorDialog.getColor()\n self.Text.setStyleSheet(\"color: \"+color.name()+\";\"+\"background-color: \" + self.settings.getColorText() + \";\"+\"font: \"+self.settings.getSizeFont()+\"pt Comic Sans MS\")\n self.settings.setColorText(color.name())\n\n def backgroundColor_Listener(self):\n color = QColorDialog.getColor()\n self.Text.setStyleSheet(\"color: \" + self.settings.getColorText() + \";\"+\"background-color: \" + color.name() + \";\"+\"font: \"+self.settings.getSizeFont()+\"pt Comic Sans MS\")\n self.settings.setBackground(color.name())\n\n def actionSizeFont_Listener(self):\n self.dialog = QWidget()\n self.getSize()\n\n def getSize(self):\n i, okPressed = QInputDialog.getInt(self.dialog, Utils.Constants.CHANGE_SIZE, Utils.Constants.PERCENTAGE+\":\", int(self.settings.getSizeFont()), 5, 100, 1)\n if okPressed:\n try:\n self.Text.setStyleSheet(\"color: \" + self.settings.getColorText() + \";\" + \"background-color: \" + self.settings.getBackgroundColor() + \";\" + \"font: \" + str(i) + \"pt Comic Sans MS\")\n self.settings.setSizeFont(str(i))\n except Exception as e:\n print(e)\n\n\n","repo_name":"xRUIZxMATIASx/Custom-Edit-Text","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":10767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8834855261","text":"class Solution:\n def peakIndexInMountainArray(self, a: List[int]) -> int:\n # m = -1\n # loc = -1\n # for i in range(1 ,len(a)-1):\n # if a[i-1]< a[i] > a[i+1]:\n # m,loc = (a[i],i) if a[i] > m else (m,loc)\n # return loc\n l,r =0,len(a)-1\n while l < r:\n mid = (l+r)//2\n if a[mid] < a[mid+1]:\n l = mid+1\n else:\n r= mid\n return l","repo_name":"pavan-reddy8/Leetcode-problems","sub_path":"852-peak-index-in-a-mountain-array/852-peak-index-in-a-mountain-array.py","file_name":"852-peak-index-in-a-mountain-array.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35341964618","text":"from collections import namedtuple\n\n\nDataIngestionConfig = namedtuple(\n \"DataIngestionConfig\",\n [\n \"root_dir\",\n \"review_file_path\",\n \"extract_image_dir_name\",\n \"extract_product_csv_file_name\",\n ],\n)\n\nReviewSplitConfig = namedtuple(\n \"ReviewSplitConfig\",\n [\n \"root_dir\",\n \"review_csv_path\",\n \"battery_file_name\",\n \"display_file_name\",\n \"camera_file_name\",\n \"overall_file_name\",\n \"review_split_dir_name\",\n ],\n)\n\nTextPreprocessingConfig = namedtuple(\n \"TextPreprocessingConfig\",\n [\"root_dir\", \"review_file_path\", \"processed_data_file_path\", \"min_review_len\"],\n)\n\nPredictionConfig = namedtuple(\n \"PredictionConfig\",\n [\n \"root_dir\",\n \"splited_reviews_dir_path\",\n \"pretrain_model_path\",\n \"prediction_csv_file_path\",\n 
],\n)\n\nTrainingConfig = namedtuple(\n    \"TrainingConfig\",\n    [\n        \"root_dir\",\n        \"model_dir\",\n        \"model_file_name\",\n        \"data_path\",\n        \"epochs\",\n        \"batch_size\",\n        \"buffer_size\",\n        \"vocab_size\",\n        \"BiRnnUnits\",\n        \"eval_data_per\",\n        \"embedding_dim\",\n        \"no_classes\",\n        \"output_columns_name\",\n    ],\n)\n\nPretrainedModelConfig = namedtuple(\n    \"PretrainedModelConfig\", [\"pretrained_model_dir\", \"pretrained_model_name\"]\n)\n","repo_name":"Sathishmahi/Review_Scraper_with_Sentiment_Analysis","sub_path":"src/ReviewScraperwithSentimentAnalysis/entity/config_entity.py","file_name":"config_entity.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70148093704","text":"from random import randint\n\ngame_running = True\n\ndef calculate_monster_attack():\n    return randint(monster[\"attack_min\"], monster[\"attack_max\"])\n\ndef game_ends(winner_name):\n    print(winner_name + \" won the game\")\n\nwhile game_running == True:\n    new_round = True\n    counter = 0  # rounds played in this game; must be initialized before it is incremented below\n    player = {\"name\": \"Jean\", \"attack\": 10, \"heal\": 16, \"health\": 100}\n    monster = {\"name\": \"Max\", \"attack_min\": 11, \"attack_max\": 19, \"health\": 100}\n\n    print(\"-----\"*5)\n\n    print(\"Enter Player name\")\n    player[\"name\"] = input()\n    print(\"-----\" * 5)\n    print(player[\"name\"]+\" has \" + str(player[\"health\"]) + \" hp\")\n    print(monster[\"name\"] + \" has \" + str(monster[\"health\"]) + \" hp\")\n\n    while new_round == True:\n\n        counter = counter + 1\n        playerwon= False\n        monsterwon= False\n\n        print(\"Please select action\")\n        print(\"1 Attack\")\n        print (\"2 Heal\")\n        print (\"3 exit\")\n\n        player_choice = input()\n\n        if player_choice == \"1\":\n            monster[\"health\"] = monster[\"health\"] - player[\"attack\"]\n            if monster[\"health\"]<= 0:\n                playerwon = True\n\n            else:\n\n                player[\"health\"] = player[\"health\"] - calculate_monster_attack()\n                if player[\"health\"] <= 0:\n                    monsterwon= True\n\n            print (monster[\"health\"])\n            print (player[\"health\"])\n\n        elif player_choice == \"2\":\n            player[\"health\"]= player[\"health\"]+ player[\"heal\"]\n\n            player[\"health\"] = player[\"health\"] - calculate_monster_attack()\n            if player[\"health\"] <= 0:\n                monsterwon = True\n\n        elif player_choice == \"3\" :\n            new_round = False\n            game_running = False\n\n        else:\n            print (\"invalid choice, try again\")\n\n        if playerwon == False and monsterwon == False:\n            print (player[\"name\"] + \" has \" + str(player[\"health\"]) + \" left\")\n            print (monster[\"name\"] + \" has \" + str(monster[\"health\"]) + \" left\")\n\n        elif playerwon== True:\n            game_ends(player[\"name\"])\n            round_result = {\"name\":player[\"name\"],\"health\": player[\"health\"], \"rounds\": counter}\n            print(round_result)\n            new_round= False\n\n        elif monsterwon== True:\n            game_ends(monster[\"name\"])\n            round_result = {\"name\": player[\"name\"], \"health\": player[\"health\"], \"rounds\": counter}\n            print(round_result)\n            new_round = False\n\n\n\n","repo_name":"camleodev/jeu_de_combat","sub_path":"testpython.py","file_name":"testpython.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25026470974","text":"n = int(input())\n\nstrings = []\nfor _ in range(n):\n    strings.append(input())\n    \ns= list(strings[0])\n    \n    \nfor i in range(1,n):\n    for j in range(len(s)):\n        if s[j] != strings[i][j]:\n            s[j] = '?'\nprint(''.join(s))\n    
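\n# example (traceable from the loop above): with n=2 and inputs \"config.sys\" and \"config.inf\",\n# every column where the strings differ becomes '?', so this prints \"config.???\"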
\n","repo_name":"parkchanghyup/algorithm","sub_path":"python/백준/[python] 백준 - 명령프롬포트.py","file_name":"[python] 백준 - 명령프롬포트.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929168586","text":"from typing import List\nimport re\n\nclass Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n paragraph = paragraph.lower()\n paragraph = re.sub('[^a-zA-Z0-9 ]', '', paragraph)\n words = paragraph.split()\n cnt = dict()\n max_word = ''\n max_cnt = 0\n for word in words:\n if word in banned:\n continue\n if word not in cnt:\n cnt[word] = 1\n else:\n cnt[word] += 1\n if cnt[word] >= max_cnt:\n max_word = word\n max_cnt = cnt[word]\n return max_word","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/1회차/B04_MostCommonWord.py","file_name":"B04_MostCommonWord.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14994253085","text":"from django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'kvmcp'\nurlpatterns = [\n url(r'^$', views.change_page, name='change_page'),\n url(r'activate/$', views.activate, name='activate'),\n url(r'timestamp/$', views.timestamp, name='timestamp'),\n url(r'report/$', views.report, name='report'),\n url(r'checkstatus/$', views.check_status, name='check_status')\n]","repo_name":"ovikosta/KvmPasswordChanger","sub_path":"kvmcp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7263281857","text":"\"\"\"\nID: thanhti1\nLANG: PYTHON3\nTASK: beads\n\"\"\"\n\n# code1 O(n)\nwith open('beads.in', 'r') as fin:\n n = int(fin.readline().strip())\n A = list(fin.readline().strip())\n\nA = [' '] + A + A + [' ']\nleft = [[0,0] for i in range(len(A))] #[r,b]\nfor i in range(1,len(A)-1):\n if A[i]=='r':\n left[i][0] = left[i-1][0] + 1\n left[i][1] = 0\n elif A[i] == 'b':\n left[i][1] = left[i-1][1] + 1\n left[i][0] = 0\n elif A[i] == 'w':\n left[i][0] = left[i - 1][0] + 1\n left[i][1] = left[i - 1][1] + 1\n\nright = [[0,0] for i in range(len(A))] #[r,b]\nfor i in range(1,len(A)-1):\n i = len(A)-1-i\n if A[i]=='r':\n right[i][0] = right[i+1][0] + 1\n right[i][1] = 0\n elif A[i] == 'b':\n right[i][1] = right[i+1][1] + 1\n right[i][0] = 0\n elif A[i] == 'w':\n right[i][0] = right[i + 1][0] + 1\n right[i][1] = right[i + 1][1] + 1\n\nans=0\nfor i in range(1,len(A)-1):\n ans=max(ans,max(left[i][0], left[i][1]+right[i+1][0], left[i][0]+right[i+1][1], right[i+1][1]))\nans=min(ans,n)\n\nwith open('beads.out', 'w') as fout:\n fout.write(str(ans) + '\\n')\n\n# Test 1: TEST OK [0.024 secs, 9296 KB]\n# Test 2: TEST OK [0.024 secs, 9420 KB]\n# Test 3: TEST OK [0.025 secs, 9380 KB]\n# Test 4: TEST OK [0.025 secs, 9356 KB]\n# Test 5: TEST OK [0.024 secs, 9336 KB]\n# Test 6: TEST OK [0.024 secs, 9320 KB]\n# Test 7: TEST OK [0.025 secs, 9348 KB]\n# Test 8: TEST OK [0.025 secs, 9460 KB]\n# Test 9: TEST OK [0.028 secs, 9504 KB]\n\n# #code2 O(n^2)\n# with open('beads.in', 'r') as fin:\n# size = int(fin.readline().strip())\n# A = list(fin.readline().strip())\n#\n# def n_break(i, dir):\n# color = 'w'\n# if dir > 0: #negative direction\n# i = i\n# else: #positive direction\n# i = (i+dir)%size\n# for n in range(size):\n# if color=='w' and A[i]!='w':\n# color=A[i]\n# elif color!='w' and A[i]!='w' and A[i]!=color:\n# break\n# i = 
(i+dir)%size #direction\n# return n\n#\n# ans=0\n# for i in range(size):\n# n = n_break(i,1) + n_break(i,-1)\n# ans = max(ans,n)\n# ans = min(ans, size)\n#\n# with open('beads.out', 'w') as fout:\n# fout.write(str(ans) + '\\n')\n\n# Test 1: TEST OK [0.029 secs, 9372 KB]\n# Test 2: TEST OK [0.024 secs, 9320 KB]\n# Test 3: TEST OK [0.025 secs, 9360 KB]\n# Test 4: TEST OK [0.025 secs, 9308 KB]\n# Test 5: TEST OK [0.024 secs, 9420 KB]\n# Test 6: TEST OK [0.024 secs, 9348 KB]\n# Test 7: TEST OK [0.027 secs, 9416 KB]\n# Test 8: TEST OK [0.025 secs, 9328 KB]\n# Test 9: TEST OK [0.025 secs, 9312 KB]\n\n# #code3 O(n^2)\n# with open('beads.in', 'r') as fin:\n# n = int(fin.readline().strip())\n# A = list(fin.readline().strip())\n#\n# A = A + A\n# ans=0\n# for i in range(n):\n# check = A[i]\n# print(check)\n# if check=='w':\n# state = 0\n# else:\n# state = 1\n# j=i\n# current = 0\n# while state <=2:\n# while j < n+i and (A[j]=='w' or A[j]==check):\n# j+=1\n# current +=1\n# # print(A[j])\n# check=A[j]\n# state+=1\n# # print(str(current)+'\\n')\n# ans = max(ans,current)\n#\n# with open('beads.out', 'w') as fout:\n# fout.write(str(ans) + '\\n')\n\n# Test 1: TEST OK [0.025 secs, 9312 KB]\n# Test 2: TEST OK [0.024 secs, 9352 KB]\n# Test 3: TEST OK [0.027 secs, 9416 KB]\n# Test 4: TEST OK [0.025 secs, 9232 KB]\n# Test 5: TEST OK [0.025 secs, 9312 KB]\n# Test 6: TEST OK [0.025 secs, 9344 KB]\n# Test 7: TEST OK [0.034 secs, 9280 KB]\n# Test 8: TEST OK [0.031 secs, 9316 KB]\n# Test 9: TEST OK [0.032 secs, 9232 KB]\n\n# ------- test 1 [length 33 bytes] ----\n# 29\n# wwwbbrwrbrbrrbrbrwrwwrbwrwrrb\n# ------- test 2 [length 6 bytes] ----\n# 3\n# rrr\n# ------- test 3 [length 81 bytes] ----\n# 77\n# rwrwrwrwrwrwrwrwrwrwrwrwbwrwbwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwrwr\n# ------- test 4 [length 21 bytes] ----\n# 17\n# wwwwwwwwwwwwwwwww\n# ------- test 5 [length 54 bytes] ----\n# 50\n# bbrrrbrrrrrrrrbrbbbrbrrbrrrrbbbrbrbbbbbrbrrrbbrbbb\n# ------- test 6 [length 11 bytes] ----\n# 8\n# rrwwwwbb\n# ------- test 7 [length 205 bytes] ----\n# 200\n# rrrrrrrrrrrrrrrrrrrrbbbbbbbbbbbbbbbbbbbbrrrrrrrrrrrrrrrrrrrrbbbbbbbbbbbbbbbbbbbbrrrrrrrrrrrrrrrrrrrrbbbbbbbbbbbbbbbbbbbbrrrrrrrrrrrrrrrrrrrrbbbbbbbbbbbbbbbbbbbbrrrrrrrrrrrrrrrrrrrrbbbbbbbbbbbbbbbbbbbb\n# ------- test 8 [length 355 bytes] ----\n# 350\n# rrbbrbbbwbwwbwbbbbwwrrbbwbrwbrwbbbrbrwrwbrwwwrrbbrrwrbbrwbwrwwwrbrwwwwwrwbwwwrrbrrbbbrbrbbbrbbbrbbwbbbbbrbrrbrwwbrrrrwbwrwrbbwbwrbrbrwwbrrbwbrwwbwwwbrbwrwbwbrbbbwrbwwrrrbwbwbbbbbrrwwwrbrwwrbbwrbbrbbrbwrrwwbrrrbrwbrwwrbwbwrrrbwrwrrbrbbwrwrbrwwwrwbwrrwwwwrrrwrrwbbwrwwrwrbwwbwrrrrbbwrbbrbwwwwwbrbbrbbrbrwbbwbwwbbbbbwwwrbwwbbbwrwwbbrrwrwbwrrwwwrrrwrrwww\n# ------- test 9 [length 338 bytes] ----\n# 333\n# rwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwbrwb\n","repo_name":"NoLoPhe/usaco2021","sub_path":"4.beads/beads.py","file_name":"beads.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21530116670","text":"# Utilities for integration with Home Assistant (directly or via MQTT)\n\nimport logging\n\nfrom .util import camel2slug\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# fixme: move (mapping to) hass component names to config file 
instead\n\n\nclass Instrument:\n def __init__(self, component, attr, name, icon=None):\n self.attr = attr\n self.component = component\n self.name = name\n self.vehicle = None\n self.icon = icon\n\n def __repr__(self):\n return self.full_name\n\n def configurate(self, **args):\n pass\n\n @property\n def slug_attr(self):\n return camel2slug(self.attr.replace(\".\", \"_\"))\n\n def setup(self, vehicle, mutable=True, **config):\n self.vehicle = vehicle\n\n if not mutable and self.is_mutable:\n _LOGGER.info(\"Skipping %s because mutable\", self)\n return False\n\n if not self.is_supported:\n _LOGGER.debug(\n \"%s (%s:%s) is not supported\",\n self,\n type(self).__name__,\n self.attr,\n )\n return False\n\n _LOGGER.debug(\"%s is supported\", self)\n\n self.configurate(**config)\n\n return True\n\n @property\n def vehicle_name(self):\n return self.vehicle.registration_number or self.vehicle.vin\n\n @property\n def full_name(self):\n return \"%s %s\" % (self.vehicle_name, self.name)\n\n @property\n def is_mutable(self):\n raise NotImplementedError(\"Must be set\")\n\n @property\n def is_supported(self):\n supported = \"is_\" + self.attr + \"_supported\"\n if hasattr(self.vehicle, supported):\n return getattr(self.vehicle, supported)\n if hasattr(self.vehicle, self.attr):\n return True\n return self.vehicle.has_attr(self.attr)\n\n @property\n def str_state(self):\n return self.state\n\n @property\n def state(self):\n if hasattr(self.vehicle, self.attr):\n return getattr(self.vehicle, self.attr)\n return self.vehicle.get_attr(self.attr)\n\n @property\n def attributes(self):\n return {}\n\n\nclass Sensor(Instrument):\n def __init__(self, attr, name, icon, unit):\n super().__init__(component=\"sensor\", attr=attr, name=name, icon=icon)\n self.unit = unit\n\n def configurate(self, scandinavian_miles=False, **config):\n if self.unit and scandinavian_miles and \"km\" in self.unit:\n self.unit = \"mil\"\n\n @property\n def is_mutable(self):\n return False\n\n @property\n def str_state(self):\n if self.unit:\n return \"%s %s\" % (self.state, self.unit)\n else:\n return \"%s\" % self.state\n\n @property\n def state(self):\n val = super().state\n if val and \"mil\" in self.unit:\n return val / 10\n else:\n return val\n\n\nclass FuelConsumption(Sensor):\n def __init__(self):\n super().__init__(\n attr=\"averageFuelConsumption\",\n name=\"Fuel consumption\",\n icon=\"mdi:gas-station\",\n unit=\"L/100 km\",\n )\n\n def configurate(self, scandinavian_miles=False, **config):\n if scandinavian_miles:\n self.unit = \"L/mil\"\n\n @property\n def state(self):\n val = super().state\n decimals = 2 if \"mil\" in self.unit else 1\n if val:\n return round(val / 10, decimals)\n\n\nclass Odometer(Sensor):\n def __init__(self, attr=\"odometer\", name=\"Odometer\"):\n super().__init__(\n attr=attr, name=name, icon=\"mdi:speedometer\", unit=\"km\"\n )\n\n @property\n def state(self):\n val = super().state\n if val:\n return int(round(val / 1000)) # m->km\n return 0\n\n\nclass JournalLastTrip(Sensor):\n def __init__(self):\n super().__init__(\n attr=\"trips\", name=\"Last trip\", unit=\"\", icon=\"mdi:book-open\"\n )\n\n @property\n def is_supported(self):\n return self.vehicle.is_journal_supported\n\n @property\n def trip(self):\n if self.vehicle.trips:\n return self.vehicle.trips[0][\"tripDetails\"][0]\n\n @property\n def start_address(self):\n return \"{}, {}\".format(\n self.trip[\"startPosition\"][\"streetAddress\"],\n self.trip[\"startPosition\"][\"city\"],\n )\n\n @property\n def end_address(self):\n return \"{}, 
{}\".format(\n self.trip[\"endPosition\"][\"streetAddress\"],\n self.trip[\"endPosition\"][\"city\"],\n )\n\n @property\n def start_time(self):\n return self.trip[\"startTime\"].astimezone(None)\n\n @property\n def end_time(self):\n return self.trip[\"endTime\"].astimezone(None)\n\n @property\n def duration(self):\n return self.end_time - self.start_time\n\n @property\n def state(self):\n if self.trip:\n return self.end_time\n\n @property\n def attributes(self):\n if self.trip:\n return dict(\n start_address=self.start_address,\n start_time=str(self.start_time),\n end_address=self.end_address,\n end_time=str(self.end_time),\n duration=str(self.duration),\n )\n\n\nclass BinarySensor(Instrument):\n def __init__(self, attr, name, device_class):\n super().__init__(component=\"binary_sensor\", attr=attr, name=name)\n self.device_class = device_class\n\n @property\n def is_mutable(self):\n return False\n\n @property\n def str_state(self):\n if self.device_class in [\"door\", \"window\"]:\n return \"Open\" if self.state else \"Closed\"\n if self.device_class == \"safety\":\n return \"Warning!\" if self.state else \"OK\"\n if self.device_class == \"plug\":\n return \"Charging\" if self.state else \"Plug removed\"\n if self.state is None:\n _LOGGER.error(\"Can not encode state %s:%s\", self.attr, self.state)\n return \"?\"\n return \"On\" if self.state else \"Off\"\n\n @property\n def state(self):\n val = super().state\n if isinstance(val, (bool, list)):\n # for list (e.g. bulb_failures):\n # empty list (False) means no problem\n return bool(val)\n elif isinstance(val, str):\n return val != \"Normal\"\n return val\n\n @property\n def is_on(self):\n return self.state\n\n\nclass BatteryChargeStatus(BinarySensor):\n def __init__(self):\n super().__init__(\n \"hvBattery.hvBatteryChargeStatusDerived\",\n \"Battery charging\",\n \"plug\",\n )\n\n @property\n def state(self):\n return super(BinarySensor, self).state == \"CablePluggedInCar_Charging\"\n\n\nclass Lock(Instrument):\n def __init__(self):\n super().__init__(component=\"lock\", attr=\"lock\", name=\"Door lock\")\n\n @property\n def is_mutable(self):\n return True\n\n @property\n def str_state(self):\n return \"Locked\" if self.state else \"Unlocked\"\n\n @property\n def state(self):\n return self.vehicle.is_locked\n\n @property\n def is_locked(self):\n return self.state\n\n async def lock(self):\n await self.vehicle.lock()\n\n async def unlock(self):\n await self.vehicle.unlock()\n\n\nclass Switch(Instrument):\n def __init__(self, attr, name, icon):\n super().__init__(component=\"switch\", attr=attr, name=name, icon=icon)\n\n @property\n def is_mutable(self):\n return True\n\n @property\n def str_state(self):\n return \"On\" if self.state else \"Off\"\n\n def is_on(self):\n return self.state\n\n def turn_on(self):\n pass\n\n def turn_off(self):\n pass\n\n\nclass Heater(Switch):\n def __init__(self):\n super().__init__(attr=\"heater\", name=\"Heater\", icon=\"mdi:radiator\")\n\n @property\n def state(self):\n return self.vehicle.is_heater_on\n\n async def turn_on(self):\n await self.vehicle.start_heater()\n\n async def turn_off(self):\n await self.vehicle.stop_heater()\n\n\nclass EngineStart(Switch):\n def __init__(self):\n super().__init__(\n attr=\"is_engine_running\", name=\"Engine\", icon=\"mdi:engine\"\n )\n\n @property\n def is_supported(self):\n return self.vehicle.is_engine_start_supported\n\n async def turn_on(self):\n await self.vehicle.start_engine()\n\n async def turn_off(self):\n await self.vehicle.stop_engine()\n\n\nclass 
Position(Instrument):\n def __init__(self):\n super().__init__(\n component=\"device_tracker\", attr=\"position\", name=\"Position\"\n )\n\n @property\n def is_mutable(self):\n return False\n\n @property\n def state(self):\n state = super().state or {}\n return (\n state.get(\"latitude\", \"?\"),\n state.get(\"longitude\", \"?\"),\n state.get(\"timestamp\", None),\n state.get(\"speed\", None),\n state.get(\"heading\", None),\n )\n\n @property\n def str_state(self):\n state = super().state or {}\n ts = state.get(\"timestamp\")\n return (\n state.get(\"latitude\", \"?\"),\n state.get(\"longitude\", \"?\"),\n str(ts.astimezone(tz=None)) if ts else None,\n state.get(\"speed\", None),\n state.get(\"heading\", None),\n )\n\n\n# FIXME: Maybe make this list configurable as external yaml\ndef create_instruments():\n return [\n Position(),\n Lock(),\n Heater(),\n Odometer(),\n Odometer(attr=\"tripMeter1\", name=\"Trip meter 1\"),\n Odometer(attr=\"tripMeter2\", name=\"Trip meter 2\"),\n Sensor(\n attr=\"fuelAmount\",\n name=\"Fuel amount\",\n icon=\"mdi:gas-station\",\n unit=\"L\",\n ),\n Sensor(\n attr=\"fuelAmountLevel\",\n name=\"Fuel level\",\n icon=\"mdi:water-percent\",\n unit=\"%\",\n ),\n FuelConsumption(),\n Sensor(\n attr=\"distanceToEmpty\", name=\"Range\", icon=\"mdi:ruler\", unit=\"km\"\n ),\n Sensor(\n attr=\"averageSpeed\",\n name=\"Average speed\",\n icon=\"mdi:ruler\",\n unit=\"km/h\",\n ),\n Sensor(\n attr=\"hvBattery.distanceToHVBatteryEmpty\",\n name=\"Battery range\",\n icon=\"mdi:ruler\",\n unit=\"km\",\n ),\n Sensor(\n attr=\"hvBattery.hvBatteryLevel\",\n name=\"Battery level\",\n icon=\"mdi:battery\",\n unit=\"%\",\n ),\n Sensor(\n attr=\"hvBattery.timeToHVBatteryFullyCharged\",\n name=\"Time to fully charged\",\n icon=\"mdi:clock\",\n unit=\"minutes\",\n ),\n BatteryChargeStatus(),\n EngineStart(),\n JournalLastTrip(),\n BinarySensor(\n attr=\"is_engine_running\", name=\"Engine\", device_class=\"power\"\n ),\n BinarySensor(attr=\"is_locked\", name=\"Door lock\", device_class=\"lock\"),\n BinarySensor(attr=\"doors.hoodOpen\", name=\"Hood\", device_class=\"door\"),\n BinarySensor(\n attr=\"doors.tailgateOpen\", name=\"Tailgate\", device_class=\"door\"\n ),\n BinarySensor(\n attr=\"doors.frontLeftDoorOpen\",\n name=\"Front left door\",\n device_class=\"door\",\n ),\n BinarySensor(\n attr=\"doors.frontRightDoorOpen\",\n name=\"Front right door\",\n device_class=\"door\",\n ),\n BinarySensor(\n attr=\"doors.rearLeftDoorOpen\",\n name=\"Rear left door\",\n device_class=\"door\",\n ),\n BinarySensor(\n attr=\"doors.rearRightDoorOpen\",\n name=\"Rear right door\",\n device_class=\"door\",\n ),\n BinarySensor(\n attr=\"windows.frontLeftWindowOpen\",\n name=\"Front left window\",\n device_class=\"window\",\n ),\n BinarySensor(\n attr=\"windows.frontRightWindowOpen\",\n name=\"Front right window\",\n device_class=\"window\",\n ),\n BinarySensor(\n attr=\"windows.rearLeftWindowOpen\",\n name=\"Rear left window\",\n device_class=\"window\",\n ),\n BinarySensor(\n attr=\"windows.rearRightWindowOpen\",\n name=\"Rear right window\",\n device_class=\"window\",\n ),\n BinarySensor(\n attr=\"tyrePressure.frontRightTyrePressure\",\n name=\"Front right tyre\",\n device_class=\"safety\",\n ),\n BinarySensor(\n attr=\"tyrePressure.frontLeftTyrePressure\",\n name=\"Front left tyre\",\n device_class=\"safety\",\n ),\n BinarySensor(\n attr=\"tyrePressure.rearRightTyrePressure\",\n name=\"Rear right tyre\",\n device_class=\"safety\",\n ),\n BinarySensor(\n attr=\"tyrePressure.rearLeftTyrePressure\",\n 
name=\"Rear left tyre\",\n device_class=\"safety\",\n ),\n BinarySensor(\n attr=\"washerFluidLevel\", name=\"Washer fluid\", device_class=\"safety\"\n ),\n BinarySensor(\n attr=\"brakeFluid\", name=\"Brake Fluid\", device_class=\"safety\"\n ),\n BinarySensor(\n attr=\"serviceWarningStatus\", name=\"Service\", device_class=\"safety\"\n ),\n BinarySensor(attr=\"bulbFailures\", name=\"Bulbs\", device_class=\"safety\"),\n BinarySensor(attr=\"any_door_open\", name=\"Doors\", device_class=\"door\"),\n BinarySensor(\n attr=\"any_window_open\", name=\"Windows\", device_class=\"window\"\n ),\n ]\n\n\nclass Dashboard:\n def __init__(self, vehicle, **config):\n _LOGGER.debug(\"Setting up dashboard with config :%s\", config)\n self.instruments = [\n instrument\n for instrument in create_instruments()\n if instrument.setup(vehicle, **config)\n ]\n","repo_name":"snaptec/openWB","sub_path":"modules/soc_volvo/volvooncall/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","stars":322,"dataset":"github-code","pt":"81"} +{"seq_id":"2144260360","text":"import math\n\n\ndef simulate_contributions_for(recipient_type, world):\n contribution_ratio = world['contribution_ratio']\n n_app_devs = world['n_app_devs']\n # n_apps = world['n_apps']\n n_users = world['n_users']\n per_app_users = world['per_app_users']\n apps_used_per_user = world['apps_used_per_user']\n lib_app_penetration = world['lib_app_penetration']\n known_app_value_to_user = world['known_app_value_to_user']\n known_lib_value_to_user = world['known_lib_value_to_user']\n lib_value_to_app_dev = world['lib_value_to_app_dev']\n app_dev_user_awareness = world['app_dev_user_awareness']\n\n contributing_devs = math.ceil(\n n_app_devs * lib_app_penetration)\n\n if(recipient_type == 'app'):\n contributing_users = math.ceil(per_app_users)\n user_amount = known_app_value_to_user * contribution_ratio\n dev_amount = 0\n\n else: # lib\n contributing_users = math.ceil(n_users)\n user_amount = known_lib_value_to_user * apps_used_per_user\n dev_amount = (lib_value_to_app_dev -\n (known_lib_value_to_user * per_app_users * app_dev_user_awareness)) * contribution_ratio\n\n user_contribs = [user_amount] * contributing_users\n dev_contribs = [dev_amount] * contributing_devs\n contributions = user_contribs + dev_contribs\n\n total_user_contribs_label = 'total_user_contribs_per_' + recipient_type\n total_dev_contribs_label = 'total_dev_contribs_per_' + recipient_type\n per_user_contribs_label = 'per_user_contribs_per_' + recipient_type\n per_dev_contribs_label = 'per_dev_contribs_per_' + recipient_type\n\n world.update([\n (total_user_contribs_label, sum(user_contribs)),\n (total_dev_contribs_label, sum(dev_contribs)),\n (per_user_contribs_label, user_amount),\n (per_dev_contribs_label, dev_amount)\n ])\n\n return contributions, world\n\n\ndef sum_of_roots(contributions):\n roots = [i ** (1/2) for i in contributions]\n return sum(roots)\n\n\ndef calc_match_for(contributions):\n roots = sum_of_roots(contributions)\n return roots ** 2\n\n\ndef calc_funding_for(recipient_type, world):\n contributions, world = simulate_contributions_for(recipient_type, world)\n raw_match = calc_match_for(contributions)\n directs = sum(contributions)\n\n return raw_match, directs, world\n\n\ndef calc_clr_matches(_raw_matches, world):\n budget = world['clr_budget']\n\n raw_sum = sum(_raw_matches)\n props = [i / raw_sum for i in _raw_matches]\n clr_matches = [i * budget for i in props]\n return 
clr_matches\n# e.g. contributions [1, 4, 9] -> raw match (1 + 2 + 3)**2 = 36; calc_clr_matches then\n# rescales all raw matches proportionally so they sum to the clr_budget\n","repo_name":"spengrah/CLR-simulations","sub_path":"clr.py","file_name":"clr.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"7231881572","text":"from rest_framework import permissions\nfrom django.urls import path\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"Svoya-Proverka API\",\n        default_version='v2.0',\n        description=\"The API is intended for quick searching and checking of complaints and blah-blah...\",\n        contact=openapi.Contact(email=\"caramba.ge@yandex.ru\"),\n        license=openapi.License(name=\"Svoya-proverka API\"),\n    ),\n    public=True,\n    permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n    path('documentation/swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n    path('documentation/redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n]\n","repo_name":"Caramba2517/svoyaproverka_api","sub_path":"svoyaproverka_api/yasg.py","file_name":"yasg.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16477580954","text":"# encoding=utf8\n\"\"\"Creates State objects (scenarios) in the game.\"\"\"\nfrom State import *\n\n# State objects that represent different stages (scenarios) in the game.\ncrashState0 = State()\ncrashState1 = State()\ncrashState2 = State()\ncrashState3 = State()\ncrashState4 = State()\ncrashState5 = State()\ncrashState6 = State()\ncrashState7 = State()\ncrashState8 = State()\ncrashState9 = State()\ncrashState10 = State()\ncrashState11 = State()\n\ncrashState9.setDeadState(True)\ncrashState10.setDeadState(True)\ncrashState7.setFinalState(True)\ncrashState11.setDeadState(True)\n\ncrashState0.setScene(\n    \"You wake up on a deserted beach shore along with the remains of the \"\n    \"plane crash. You quickly discover you’re the only survivor. You \"\n    \"think to yourself, how will you survive?\")\n\ncrashState1.setScene(\n    \"You go to explore the right side of the island and you discover that, \"\n    \"despite being the only plane crash survivor on a deserted island, \"\n    \"you are not alone. In the distance, you notice that there is a dingy-\"\n    \"looking man near what looks to be his shelter. He doesn’t see you \"\n    \"as you cautiously move in his direction.\")\n\ncrashState2.setScene(\n    \"You now go to the left side of the island to explore. Despite your \"\n    \"growing hunger, you find a large supply of wood. \")\n\ncrashState3.setScene(\n    \"While the island man is away from his home, you enter and look around. \"\n    \"You see mostly junk but quickly land your eyes on what appears to be \"\n    \"a makeshift fishing rod. You take it as you quickly escape that \"\n    \"side of the island before the island man comes back.\\n[Fishing \"\n    \"unlocked: You can now go fishing in a suitable location.]\")\n# TODO: Fishing Skill needs to be added to the player skills array\n\ncrashState4.setScene(\n    \"The strange island man hears your steps as you move closer to him and\"\n    \" turns around. To your surprise he rushes to you in a dramatic\"\n    \" fashion. He grabs a hold of you before you can do \"\n    \"anything and says ”Boy, am I glad to see you”. The two of you \"\n    \"explain how you got stuck on the island and soon become friends. The \"\n    \"island man shares his shelter with you and now you have a new home \"\n    \"and a friend. [Communication Skill Unlocked: You can now persuade \"\n    \"people to benefit you]\")\n# TODO: Communication skill needs to be added to the player skills array\n\ncrashState8.setScene(\n    \"You begin your journey to find berries and, lucky for you, you are \"\n    \"successful in your search. While you're gathering your berries you hear\"\n    \" the bush in front of you rattle louder and louder. Suddenly a wolf \"\n    \"comes out of the bush.\")\n\ncrashState9.setScene(\n    \"Your new friend teaches you to hunt and you become good, but…not that \"\n    \"good. You don't realize this and go off hunting on your own. Your \"\n    \"naivety causes you to get killed by your lunch.\")\n\ncrashState10.setScene(\n    \"Trying to save yourself, you run away as fast as you can from the \"\n    \"wolf. However, you are no match for the beast. You get eaten by the \"\n    \"beast.\")\n\ncrashState11.setScene(\"The berries were poisonous. You die!\")\n\ncrashState5.setScene(\n    \"You put up a brave fight against the wolf. Despite getting injured, \"\n    \"you kill the wolf. You return to the place where you made the fire. On your\"\n    \" way back you find a fishing rod.\")\n\ncrashState6.setScene(\n    \"You take the wood you just found and build a fire. Now that you’re \"\n    \"warm, you decide to find food.\")\n\ncrashState7.setScene(\n    \"You are on your way to go fishing on the beach. After that you decide\"\n    \" to go to the jungle.\")\n
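\n# Rough sketch of the State interface this module assumes; State.py is not included in this\n# dump, so the signatures below are inferred from usage in this file, not confirmed:\n#\n# class State:\n#     def setScene(self, text): ...                  # narration shown when the state is entered\n#     def makeTransition(self, option, target): ...  # map a menu option string to the next State\n#     def setDeadState(self, flag): ...              # mark a game-over state\n#     def setFinalState(self, flag): ...             # mark the winning state\n#     def setSkillNeeded(self, skill): ...           # gate this state behind an unlocked skill\n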
\n# Make transitions from one state to another state based on options.\ncrashState0.makeTransition("Explore the right side of the island", crashState1)\ncrashState0.makeTransition("Explore the left side of the island", crashState2)\n\ncrashState1.makeTransition("Secretly enter his home", crashState3)\ncrashState1.makeTransition("Make him a friend", crashState4)\n\ncrashState2.makeTransition("Build a Fire", crashState6)\ncrashState2.makeTransition("You find some unknown berries. Eat it", crashState11)\n\ncrashState3.makeTransition('E', crashState2)\n\ncrashState4.makeTransition("Learn to hunt", crashState9)\ncrashState4.makeTransition("Explore the rest of the island", crashState2)\n\ncrashState6.makeTransition("Go Fishing", crashState7)\ncrashState6.makeTransition("Go pick some berries", crashState8)\ncrashState7.setSkillNeeded("Fishing")\n\ncrashState8.makeTransition("Run away", crashState10)\ncrashState8.makeTransition("Pick up the wood near you, and fight the wolf", crashState5)\n\ncrashState5.makeTransition('E', crashState2)\n","repo_name":"henchhing-limbu/Survive","sub_path":"crash_site.py","file_name":"crash_site.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17795194237","text":"from django.shortcuts import render,redirect, get_object_or_404\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .models import Teacher,Assignment\r\nfrom attendance.models import daily_attendance\r\nfrom classes.models import ClassInfo\r\n# Create your views here.\r\n@login_required\r\ndef 
teacher_profile(request):\r\n\tteachers=Teacher.objects.get(user=request.user)\r\n\tclasses=teachers.class_info.all()\r\n\ta=[]\r\n\tfor c in classes:\r\n\t\ta.append(daily_attendance.objects.all().filter(class_info=c).filter(teacher=teachers).count())\r\n\r\n\treturn render(request,'teachers/profile.html',{'teachers':teachers,'classes':classes,'a':a})\r\n\r\n@login_required\r\ndef classes(request):\r\n\tteachers=Teacher.objects.all().filter(user=request.user)\r\n\tclasses=teachers[0].class_info.all()\r\n\r\n\treturn render(request,'teachers/class.html',{'teachers':teachers[0],'classes':classes})\r\n@login_required\r\ndef upload_assignment(request):\r\n\tteachers=Teacher.objects.all().filter(user=request.user)\r\n\tclasses=teachers[0].class_info.all()\r\n\tassign = Assignment.objects.all().filter(teacher=teachers[0])\r\n\tif request.method == 'POST' and request.FILES['assignment']:\r\n\t\tc=request.POST.get('class_info')\r\n\t\tcl=ClassInfo.objects.get(name=c)\r\n\t\td=request.POST.get('duedate')\r\n\t\ta = request.FILES['assignment']\r\n\t\tAssignment.objects.create(file=a, class_info=cl, duedate=d, teacher=teachers[0])\r\n\t\treturn render(request,'teachers/uploadedassignment.html',{'assign':assign,'classes':classes,'teachers':teachers[0]})\r\n\treturn render(request, 'teachers/upload_assignment.html',{'classes':classes,'teachers':teachers[0]})\r\n\r\ndef uploaded_assignment(request):\r\n\tteachers=Teacher.objects.all().filter(user=request.user)\r\n\tclasses=teachers[0].class_info.all()\r\n\tassign = Assignment.objects.all().filter(teacher=teachers[0])\r\n\treturn render(request,'teachers/uploadedassignment.html',{'assign':assign,'classes':classes,'teachers':teachers[0]})","repo_name":"surbhijha17/school_management_system","sub_path":"teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14941994541","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# Copyright 2014, Cercle Informatique ASBL. 
All rights reserved.\n#\n# This program is free software: you can redistribute it and/or modify it\n# under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or (at\n# your option) any later version.\n#\n# This software was made by hast, C4, ititou at UrLab, ULB's hackerspace\n\nfrom django.conf.urls import patterns, url, include\nfrom django.views.generic import TemplateView\nfrom authentification import app_redirection, ulb_redirection, intranet_auth\nfrom django.contrib.auth.views import login, logout\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom graph.urls import json_urls as graph_json\nfrom views import home, node_canonic, index, p402\nimport settings\n\n\n# decorator which calls function_in if the user is authenticated, function_out otherwise\ndef user_logged(function_in, function_out):\n    def toggle(request, *args, **kwargs):\n        if request.user.is_authenticated():\n            return function_in(request, *args, **kwargs)\n        else:\n            return function_out(request, *args, **kwargs)\n    return toggle\n\n\nurlpatterns = patterns(\"\",\n    # All JSON urls\n    url(r\"^json/tree/\", include(graph_json)),\n    url(r\"^json/node/\", include(\"polydag.urls\")),\n\n    # The apps entry points\n    url(r\"^calendar/\", include(\"calendars.urls\")),\n    url(r\"^ulb/\", include(\"graph.urls\")),\n    url(r\"^document/\", include(\"documents.urls\")),\n    url(r\"^telepathy/\", include(\"telepathy.urls\")),\n    url(r\"^notifications/\", include(\"notify.urls\")),\n    url(r\"^users/\", include(\"users.urls\")),\n\n    url(r\"^node/(?P\\d+)$\", node_canonic, name=\"node_canonic\"),\n\n    url(r\"^$\",\n        user_logged(home, index),\n        name=\"index\"),\n\n    url(r\"^p402/$\", p402, name=\"p402\"),\n\n    url(r\"^syslogin$\",\n        user_logged(app_redirection, login),\n        {\"template_name\": \"syslogin.html\"},\n        name=\"syslogin\"),\n\n    url(r\"^auth/(?P.*)$\",\n        intranet_auth,\n        name=\"auth_entry\"),\n\n    url(r\"^logout$\",\n        logout, {\"next_page\": \"/\"},\n        name=\"logout\"),\n\n    # fragments\n    url(r\"^\", include(\"fragments.urls\")),\n\n    url(r'^admin/', include(admin.site.urls)),\n\n    url(r'^help/markdown$', TemplateView.as_view(template_name='markdown.html'), name=\"markdown_help\"),\n    url(r'^help/$', TemplateView.as_view(template_name='help.html'), name=\"help\"),\n)\n\nhandler400 = 'www.error.error400'\nhandler403 = 'www.error.error403'\nhandler404 = 'www.error.error404'\nhandler500 = 'www.error.error500'\n\n\nif settings.DEBUG:\n    from django.conf.urls.static import static\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"williamjin127/Dochub","sub_path":"www/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23269225791","text":"# W05 Prove Assignment.\n\n# Name: Lucas Neves Rocha.\n# Course: Programming building blocks.\n# Instructor: Travis Christiansen.\n\n# Task: Create a text-type adventure game.\nfrom sys import exit\nfrom os import system as clear\n\nclear(\"cls\")\nname = input(\"What is your name?\\n\")\n\n# The input \"start\" asks if the user wants to participate in the assignment.\n# The while loop is used so everything else that is not Y or N will be rejected.\n# I used \"clear\" to keep the terminal clean.\n\nwhile True:\n    clear(\"cls\")\n    while True:\n        print(f\"Well {name.capitalize()}, nice to meet you. 
My name is Lucas and I really need your help with something.\")\n        print()\n\n        start = input(\"I have homework to do... It is about creating an Adventure Game with options or, \\nbetter said, paths to take. Could you help me? [Y/N]\\n\")\n        if start.lower() == \"n\":\n            clear(\"cls\")\n            print(\"No worries, I'll find someone else to help me. Anyways, thank you and have a great week! :)\")\n            break\n\n        elif start.lower() == \"y\":\n            clear(\"cls\")\n            print(\"Okay, let's move on.\")\n            print()\n            break\n\n        else:\n            clear(\"cls\")\n            print(\"Wrong input, please answer with Y or N. \")\n            print()\n\n    if start.lower() == \"n\":\n        break\n\n    # Here is the first part of the program. The user can choose between two options.\n    \n    clear(\"cls\")\n    while True:\n        decision = input(\"It is Monday. You wake up with enough time before work, \\nbut you also have homework from college to do. What is the first thing to do? SCRIPTURE STUDY or HOMEWORK?\\n\") \n        if decision.lower() == \"scripture study\":\n            clear(\"cls\")\n            print(\"You realize that by reading the scriptures and praying you'll have\\nthe spirit to work on your assignments and have a good performance at your work.\")\n\n            # If the user types \"scripture study\" he/she will have another three options, but then the program ends.\n            # If the user types anything else, the program doesn't work.\n\n            while True:\n                decision1_1 = input(\"Linear reading is always good, but you thoughtfully choose between 2 NEPHI, ALMA and ETHER.\\n\")\n                if decision1_1.lower() == \"2 nephi\":\n                    clear(\"cls\")\n                    print(\"You open the Book of Mormon and read, in 2 Nephi 32:3, that you should\\n'feast upon the words of Christ' because 'the words of Christ will tell you all things what ye should do.'\")\n                    print(\"You realize that you need to be more diligent with scripture studies and plan to read them for at least 15 minutes every day before work.\")\n                    print()\n                    break\n\n                elif decision1_1.lower() == \"alma\":\n                    clear(\"cls\")\n                    print(\"You open the Book of Mormon and read, in Alma 7:12, that Christ suffered to know how to help you according to your infirmities.\")\n                    print(\"By reading this scripture you grow your testimony about Christ's divine role in guiding you through your decisions.\")\n                    print()\n                    break\n\n                elif decision1_1.lower() == \"ether\":\n                    clear(\"cls\")\n                    print(\"You open the Book of Mormon and read, in Ether 12:27, that Christ 'give unto men weakness that they may\\nbe humble' and that if you humble yourself before Him, He will make 'weak things become strong' to you.\")\n                    print(\"Then you realize that by being humble and asking His help to balance all the things you need to do, He would definitely strengthen you.\")\n                    print()\n                    break\n                else:\n                    clear(\"cls\")\n                    print(\"Wrong input, try again.\")\n                    print()\n            break\n\n        # If the user chooses \"homework\" the program continues.\n\n        elif decision.lower() == \"homework\":\n            clear(\"cls\")\n            print(\"You start to work on your college assignments but after 15 minutes you get distracted and\\ncatch yourself watching random YouTube videos, wasting time.\")\n            print(\"When you realize it, you have lost the entire morning and need to hurry to your job,\\nthus not reading the scriptures, nor doing your homework. \")\n            print(\"\")\n            print(\"After being disappointed in yourself the whole day, you decide that when you arrive home, you will set aside serious study time.\")\n\n            # Another while loop to keep the user typing what he/she should type in the last part of the program.\n\n            while True:\n                decision2 = input(\"You finally arrive home. 
You're so tired after a whole day of hard work,\\nand you even question yourself... 'Am I going to STUDY or REST?'\\n\")\n                if decision2.lower() == \"study\":\n                    clear(\"cls\") \n                    print(\"You take a cold shower, eat dinner and push yourself to focus and work on your assignments, finishing everything you needed to do by 11PM.\")\n                    print(\"Then you go to your bed and fall asleep.\")\n                    print()\n                    print(\"You learned the lesson and never neglect your study time again. :)\")\n                    break\n\n                elif decision2.lower() == \"rest\":\n                    clear(\"cls\")\n                    print(\"You haven't learned the lesson... :(\")\n                    print(\"Thank you for participating!\")\n                    break\n                \n                else:\n                    clear(\"cls\")\n                    print(\"Wrong input, try again.\")\n                    print()\n            break\n        else:\n            clear(\"cls\")\n            print(\"Wrong input, try again.\")\n            print()\n        \n        \n    break\n","repo_name":"neves-lucas/python-pathway","sub_path":"W01 - W06/W05 Prove Milestone - Adventure Game.py","file_name":"W05 Prove Milestone - Adventure Game.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73663719945","text":"#!/usr/bin/env python\n\n\"\"\" MultiQC module to parse output from STAR \"\"\"\n\nfrom __future__ import print_function\nfrom collections import OrderedDict\nimport logging\nimport re\n\nfrom multiqc import config, BaseMultiqcModule, plots\n\n# Initialise the logger\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n\n    def __init__(self):\n\n        # Initialise the parent object\n        super(MultiqcModule, self).__init__(name='STAR', anchor='star', \n        href=\"https://github.com/alexdobin/STAR\", \n        info=\"is an ultrafast universal RNA-seq aligner.\")\n\n        # Find and load any STAR reports\n        self.star_data = dict()\n        for f in self.find_log_files(config.sp['star']):\n            parsed_data = self.parse_star_report(f['f'])\n            if parsed_data is not None:\n                s_name = f['s_name'].split('Log.final.out', 1)[0]\n                if s_name in self.star_data:\n                    log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n                self.add_data_source(f, s_name)\n                self.star_data[s_name] = parsed_data\n\n        if len(self.star_data) == 0:\n            log.debug(\"Could not find any reports in {}\".format(config.analysis_dir))\n            raise UserWarning\n\n        log.info(\"Found {} reports\".format(len(self.star_data)))\n\n        # Write parsed report data to a file\n        self.write_data_file(self.star_data, 'multiqc_star')\n\n        # Basic Stats Table\n        self.star_stats_table()\n\n        # Alignment bar plot - only one section, so add to the module intro\n        self.intro += self.star_alignment_chart()\n\n\n    def parse_star_report (self, raw_data):\n        \"\"\" Parse the final STAR log file. 
\"\"\"\n\n regexes = {\n 'total_reads': r\"Number of input reads \\|\\s+(\\d+)\",\n 'avg_input_read_length': r\"Average input read length \\|\\s+([\\d\\.]+)\",\n 'uniquely_mapped': r\"Uniquely mapped reads number \\|\\s+(\\d+)\",\n 'uniquely_mapped_percent': r\"Uniquely mapped reads % \\|\\s+([\\d\\.]+)\",\n 'avg_mapped_read_length': r\"Average mapped length \\|\\s+([\\d\\.]+)\",\n 'num_splices': r\"Number of splices: Total \\|\\s+(\\d+)\",\n 'num_annotated_splices': r\"Number of splices: Annotated \\(sjdb\\) \\|\\s+(\\d+)\",\n 'num_GTAG_splices': r\"Number of splices: GT/AG \\|\\s+(\\d+)\",\n 'num_GCAG_splices': r\"Number of splices: GC/AG \\|\\s+(\\d+)\",\n 'num_ATAC_splices': r\"Number of splices: AT/AC \\|\\s+(\\d+)\",\n 'num_noncanonical_splices': r\"Number of splices: Non-canonical \\|\\s+(\\d+)\",\n 'mismatch_rate': r\"Mismatch rate per base, % \\|\\s+([\\d\\.]+)\",\n 'deletion_rate': r\"Deletion rate per base \\|\\s+([\\d\\.]+)\",\n 'deletion_length': r\"Deletion average length \\|\\s+([\\d\\.]+)\",\n 'insertion_rate': r\"Insertion rate per base \\|\\s+([\\d\\.]+)\",\n 'insertion_length': r\"Insertion average length \\|\\s+([\\d\\.]+)\",\n 'multimapped': r\"Number of reads mapped to multiple loci \\|\\s+(\\d+)\",\n 'multimapped_percent': r\"% of reads mapped to multiple loci \\|\\s+([\\d\\.]+)\",\n 'multimapped_toomany': r\"Number of reads mapped to too many loci \\|\\s+(\\d+)\",\n 'multimapped_toomany_percent': r\"% of reads mapped to too many loci \\|\\s+([\\d\\.]+)\",\n 'unmapped_mismatches_percent': r\"% of reads unmapped: too many mismatches \\|\\s+([\\d\\.]+)\",\n 'unmapped_tooshort_percent': r\"% of reads unmapped: too short \\|\\s+([\\d\\.]+)\",\n 'unmapped_other_percent': r\"% of reads unmapped: other \\|\\s+([\\d\\.]+)\",\n }\n parsed_data = {}\n for k, r in regexes.items():\n r_search = re.search(r, raw_data, re.MULTILINE)\n if r_search:\n parsed_data[k] = float(r_search.group(1))\n # Figure out the numbers for unmapped as for some reason only the percentages are given\n try:\n total_mapped = parsed_data['uniquely_mapped'] + parsed_data['multimapped'] + parsed_data['multimapped_toomany']\n unmapped_count = parsed_data['total_reads'] - total_mapped\n total_unmapped_percent = parsed_data['unmapped_mismatches_percent'] + parsed_data['unmapped_tooshort_percent'] + parsed_data['unmapped_other_percent']\n parsed_data['unmapped_mismatches'] = int(round(unmapped_count * (parsed_data['unmapped_mismatches_percent'] / total_unmapped_percent), 0))\n parsed_data['unmapped_tooshort'] = int(round(unmapped_count * (parsed_data['unmapped_tooshort_percent'] / total_unmapped_percent), 0))\n parsed_data['unmapped_other'] = int(round(unmapped_count * (parsed_data['unmapped_other_percent'] / total_unmapped_percent), 0))\n except KeyError:\n pass\n\n if len(parsed_data) == 0: return None\n return parsed_data\n\n\n def star_stats_table(self):\n \"\"\" Take the parsed stats from the STAR report and add them to the\n basic stats table at the top of the report \"\"\"\n \n headers = OrderedDict()\n headers['uniquely_mapped_percent'] = {\n 'title': '% Aligned',\n 'description': '% Uniquely mapped reads',\n 'max': 100,\n 'min': 0,\n 'suffix': '%',\n 'scale': 'YlGn',\n 'format': '{:.1f}%'\n }\n headers['uniquely_mapped'] = {\n 'title': 'M Aligned',\n 'description': 'Uniquely mapped reads (millions)',\n 'min': 0,\n 'scale': 'PuRd',\n 'modify': lambda x: x / 1000000,\n 'shared_key': 'read_count'\n }\n self.general_stats_addcols(self.star_data, headers)\n\n def star_alignment_chart (self):\n \"\"\" Make 
the HighCharts HTML to plot the alignment rates \"\"\"\n        \n        # Specify the order of the different possible categories\n        keys = OrderedDict()\n        keys['uniquely_mapped'] = { 'color': '#437bb1', 'name': 'Uniquely mapped' }\n        keys['multimapped'] = { 'color': '#7cb5ec', 'name': 'Mapped to multiple loci' }\n        keys['multimapped_toomany'] = { 'color': '#f7a35c', 'name': 'Mapped to too many loci' }\n        keys['unmapped_mismatches'] = { 'color': '#e63491', 'name': 'Unmapped: too many mismatches' }\n        keys['unmapped_tooshort'] = { 'color': '#b1084c', 'name': 'Unmapped: too short' }\n        keys['unmapped_other'] = { 'color': '#7f0000', 'name': 'Unmapped: other' }\n        \n        # Config for the plot\n        pconfig = {\n            'id': 'star_alignment_plot',\n            'title': 'STAR Alignment Scores',\n            'ylab': '# Reads',\n            'cpswitch_counts_label': 'Number of Reads'\n        }\n        \n        return plots.bargraph.plot(self.star_data, keys, pconfig)\n","repo_name":"jajclement/hotSSDS","sub_path":"archive/DEV/singularity_build/MultiQC_SSDS_Rev1/lib/python2.7/site-packages/multiqc-0.7dev-py2.7.egg/multiqc/modules/star/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24913598497","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport pandas as pd\nfrom functools import reduce\n\ndf_1 = pd.read_csv('/Users/cristinamulas/Desktop/fish-and-seafood-consumption-per-capita.csv')\ndf_1 = pd.DataFrame(df_1) # convert into DataFrame\n#print(df_1.head()) # print 5 rows\ndf_1columns = pd.read_csv('/Users/cristinamulas/Desktop/fish-and-seafood-consumption-per-capita.csv', nrows=0, sep='\\t').columns.tolist()\n#print(df_1columns) # ALL COLUMNS IN THIS DATAFRAME\ndf_1 =df_1.drop(columns=['Code']) # REMOVE CODE COLUMN\n#print(df_1)\nfish2013 = df_1.loc[df_1['Year'] == 2013] # selecting only 2013 rows\n#fish2013\n#print(fish2013['Year'].isnull()) # checking for null values in Year column\nfish2013 = fish2013.dropna(how='any',axis=0) # delete null values\n#print(fish2013.head())\n\n# print(fish2013.describe()) # Statistical description on this table\n\ndf_2 = pd.read_csv('/Users/cristinamulas/Desktop/number-of-people-with-cancer.csv')\ndf_2 = pd.DataFrame(df_2) # convert into DataFrame\n#print(df_2.head()) # print 5 rows\n\ndf_2columns = pd.read_csv('/Users/cristinamulas/Desktop/number-of-people-with-cancer.csv', nrows=0, sep='\\t').columns.tolist()\n#print(df_2columns) # ALL COLUMNS IN THIS DATAFRAME\n\ndf_2 =df_2.drop(columns=['Code']) # REMOVE CODE COLUMN\n#print(df_2)\ncancer2013 = df_2.loc[df_2['Year'] == 2013] # selecting only 2013 rows\n#cancer2013\n# print(cancer2013['Year'].isnull()) # checking for null values in Year column\ncancer2013 = cancer2013.dropna(how='any',axis=0) # delete null values\n#cancer2013\n# print(cancer2013.describe()) # Statistical description on this table\ndf_3 = pd.read_csv('/Users/cristinamulas/Desktop/global-meat-production.csv')\ndf_3 = pd.DataFrame(df_3) # convert into DataFrame\n#print(df_3.head()) # print 5 rows\ndf_3columns = pd.read_csv('/Users/cristinamulas/Desktop/global-meat-production.csv', nrows=0, sep='\\t').columns.tolist()\n#print(df_3columns) # ALL COLUMNS IN THIS DATAFRAME\n\nmeat2013 = df_3.loc[df_3['Year'] == 2013] # selecting only 2013 rows\n#meat2013\n# print(meat2013['Year'].isnull()) # checking for null values in Year column\nmeat2013 = meat2013.dropna(how='any',axis=0) # delete null values\n#meat2013\n# print(meat2013.describe()) # Statistical description on this table\ndf_4 = 
pd.read_csv('/Users/cristinamulas/Desktop/per-capita-milk-consumption.csv')\ndf_4 = pd.DataFrame(df_4) # convert into DataFrame\n#print(df_4.head()) # print 5 rows\n\ndf_4columns = pd.read_csv('/Users/cristinamulas/Desktop/per-capita-milk-consumption.csv', nrows=0, sep='\\t').columns.tolist()\n#print(df_4columns) # ALL COLUMNS IN THIS DATAFRAME\ndf_4 =df_4.drop(columns=['Code']) # REMOVE CODE COLUMN\n#print(df_4)\nmilk2013 = df_4.loc[df_4['Year'] == 2013] # selecting only 2013 rows\n#milk2013\n# print(milk2013['Year'].isnull()) # checking for null values in Year column\nmilk2013 = milk2013.dropna(how='any',axis=0) # delete null values\n#milk2013\n#print(milk2013.describe()) # Statistical description on this table\ndf_5 = pd.read_csv('/Users/cristinamulas/Desktop/per-capita-egg-consumption-kilograms-per-year.csv')\ndf_5 = pd.DataFrame(df_5) # convert into DataFrame\n#print(df_5.head()) # print 5 rows\n\ndf_5columns = pd.read_csv('/Users/cristinamulas/Desktop/per-capita-egg-consumption-kilograms-per-year.csv', nrows=0, sep='\\t').columns.tolist()\n#print(df_5columns) # ALL COLUMNS IN THIS DATAFRAME\ndf_5 =df_5.drop(columns=['Code']) # REMOVE CODE COLUMN\n#print(df_5)\negg2013 = df_5.loc[df_5['Year'] == 2013] # selecting only 2013 rows\n#egg2013\n# print(egg2013['Year'].isnull()) # checking for null values in Year column\negg2013 = egg2013.dropna(how='any',axis=0) # delete null values\n#egg2013\n#print(egg2013.describe()) # Statistical description on this table\ndata_frames = [fish2013, meat2013, milk2013, egg2013, cancer2013 ]\n# caution: every frame above only contains Year == 2013 rows, so merging on 'Year' alone\n# cross-joins all rows; merging on ['Entity', 'Year'] would align rows by country instead\ndf_final = reduce(lambda left,right:pd.merge(left,right,on='Year'), data_frames)\nprint(df_final.head())\n\n\n\n\n\n\n# Normalize total_bedrooms column\n#x_array = np.array(df2013[''])\n#normalized_X = preprocessing.normalize([x_array])\n#convert to DF\n\n","repo_name":"Cristinamulas/Jupiter-Python-Project","sub_path":"Cleaning Data Python Project.py","file_name":"Cleaning Data Python Project.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18398902780","text":"#\nimport cv2, glob, os \nimport os.path as osp\nimport numpy as np \nimport geo_func as geo \nimport igl \nimport scipy.optimize as opt\nimport re\nimport tqdm\nimport scipy.spatial as sp\nimport camera_clib as clib\nimport data_loader as dl\nimport open3d as o3d\nimport open3d.visualization.rendering as rendering\n\nlmk_idx = [\n1278,\n1272,\n12,\n1834,\n243,\n781,\n2199,\n1447,\n966,\n3661,\n4390,\n3022,\n2484,\n4036,\n2253,\n3490,\n3496,\n268,\n493,\n1914,\n2044,\n1401,\n3615,\n4240,\n4114,\n2734,\n2509,\n978,\n4527,\n4942,\n4857,\n1140,\n2075,\n1147,\n4269,\n3360,\n1507,\n1542,\n1537,\n1528,\n1518,\n1511,\n3742,\n3751,\n3756,\n3721,\n3725,\n3732,\n5708,\n5695,\n2081,\n0,\n4275,\n6200,\n6213,\n6346,\n6461,\n5518,\n5957,\n5841,\n5702,\n5711,\n5533,\n6216,\n6207,\n6470,\n5517,\n5966,\n]\ndef atof(text):\n    try:\n        retval = float(text)\n    except ValueError:\n        retval = text\n    return retval\n\ndef natural_keys(text):\n    '''\n    alist.sort(key=natural_keys) sorts in human order\n    http://nedbatchelder.com/blog/200712/human_sorting.html\n    (See Toothy's implementation in the comments)\n    float regex comes from https://stackoverflow.com/a/12643073/190597\n    '''\n    return [ atof(c) for c in re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text) ]\n\nclass PreProp:\n    def __init__(self, meta_dir, mesh_dir, lmk_meta = \"./ict_lmk_info.yaml\"):\n        \n        self.proj = np.identity(4)\n        self.rigid_trans = np.identity(4)\n        
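# note: proj and rigid_trans start out as 4x4 identity placeholders; presumably the\n        # calibration / fitting code further down is meant to refine them (inferred from usage)\n        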
self.meta_location = meta_dir \n        self.lmk_meta_location = lmk_meta\n        self.img_meta, self.img_root, self.img_file_ext, = dl.load_extracted_lmk_meta(self.meta_location)\n        self.full_index, self.eye, self.contour, self.mouse, self.eyebrow, self.nose = dl.load_ict_landmark(lmk_meta)\n        self.mesh_dir = mesh_dir\n\n\n\n    def build(self, cutter = None):\n        imgs = self.load_data()\n        meshes = self.load_mesh(cutter)\n\n    def load_mesh(self, cutter = None):\n        root_dir = self.mesh_dir\n\n        identity_mesh_name = glob.glob(osp.join(root_dir, \"identity**.obj\"))\n        identity_mesh_name.sort(key= natural_keys)\n        identity_mesh_name = identity_mesh_name[:cutter]\n        self.meshes = []\n        self.f = None \n        mesh_collect = []\n        for id_file_path in tqdm.tqdm(identity_mesh_name):\n            id_name = osp.basename(id_file_path)\n            \n            v, _ = igl.read_triangle_mesh(id_file_path)\n            mesh_collect.append(v)\n        \n        self.neutral_mesh_v , self.neutral_mesh_f = igl.read_triangle_mesh(osp.join(root_dir, \"generic_neutral_mesh.obj\"))\n        self.id_meshes = mesh_collect\n\n        expr_paths = glob.glob(osp.join(root_dir, \"shapes\", \"generic_neutral_mesh\", \"**.obj\"))\n        expr_paths.sort(key= natural_keys)\n        mesh_collect = []\n        for expr_path in tqdm.tqdm(expr_paths, leave=False):\n            v, f = igl.read_triangle_mesh(expr_path)\n            mesh_collect.append(v)\n        self.meshes.append(mesh_collect)\n        self.expr_meshes = mesh_collect\n\n\n\n    def load_data(self):\n        extension = [\".jpeg\", \".png\", \".jpg\"]\n\n        def read_lmk_meta(path):\n            lmk = []\n            with open(path, \"r\") as fp: \n                while True:\n                    ss = fp.readline()\n                    if not ss:\n                        break\n                    ss = ss.rstrip(\"\\n\")\n                    x, y = ss.split(\" \")\n                    lmk.append([float(x),float(y)])\n            return lmk\n\n        self.img_meta.keys()\n\n\n        self.img_and_info = dict()\n        self.img_list = []\n        for category_idx, key in enumerate(self.img_meta.keys()):\n            category = self.img_and_info.get(key, None)\n            meta_item = self.img_meta[key] # into category\n            if category is None:\n                self.img_and_info[key] = []\n                category = self.img_and_info[key]\n            \n            for meta_data in meta_item:\n                meta_data['landmark']\n                name = meta_data['name']\n                lmk_data = read_lmk_meta(meta_data['landmark'])\n                img_data = cv2.imread(osp.join(self.img_root, name+self.img_file_ext))\n                img_data = {'category_index' : category_idx, 'index' : len(self.img_list), \"name\" : name, \"lmk_data\" : lmk_data, \"img_data\": img_data}\n                self.img_list.append(img_data)\n                category.append(img_data)\n\n        return self.img_list\n    \n\n\n    def calc_cam_intrinsic_Q(self,guessed_focal_length, img_w, img_h):\n        # naive pinhole intrinsics: principal point at the image centre, square pixels\n        h,w, = img_h, img_w\n        uv = [w/2, h/2]\n        Q = np.zeros((3,3))\n\n        focal_length = guessed_focal_length\n        Q[:2, -1] = uv\n        Q[-1, -1] = 1\n        Q[0, 0] = Q[1, 1] = focal_length\n        \n        return Q\n    \n    @staticmethod\n    def projection(Q, V):\n        # perspective-project row-stacked 3D points V with intrinsics Q\n        new_v = (Q @ V.T).T\n        new_v /= new_v[:, -1:]  # keep dims so the row-wise division broadcasts\n        return new_v[:, :-1]\n\n    # def find_fit_projection_mat(self, Q, R, v, lmk):\n    #     old_f = Q[0,0]\n    #     u = Q[0, -1]\n    #     v = Q[1, -1]\n    #     rot_v_T = R@v.T\n    #     rot_v_T[:-1, :] /= rot_v_T[-1, :]\n    #     rot_v_T = rot_v_T[:-1, :]\n    \n    #     lmk[:, 0] -= u\n    #     lmk[:, 1] -= v\n\n\n    #     res = np.linalg.lstsq(rot_v_T.reshape(-1, 1), lmk.reshape(-1, 1))\n    #     new_f = res[0]\n    #     Q[0,0] = new_f\n    #     Q[1,1] = new_f\n\n    #     return Q \n    def find_fit_projection_mat(self, obj_func,neutral, ids, exprs, mesh_lmk_mapping_idx, id_weight, expr_weight, Q , R_t, lmk, max_focal_length = 100):\n        \"\"\"\n        init_focal_length : pixel based focal length\n        \"\"\"\n        \n        u = Q[0, -1]\n        v = Q[1, -1]\n        # shift landmarks into principal-point-centred coordinates\n        lmk[:, 0] -= u\n        
lmk[:, 1] -= v\n\n        fx = 0\n        fx_min = 0\n        fx_max = max_focal_length\n        def calc_Q(fx):\n            reQ = np.copy(Q)\n            reQ[0,0] = fx\n            reQ[1,1] = fx\n            return reQ\n        def calc_func(Q_cur, Q_prev):\n            f_current = obj_func(Q_cur, v, lmk)\n            f_prev = obj_func(Q_prev, v, lmk)\n            return f_current, f_prev\n        k = fx_max - fx_min\n        eps = 0.001\n        fx = fx + k\n\n        f_current, f_prev = calc_func(calc_Q(fx), Q)\n        while abs(f_current - f_prev) > eps:\n            if f_prev >= f_current:\n                Q = calc_Q(fx)\n            else:\n                break\n            f_current, f_prev = calc_func(calc_Q(fx), Q)\n\n        return Q\n\n    def convert_focal_length(self, w, h, mm_focal_length=36):\n        \"\"\"\n        mm to pixel\n        \"\"\"\n        m = max(h, w)\n        standard_sensor_size = 36\n        return m * mm_focal_length / standard_sensor_size\n\n\n    # def shape_fit(self, lmks_2d, images, id_meshes, expr_meshes, lmk_idx):\n    def shape_fit(self, id_meshes, expr_meshes, lmk_idx):\n        # Q = clib.calibrate('./images/checker4/*.jpg')\n        # extract actor-specific blendshapes\n        # iteratively fit shapes\n        # it consists of 2 phases.\n        # we find the id weight, expr weight and proj matrix in each iteration,\n        # but we don't save and reuse those weights,\n        # so why iterate the optimization at all?\n        # The main reason we calculate weights is to find contour points and fit them to the real face.\n\n        lmk_idx = np.array(lmk_idx)\n        lmk_idx_list = np.stack([lmk_idx for _ in range(len(self.img_list))], axis=0)\n\n        neutral = self.neutral_mesh_v\n        # neutral_bar = neutral[lmk_idx, :]\n\n        ids = np.array(id_meshes)\n        ids -= np.expand_dims(neutral, axis=0)\n        # ids_bar = ids[..., lmk_idx, :]\n        id_num, _, _ = ids.shape\n\n        expr = np.array(expr_meshes)\n        expr -= np.expand_dims(neutral, axis=0)\n        # expr_bar = expr[..., lmk_idx, :]\n        expr_num, _, _ = expr.shape\n\n        # lmks_2d = np.array(lmks_2d)\n\n        # def get_combine_bar_model( w_i, w_e):\n        #     nonlocal neutral_bar, expr_bar, ids_bar\n        def get_combine_bar_model(neutral_bar, ids_bar, expr_bar, w_i, w_e):\n            expr_num, expr_v_size, expr_dim = expr_bar.shape\n            id_num, id_v_size, id_dim = ids_bar.shape\n\n            reshaped_expr = expr_bar.reshape(expr_num, expr_v_size*expr_dim).T\n            reshaped_id = ids_bar.reshape(id_num, id_v_size*id_dim).T\n            res = reshaped_id@w_i + reshaped_expr@w_e\n            # res = res.reshape(id_dim, id_v_size).T\n            res = res.reshape(id_v_size, id_dim)\n            return neutral_bar + res\n        def get_combine_model(w_i, w_e):\n            nonlocal neutral, expr, ids\n            expr_num, expr_v_size, expr_dim = expr.shape\n            new_exps = expr.reshape(expr_num, expr_v_size*expr_dim).T\n            id_num, id_v_size, id_dim = ids.shape\n            new_ids = ids.reshape(id_num, id_v_size*id_dim).T\n            # exp_res = new_exps@w_e\n            # exp_res = exp_res.T.reshape(c,d)\n            # id_res = new_ids@w_i\n            # id_res = id_res.T.reshape(g,h)\n            res = new_ids@w_i + new_exps@w_e\n            res = res.reshape(id_v_size, id_dim)\n            return neutral + res\n\n        import copy\n        def draw_circle(v, img, colors=(1.0, 0.0, 0.0)):\n            for vv in v:\n                cv2.circle(img, center=vv.astype(int), radius=10, color=colors, thickness=2)\n\n        def resize(img, width):\n            h, w, c = img.shape\n            ratio = width / w\n            new_h = h*ratio\n            img = cv2.resize(img, (int(width), int(new_h)))\n            return img\n\n        def calc_Rt():\n            pass\n\n        def find_contour(lmk2d, proj_3d_v):\n            hull = sp.ConvexHull(proj_3d_v)\n            convex_index = hull.vertices\n            kd = sp.cKDTree(proj_3d_v[convex_index, :])\n            d, idx = kd.query(lmk2d)\n            return convex_index[idx]\n\n        def draw_cv(index, expr_index, flag, id_weight, expr_weights, cam_scales, cam_rots, cam_tvecs):\n            img = self.img_list[index]['img_data']\n            truth = copy.deepcopy(img)\n            test = copy.deepcopy(img)\n            sel_lmk = 
np.array(self.img_list[index]['lmk_data'])\n exp_weight = expr_weights[expr_index]\n \n sel_index_list = lmk_idx_list[index]\n verts_3d = get_combine_bar_model(neutral[sel_index_list, :], ids[:,sel_index_list,:], expr[:,sel_index_list,:], id_weight, exp_weight)\n out_of_concern_idx = [i for i in range(len(neutral)) if i not in sel_index_list]\n non_sel_mesh_v = get_combine_model(id_weight, exp_weight)\n non_sel_mesh_v = non_sel_mesh_v[out_of_concern_idx]\n # draw_circle(transform_lm3d(verts_3d, 1, np.eye(3,3), np.zeros((3,1))), test, (255,0,0))\n non_sel_mesh_v = non_sel_mesh_v[::int(len(non_sel_mesh_v)//100),:]\n draw_circle(transform_lm3d(non_sel_mesh_v, cam_scales[index],cam_rots[index],cam_tvecs[index]), test, (0,255,0))\n draw_circle(transform_lm3d(verts_3d, cam_scales[index],cam_rots[index],cam_tvecs[index]), test, (0,0,255))\n draw_circle(sel_lmk, truth, (255,0,0))\n truth = resize(truth, 800)\n test = resize(test, 800)\n show_img = np.concatenate([truth, test], 1)\n cv2.imshow(\"test\", show_img)\n \n path_name = osp.join(\"testdir\", str(index))\n if not os.path.exists(path_name):\n os.makedirs(path_name)\n vv = get_combine_model(id_weight, exp_weight)\n igl.write_triangle_mesh(os.path.join(path_name, \"test\" + \".obj\"), vv, self.neutral_mesh_f)\n if not flag:\n key = cv2.waitKey(0)\n else:\n key = cv2.waitKey(100)\n\n if key == ord('q'):\n return False\n elif key == ord('a'):\n return True\n else :\n return True\n \n\n\n def draw_contour(img, lmk, new_contour, orig, flag):\n draw_circle(lmk,img, colors=(255,0,0))\n draw_circle(new_contour,img, colors=(0,0,255))\n draw_circle(orig, img, colors=(0,255,255))\n for i in range(len(new_contour) -1):\n cv2.line(img, new_contour[i].astype(int), new_contour[i+1].astype(int), color=(0,255,0), thickness=3)\n\n for i in range(len(orig) -1):\n cv2.line(img, orig[i].astype(int), orig[i+1].astype(int), color=(255,255,0), thickness=3)\n img= resize(img, 1500)\n cv2.imshow(\"contour\", img)\n if not flag:\n key = cv2.waitKey(0)\n else:\n key = cv2.waitKey(100)\n\n if key == ord('q'):\n return False\n elif key == ord('a'):\n return True\n else :\n return True\n\n def coordinate_descent(cost_function, Q, init_x, y, iter_num, eps = 10e-7):\n if len(init_x.shape) == 1 : \n init_x = init_x.reshape(-1, 1)\n def cost_wrapper(x):\n cons = x[6:, :] # regularization term\n return cost_function(Q, neutral, pose, x, y) + cons.T@cons \n \n def cost_grad_wrapper(ind):\n def wrapper(x):\n copied_x = np.copy(x)\n copied_x[ind, 0] -= eps\n f_val = cost_wrapper(copied_x)\n copied_x[ind, 0] += 2*eps\n f_h_val = cost_wrapper(copied_x)\n gradient = (f_h_val - f_val)/(2*eps)\n gradient_array = np.zeros_like(x)\n gradient_array[ind, 0 ] = gradient\n\n return gradient_array.T \n def full_grad(x):\n grad_array = np.zeros_like(x)\n for i in range(len(x)):\n copied_x = np.copy(x)\n copied_x -= eps\n f_val = cost_wrapper(copied_x)\n copied_x[i, 0] += eps\n f_h_val = cost_wrapper(copied_x)\n gradient = (f_h_val - f_val)/eps*2\n grad_array[i, 0 ] = gradient\n return grad_array.T \n return wrapper, full_grad\n \n x = np.copy(init_x)\n for iter_i in range(iter_nums):\n for i in range(len(x)):\n f_val = cost_wrapper(x)\n # x[i, 0] += eps\n sel_idx_grad_func, full_gradient_func = cost_grad_wrapper(i)\n coord_grad = sel_idx_grad_func(x).T\n gradient_direction = full_gradient_func(x).T\n # if np.abs(coord_grad[i]) < 1.88e-6: # if too small gradient value, line search can't find appropriate alpha.(they return None...)\n # continue\n # f_val_h = cost_function(neutral, x, y)\n # 
f_grad = (f_val_h-f_val)/eps\n # x[i, 0] -= eps\n re = opt.line_search(cost_wrapper, sel_idx_grad_func, x, -coord_grad)\n # re = opt.line_search(cost_wrapper, sel_idx_grad_func, x, -gradient_direction)\n alpha = re[0]\n # for safety. when we put too small, and opposite gradient direction into line_search, function will return None,\n # this if prevent too small gradient.\n \n if alpha is None : \n alpha = 0\n \n x -= coord_grad*alpha\n\n x[6:,0] = np.clip(x[6:, 0], 0.0, 1.0)\n if i in [0,1,2]:\n x[i] %= np.pi*2\n print(\"iter : \", iter_i, \"i-th of w : \", i,\"cost : \", f_val, \"\\nx\", x.ravel(), \"alpha : \", alpha, \"\")\n\n iter_num = 10\n expr_weights = [np.zeros((expr_num, 1)) for _ in range(len(self.img_and_info.keys()))]\n id_weight = np.zeros((id_num, 1))\n time_t = True\n Q_list = [[] for _ in range(len(self.img_list))]\n Rt_list = [[] for _ in range(len(self.img_list))]\n for i in tqdm.tqdm(range(iter_num)):\n \n for key_id, item in tqdm.tqdm(enumerate(self.img_and_info.values())):\n sel_imgs = [info['img_data'] for info in item ]\n lmk_2ds = np.array([info['lmk_data'] for info in item ])\n \n expr_Q_list = [ self.calc_cam_intrinsic_Q(0, info['img_data'].shape[1], info['img_data'].shape[0]) for info in item ]\n expr_Rt_list = [calc_Rt() for info in item ] # TODO \n\n\n index_list = [ info['index'] for info in item ]\n sel_lmk_idx_list = [ lmk_idx_list[info['index']] for info in item ]\n verts_3ds = [get_combine_bar_model( neutral[sel_lmk_idx, :], ids[:, sel_lmk_idx,:], expr[:, sel_lmk_idx, :], id_weight, expr_weights[key_id]) for sel_lmk_idx in sel_lmk_idx_list]\n \n\n for ii, Q, Rt in zip(index_list, expr_Q_list,expr_Rt_list):\n Q_list[ii] = Q\n Rt_list[ii] = Rt\n \n exp_weight = coordinate_descent(neutral, ids, expr, sel_lmk_idx_list ,lmk_2ds, expr_Rt_list, expr_Q_list)\n for local_i, ii, Q, Rt, mesh_lmk_mapping_idx in enumerate(zip(self, index_list, Q_list, Rt_list, sel_lmk_idx_list)):\n h,w, _ = sel_imgs[local_i].shape\n new_Q = self.find_fit_projection_mat(single_energy_term, neutral, ids, expr, mesh_lmk_mapping_idx, id_weight, exp_weight, Q, Rt, self.convert_focal_length(h,w, 50))\n Q_list[ii] = new_Q \n expr_weights[key_id] = exp_weight\n #===========================================================================================\n # expr_weights.append(exp_weight)\n for index in index_list:\n # time_t = draw_cv(index, time_t, id_weight, expr_weights, cam_scales, cam_rots, cam_tvecs)\n time_t = draw_cv(index,key_id, time_t, id_weight, expr_weights, cam_scales, cam_rots, cam_tvecs)\n path_name = osp.join(\"testdir\", str(index))\n if not os.path.exists(path_name):\n os.makedirs(path_name)\n vv = get_combine_model(id_weight, exp_weight)\n igl.write_triangle_mesh(os.path.join(path_name, \"opt1_iter_num_\" +str(i)+ \".obj\"), vv, self.neutral_mesh_f)\n \n\n # phase 2\n # calculate exact id_weights and contour(optional)\n # res_id_weight = id_weight_fit(self.img_list, cam_scales, cam_rots, cam_tvecs, expr_weights)\n res_id_weight = id_weight_fit(neutral, ids, expr,lmk_idx_list ,self.img_list, cam_scales, cam_rots, cam_tvecs, expr_weights)\n\n def extract_train_set_blendshapes(train_set_images_lmks, neutral_pose, blendshapes):\n \"\"\"\n this method use shape_fit method's actor(user)-specific blendshapes result.\n neutral pose : user-specific neutral pose\n blendshapes : user specific blendshapes\n ===========================================================================\n return\n weights that are extracted from user-specific blendshapes.\n \"\"\"\n v_size, dim = 
neutral_pose.shape\n b_size, _, _ = blendshapes\n\n\n \n\n \n\n\n \nif __name__ == \"__main__\":\n p = PreProp(\"landmark/meta.yaml\", \"prep_data\")\n p.build()\n print(len(lmk_idx))\n # p.simple_camera_calibration(p.images[0], p.lmks[0], p.meshes[0][0], lmk_idx)\n p.shape_fit(p.id_meshes, p.expr_meshes, lmk_idx)\n\n","repo_name":"fabyday/3D-Shape-Regression-for-Real-time-Facial-Animation","sub_path":"preprop_coordinate_dscent.py","file_name":"preprop_coordinate_dscent.py","file_ext":"py","file_size_in_byte":19770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41090214950","text":"import pickle\nimport os\n\n'''\n[{}, {}, ..., {}]\nrequire \n'attack_score' MPD 1 str->float 2\n'advsrc' 1 str.strip() 2\n'replace_wrod_list' 1 str->list 2\n'word_idx' 1 str->list 2\n'advsrc_pred' 1 str.strip() 2\n'advsrc_bp' 1 str.strip() 2\n'statueTages' 1 str->dict\n'''\n\ndumped_dir = '../dumped/en_de_en/transformer/gogr/job0/TRANSFORMERoracle/_random_order_greedy'\nWORDIDX_file = dumped_dir+'/'+'adv_word_idx.txt'\nADVBACK_file = dumped_dir+'/'+'advback.txt'\nADVPRED_file = dumped_dir+'/'+'advpred.txt'\nADVSRC_file = dumped_dir+'/'+'advsrc.txt'\nMPD_file = dumped_dir+'/'+'record_MPD.txt'\nREPWORDLIST_file = dumped_dir + '/' + 'replace_word_list.txt'\nSTATUE_file = dumped_dir + '/' + 'statetags.txt'\n\noutput_dir = '../wsls_init/'\nif not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\nres_list = []\n\nWORDIDX_op = open(WORDIDX_file, 'r')\nfor wordidx in WORDIDX_op.readlines():\n wordidx = eval(wordidx.strip())\n res_list.append({'word_idx':wordidx})\n\nWORDIDX_op.close()\n\nADVSRC_op = open(ADVSRC_file, 'r')\nadvsrcs = ADVSRC_op.readlines()\nADVPRED_op = open(ADVPRED_file, 'r')\nadvpreds = ADVPRED_op.readlines()\nADVBACK_op = open(ADVBACK_file, 'r')\nadvbacks = ADVBACK_op.readlines()\nMPD_op = open(MPD_file, 'r')\nMPDs = MPD_op.readlines()\nREPWORDLIST_op = open(REPWORDLIST_file, 'r')\nrepword_lists = REPWORDLIST_op.readlines()\nSTATUE_op = open(STATUE_file, 'r')\nstatues = STATUE_op.readlines()\n\nfor i in range(len(advsrcs)):\n res_list[i]['advsrc'] = advsrcs[i].strip()\n res_list[i]['advsrc_pred'] = advpreds[i].strip()\n res_list[i]['advsrc_bp'] = advbacks[i].strip()\n res_list[i]['attack_score'] = eval(MPDs[i].strip())\n res_list[i]['replace_word_list'] = eval(repword_lists[i].strip())\n res_list[i]['statueTages'] = eval(statues[i].strip())\n\n\n\n\nADVSRC_op.close()\nADVPRED_op.close()\nADVBACK_op.close()\nMPD_op.close()\nREPWORDLIST_op.close()\nSTATUE_op.close()\n\nouptut_file = output_dir+'/'+'job0.init'\noutput_op = open(ouptut_file, 'wb')\n\npickle.dump(res_list, output_op)\n\noutput_op.close()","repo_name":"JHL-HUST/AdvNMT-WSLS","sub_path":"tools/generate_start.py","file_name":"generate_start.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"20127440381","text":"from selenium import webdriver\nimport time\nimport math\nimport re\nimport requests\nimport json\nimport subprocess\nfrom pathlib import Path\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom termcolor import colored, cprint\n\nROOT = \"http://localhost:8080\"\nAPILOGIN = \"http://localhost:8080/api/login\"\nADMIN = [\"lordvishwa123@gmail.com\", \"Arduino123\"]\nNONADMIN = 
[\"lord8266@a.com\", 'Arduino123']\n\n\ndef get_driver():\n options = webdriver.ChromeOptions()\n # Path to your chrome profile\n # options.add_argument(\n # '--user-data-dir=C:\\\\Users\\\\Vishwa\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data')\n # options.add_argument('--profile-directory=Profile 1')\n w = webdriver.Chrome(\n executable_path=\"E:\\SeleniumDrivers\\chromedriver.exe\",\n chrome_options=options)\n return w\n\n\ndef wait(n):\n time.sleep(n)\n\n\nclass MyMovieNavigator:\n def logged_in(self):\n return len(w.find_elements_by_id(\"logoutbtn\")) > 0\n\n def login(self, username, password):\n w.get(ROOT)\n wait(0.3)\n if self.logged_in():\n w.find_element_by_id(\"logoutbtn\").click()\n wait(0.5)\n w.find_element_by_id(\"inputEmail\").send_keys(username)\n w.find_element_by_id(\"inputPassword\").send_keys(password)\n w.find_element_by_id(\"lbtn\").click()\n wait(2)\n if not self.logged_in():\n return False\n else:\n return True\n\n\nclass TestException(Exception):\n def __init__(self, err):\n super().__init__()\n self.err = err\n\n\ndef reset_db():\n proc = subprocess.Popen(\n \"python ../scripts/reset.py\",\n cwd=(Path.cwd().parent/\"scripts\").resolve(), shell=True, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\nclass BackendTestBase:\n def __init__(self):\n self.sess = requests.Session()\n self.login()\n\n def login(self):\n res = self.post_request(APILOGIN, {\n 'email': ADMIN[0], 'password': ADMIN[1]\n })\n\n def post_request(self, url, data):\n res = self.sess.post(url, data=json.dumps(data), headers={\n 'Content-Type': 'application/json'\n })\n return res\n\n def get_request(self, url):\n res = self.sess.get(url)\n return res\n\n\nclass ASeleniumTests:\n\n def list_movies(self):\n w.get(ROOT)\n wait(0.3)\n w.find_element_by_id(\"search_text\").clear()\n w.find_element_by_id(\"search_movie\").click()\n wait(0.5)\n l = len(w.find_elements_by_css_selector(\".movieItem\"))\n if l != 21:\n return False, f'Movie items found: {l}'\n else:\n return True, f'Movie items found: {l}'\n\n def search_term(self, term='avengers', n=3):\n w.find_element_by_id(\"search_text\").clear()\n w.find_element_by_id(\"search_text\").send_keys(term)\n w.find_element_by_id(\"search_movie\").click()\n wait(0.5)\n l = len(w.find_elements_by_css_selector(\".movieItem\"))\n if l != n:\n return False, f'Movie items found: {l}'\n else:\n return True, f'Movie items found: {l}'\n\n def case_insenitive(self, term='avengers', n=3):\n t1 = self.search_term(term)\n t2 = self.search_term(term.upper())\n if not t1 and t2:\n return False, t1[1]+'\\n'+t2[1]\n else:\n return True, t1[1]\n\n def noresults(self, term='2121212'):\n w.find_element_by_id(\"search_text\").clear()\n w.find_element_by_id(\"search_text\").send_keys(term)\n w.find_element_by_id(\"search_movie\").click()\n wait(0.5)\n l = len(w.find_elements_by_css_selector(\".movieItem\"))\n if l:\n return False, f'Movie items found: {l}'\n else:\n return True, f'Movie items found: {l}'\n\n def select_movie(self):\n w.get(ROOT)\n wait(0.3)\n w.find_element_by_id(\"search_text\").clear()\n w.find_element_by_id(\"search_movie\").click()\n wait(0.5)\n m = w.find_elements_by_css_selector(\".movieItem div\")[3]\n m.click()\n wait(0.5)\n if re.search(r'/movie/.*', w.current_url):\n return True, f'Found {w.find_element_by_css_selector(\"h1\").text}'\n else:\n return False, 'Could not load page'\n\n def run_tests(self):\n print(self.list_movies())\n print(self.search_term())\n 
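        # Each check above and below returns a (passed, message) tuple; run_tests only\n        # prints them, while the RUNNER table at the bottom of this file calls the same\n        # methods and tallies the pass/fail counts.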
print(self.case_insenitive())\n print(self.noresults())\n print(self.select_movie())\n\n\nclass ABackendAPITests(BackendTestBase):\n\n def search_movies(self, term=\"\"):\n res = self.post_request(\"http://localhost:8080/search\", {\n \"term\": term\n })\n content = json.loads(res.content.decode())\n return content\n\n def all_movies(self):\n c = self.search_movies()\n l = len(c)\n if len(c) != 21:\n return False, f'Movie items found: {l}'\n else:\n return True, f'Movie items found: {l}'\n\n def search_term(self):\n c = self.search_movies('avengers')\n l = len(c)\n if len(c) != 3:\n return False, f'Movie items found: {l}'\n else:\n return True, f'Movie items found: {l}'\n\n def case_insensitive(self):\n c1 = self.search_movies('avengers')\n c2 = self.search_movies('AVENGERS')\n if len(c1) != len(c2):\n return False, f'Movie items found: {len(c1)}, {len(c2)}'\n else:\n return True, f'Movie items found: {len(c1)}'\n\n def valid_route(self):\n res = self.sess.get(\n \"http://localhost:8080/movie/60797618920c419d02d26e9d\")\n res2 = self.sess.get(\n \"http://localhost:8080/movie/60797618920c419d02d26e9dd\")\n if res.status_code != 200 or res2.status_code != 503:\n return False, f'Found Status { res.status_code} { res2.status_code}'\n else:\n return True, 'Movie routes are handled correctly'\n\n def run_tests(self):\n self.login()\n print(self.all_movies())\n print(self.search_term())\n print(self.case_insensitive())\n print(self.valid_route())\n\n\nclass BSeleniumTests:\n\n def __init__(self, reset=True):\n if reset:\n reset_db()\n print(\"Reset DB\")\n w.get(\"http://localhost:8080/movie/60797618920c419d02d26e9d\")\n wait(1)\n\n def check_shows(self):\n show = w.find_elements_by_css_selector(\".show\")\n\n if not show:\n return False, 'Shows not found'\n else:\n show = show[0]\n return True, 'Show found '+show.text\n\n def select_show(self):\n show = w.find_elements_by_css_selector(\".show\")\n show = show[0]\n show.click()\n wait(0.5)\n if w.current_url[-2:] != '/0':\n return False, 'Show not selected'\n else:\n return True, 'Show is selected'\n\n def select_seats(self):\n seats = w.find_elements_by_css_selector(\".seat\")\n seatsi = [30, 31, 32]\n for s in seatsi:\n s = seats[s]\n s.click()\n wait(0.1)\n p = round(float(w.find_element_by_id(\"price\").text))\n if p != 6000:\n return False, 'Price is not correct'\n else:\n return True, 'Price is correct'\n\n def book_tickets(self):\n b = w.find_element_by_id(\"bookb\")\n b.click()\n wait(1)\n if w.current_url != \"http://localhost:8080/tickets\":\n return False, 'Ticket not booked'\n if w.find_elements_by_css_selector(\".book\"):\n return True, 'Ticket booked'\n return False, 'Booking not found'\n\n def check_seats_booked(self):\n w.get(\"http://localhost:8080/movie/60797618920c419d02d26e9d/0\")\n wait(0.5)\n unavailables = w.find_elements_by_css_selector(\".seat.unavailable\")\n if len(unavailables) != 3:\n return False, 'Seats not blacked'\n else:\n return True, 'Seats are booked'\n\n def run_tests(self):\n print(self.check_shows())\n print(self.select_show())\n print(self.select_seats())\n print(self.book_tickets())\n print(self.check_seats_booked())\n\n\nclass BBackendAPITests(BackendTestBase):\n\n def __init__(self, reset=True):\n super().__init__()\n if reset:\n reset_db()\n print(\"Reset DB\")\n\n def book_tickets(self):\n tickets = [\"30\", \"31\"]\n res = self.post_request(\n \"http://localhost:8080/movie/60797618920c419d02d26e9d/0/book\", {\n 'tickets': tickets\n })\n if res.status_code != 200:\n return False, f'Received 
{res.status_code}: {res.content.decode()}'\n data = json.loads(res.content.decode())\n if data['data']['cost'] != 4000:\n return False, f\"Received invalid price {data['price']}\"\n return True, f'Received correct response {res.content.decode()}'\n\n def invalid_tickets(self):\n tickets = [\"30\", \"-1-1\"]\n res = self.post_request(\n \"http://localhost:8080/movie/60797618920c419d02d26e9d/0/book\", {\n 'tickets': tickets\n })\n if res.status_code == 200:\n return False, f'Ticket booked for invalid input, Received {res.content.decode()}'\n elif res.status_code == 503:\n return True, f'Received correct error response {res.content.decode()}'\n else:\n return False, f'Recieved invalid status code {res.status_code}'\n\n def unavailable_tickets(self):\n tickets = [\"30\"]\n res = self.post_request(\n \"http://localhost:8080/movie/60797618920c419d02d26e9d/0/book\", {\n 'tickets': tickets\n })\n if res.status_code == 200:\n return False, f'Ticket booked for invalid input, Received {res.content.decode()}'\n elif res.status_code == 403:\n return True, f'Received correct error response {res.content.decode()}'\n else:\n return False, f'Recieved invalid status code {res.status_code}'\n\n def check_tickets_booked(self):\n res = self.get_request(\n \"http://localhost:8080/movie/api/60797618920c419d02d26e9d/0\")\n if res.status_code != 200:\n return False, f'Received {res.status_code}: {res.content.decode()}'\n data = json.loads(res.content.decode())\n show = data['shows'][0]\n if 0 not in show['tickets'][3]:\n return True, f'Ticket confirmed to be booked'\n else:\n return False, 'Ticket was not booked'\n\n def run_tests(self):\n print(self.book_tickets())\n print(self.invalid_tickets())\n print(self.unavailable_tickets())\n print(self.check_tickets_booked())\n\n\nclass CSeleniumTest:\n\n def __init__(self, reset=True):\n if reset:\n reset_db()\n print(\"Reset DB\")\n\n def check_admin_access(self):\n MyMovieNavigator().login(*NONADMIN)\n w.get(\"http://localhost:8080/addmovie\")\n wait(0.5)\n\n if w.current_url == 'http://localhost:8080/addmovie':\n return False, 'Non Admin has invalid access'\n MyMovieNavigator().login(*ADMIN)\n w.get(\"http://localhost:8080/addmovie\")\n wait(0.5)\n if w.current_url != 'http://localhost:8080/addmovie':\n return False, 'Admin doesnt have proper access'\n return True, 'Access is correct'\n\n def show_date_invalid(self):\n w.get(\"http://localhost:8080/movie/60797618920c419d02d26ea3/addshow\")\n wait(0.5)\n w.execute_script(\n \"document.getElementById('date').value=arguments[0]\", '2021-04-19')\n w.find_element_by_css_selector(\".selection button\").click()\n wait(0.2)\n if (w.find_element_by_id(\"date\").get_attribute(\"value\") == ''):\n return True, 'Invalid date is not accepted'\n else:\n return False, 'Invalid date is accepted'\n\n def show_date_valid(self):\n w.execute_script(\n \"document.getElementById('date').value=arguments[0]\", '2021-04-22')\n wait(0.2)\n option = w.find_element_by_css_selector('#slot option[value=\"12:PM\"]')\n option.click()\n wait(0.2)\n w.find_element_by_css_selector(\".selection button\").click()\n if (w.find_element_by_id(\"date\").get_attribute(\"value\") != ''):\n return True, 'Valid date is accepted'\n else:\n return False, 'Valid date is not accepted'\n\n def check_rooms(self):\n WebDriverWait(w, 5).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, \"#room option\")))\n l = w.find_elements_by_css_selector(\"#room option\")\n if len(l) == 10:\n return True, 'All rooms are returned'\n else:\n return False, 'All rooms are not 
returned'\n\n def add_show(self):\n w.find_element_by_id(\"price\").send_keys(500)\n w.find_element_by_id(\"addshow\").click()\n wait(0.5)\n if w.current_url != 'http://localhost:8080/movie/60797618920c419d02d26ea3':\n return False, 'The show was not added'\n if w.find_elements_by_css_selector(\".show\"):\n return True, 'The show was added'\n return False, 'The show was not added'\n\n def remove_show(self):\n w.get(\"http://localhost:8080/movie/60797618920c419d02d26ea3/0\")\n WebDriverWait(w, 5).until(EC.url_to_be(\n \"http://localhost:8080/movie/60797618920c419d02d26ea3/0\"))\n WebDriverWait(w, 2).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \"#remove_show\"))).click()\n w.find_element_by_id(\"remove_show\").click()\n try:\n WebDriverWait(w, 5).until(EC.url_to_be(\n \"http://localhost:8080/movie/60797618920c419d02d26ea3\"))\n return True, 'The show was removed'\n except TimeoutException:\n return False, 'The show was not removed'\n\n def add_movie(self):\n w.get(\"http://localhost:8080/addmovie\")\n try:\n elem = WebDriverWait(w, 4).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \"#search\")))\n w.find_element_by_id(\"imdbid\").send_keys(\"tt4154796\")\n elem.click()\n a = WebDriverWait(w, 4).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \"#addm\")))\n a.click()\n return True, 'The movie was added'\n except TimeoutException:\n return False, 'The movie could not be added'\n\n def remove_movie(self):\n try:\n WebDriverWait(w, 4).until(EC.element_to_be_clickable(\n (By.CSS_SELECTOR, '#link'))).click()\n WebDriverWait(w, 4).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, '#removeMovie'))).click()\n WebDriverWait(w, 4).until(EC.url_to_be(\"http://localhost:8080/\"))\n return True, 'The movie was removed'\n except TimeoutException:\n return False, 'The movie was not removed'\n\n def run_tests(self):\n print(self.check_admin_access())\n print(self.show_date_invalid())\n print(self.show_date_valid())\n print(self.check_rooms())\n print(self.add_show())\n print(self.remove_show())\n print(self.add_movie())\n print(self.remove_movie())\n\n\nclass CBackendAPITests(BackendTestBase):\n\n def __init__(self, reset=True):\n super().__init__()\n if reset:\n reset_db()\n print(\"Reset DB\")\n\n def will_showdate_collide(self):\n mid = '60797618920c419d02d26e9d'\n res = self.post_request(f'http://localhost:8080/movie/{mid}/will_showdate_collide', {\n 'date': '2021-04-22'\n })\n if res.status_code != 200:\n return False, 'Route isnt working'\n data = json.loads(res.content.decode())\n if data['result']:\n return True, 'Returned correct result'\n return False, 'Returned wrong result'\n\n def get_free_rooms(self):\n mid = '60797618920c419d02d26e9f'\n res = self.post_request(f'http://localhost:8080/movie/{mid}/free_rooms', {\n 'date': '2021-04-22',\n 'slot': '9:AM'\n })\n if res.status_code != 200:\n return False, 'Route isnt working'\n data = json.loads(res.content.decode())\n if sorted(data['result']) == [0, 1, 3, 4, 5, 6, 7, 8, 9]:\n return True, 'Returned correct result'\n return False, 'Returned wrong result'\n\n def add_show_valid(self):\n mid = '60797618920c419d02d26e9f'\n res = self.post_request(f'http://localhost:8080/movie/{mid}/addshow', {\n 'date': '2021-04-22',\n 'slot': '9:AM',\n 'room': 0,\n 'price': 500\n })\n if res.status_code != 200:\n return False, 'Route isnt working'\n data = json.loads(res.content.decode())\n if data['result'] == 'ok':\n return True, 'Show was added'\n return False, 'Show wasnt added'\n\n def add_show_existing(self):\n mid = 
'60797618920c419d02d26e9f'\n res = self.post_request(f'http://localhost:8080/movie/{mid}/addshow', {\n 'date': '2021-04-22',\n 'slot': '9:AM',\n 'room': 0,\n 'price': 500\n })\n if res.status_code != 200:\n return True, 'Existing show was identified'\n else:\n return False, 'Existing show was not identified'\n\n def add_movie_valid(self):\n tt = 'tt4154796'\n res = self.post_request(f'http://localhost:8080/addmovie/'+tt, {})\n if res.status_code == 200:\n return True, 'Movie was added successfully'\n else:\n return False, 'Movie was not added'\n\n def add_movie_invalid(self):\n tt = 'hahahsdd3w'\n res = self.post_request(f'http://localhost:8080/addmovie/'+tt, {})\n if res.status_code != 200:\n return True, 'Invalid Movie was identified'\n else:\n return False, 'Invalid Movie was not identified'\n\n def run_tests(self):\n print(self.will_showdate_collide())\n print(self.get_free_rooms())\n print(self.add_show_valid())\n print(self.add_show_existing())\n print(self.add_movie_valid())\n print(self.add_movie_invalid())\n\n\nAS = [\n {\n 'name': 'List All Movies',\n 'desc': 'List all movies in the home page and check if all are shown',\n 'func': ASeleniumTests.list_movies\n },\n {\n 'name': 'Search for a term',\n 'desc': 'Search for term \"avengers\" and verify results',\n 'func': ASeleniumTests.search_term\n },\n {\n 'name': 'Case Insensitive search',\n 'desc': 'Make sure searches can be case insensitive',\n 'func': ASeleniumTests.case_insenitive\n },\n {\n 'name': 'A term not found',\n 'desc': 'Make sure no results are returned when term doesnt match all movies',\n 'func': ASeleniumTests.noresults\n },\n {\n 'name': 'Select a movie',\n 'desc': 'Select a movie and check if it shows more details about it when clicked',\n 'func': ASeleniumTests.select_movie\n }\n]\nAB = [\n {\n 'name': 'Search route: EMPTY TERM',\n 'desc': 'Search for an empty and verify results ',\n 'func': ABackendAPITests.all_movies\n },\n {\n 'name': 'Search route: Avengers',\n 'desc': 'Search for a term and verify results ',\n 'func': ABackendAPITests.search_term\n },\n {\n 'name': 'Search route: Case insensitive',\n 'desc': 'Make sure searches are case insensitive ',\n 'func': ABackendAPITests.case_insensitive\n },\n {\n 'name': 'Movie routes ',\n 'desc': 'Check a valid movie route and an invalid, verify results',\n 'func': ABackendAPITests.valid_route\n }\n]\nBS = [\n {\n 'name': 'Check Shows',\n 'desc': 'Check if shows are displayed for a movie',\n 'func': BSeleniumTests.check_shows\n },\n {\n 'name': 'Select Show',\n 'desc': 'Select a show and verify seat matrix is displayed',\n 'func': BSeleniumTests.select_show\n },\n {\n 'name': 'Select seats',\n 'desc': 'Select seats and make sure the price displayed is correct',\n 'func': BSeleniumTests.select_seats\n },\n {\n 'name': 'Book seats',\n 'desc': 'Book selected seats and make sure the booking is made',\n 'func': BSeleniumTests.book_tickets\n },\n {\n 'name': 'Check seats are booked',\n 'desc': 'Verify that the seats booked are black when visiting it again',\n 'func': BSeleniumTests.check_seats_booked\n }\n]\nBB = [\n {\n 'name': 'Book ticket route',\n 'desc': 'Request to book tickets which are available and valid, verify response',\n 'func': BBackendAPITests.book_tickets\n },\n {\n 'name': 'Book ticket route: Invalid tickets',\n 'desc': 'Request to book tickets with invalid body, verify response',\n 'func': BBackendAPITests.invalid_tickets\n },\n {\n 'name': 'Book ticket route: Unavailable tickets',\n 'desc': 'Request to book unavailable tickets, verify response',\n 
'func': BBackendAPITests.unavailable_tickets\n },\n {\n 'name': 'Show route',\n 'desc': 'Verify that booked tickets are really booked',\n 'func': BBackendAPITests.check_tickets_booked\n }\n\n]\nCS = [\n {\n 'name': 'Check Admin access',\n 'desc': 'Login as different users, check that only admin can add movies',\n 'func': CSeleniumTest.check_admin_access\n },\n {\n 'name': 'Show date invalid',\n 'desc': 'Select an invalid date and check if it isnt allowed',\n 'func': CSeleniumTest.show_date_invalid\n },\n {\n 'name': 'Show date valid',\n 'desc': 'Select an valid date and check if doesnt allow it',\n 'func': CSeleniumTest.show_date_valid\n },\n {\n 'name': 'Check if rooms are available',\n 'desc': 'Select show date and slot, verify if all rooms are returned',\n 'func': CSeleniumTest.check_rooms\n },\n {\n 'name': 'Add a show',\n 'desc': 'Add show with required details, verify that it is added',\n 'func': CSeleniumTest.add_show\n },\n {\n 'name': 'Remove a show',\n 'desc': 'Remove a show, verify that is removed',\n 'func': CSeleniumTest.remove_show\n },\n {\n 'name': 'Add a movie using imdb id',\n 'desc': 'Add a movie, verify that is added',\n 'func': CSeleniumTest.add_movie\n },\n {\n 'name': 'Remove a movie',\n 'desc': 'Remove a movie, verify that is removed',\n 'func': CSeleniumTest.remove_movie\n },\n\n]\nCB = [\n {\n 'name': 'will_showdate_collide',\n 'desc': 'Verify that the same date cant be chosen for a show again',\n 'func': CBackendAPITests.will_showdate_collide\n },\n {\n 'name': 'get_free_rooms',\n 'desc': 'Verify that all free rooms are returned for a particular date and slot',\n 'func': CBackendAPITests.get_free_rooms\n },\n {\n 'name': 'add_show_valid',\n 'desc': 'Verify that a valid show is added',\n 'func': CBackendAPITests.add_show_valid\n },\n {\n 'name': 'add_show_existing',\n 'desc': 'Verify that an exisiting show cant be added again',\n 'func': CBackendAPITests.add_show_existing\n },\n {\n 'name': 'add_movie_valid',\n 'desc': 'Verify that a new movie can be added with imdb id',\n 'func': CBackendAPITests.add_movie_valid\n },\n {\n 'name': 'add_movie_invalid',\n 'desc': 'Verify that a invalid movie is not added which doesnt exist in imdb',\n 'func': CBackendAPITests.add_movie_invalid\n },\n\n]\n\nRUNNER = [\n {\n 'usecase': 'Movie Listing and Selection',\n 'cats': [\n {\n 'cat': 'Backend Tests',\n 'c': ABackendAPITests,\n 'data': AB\n },\n {\n 'cat': 'Selenium Tests',\n 'c': ASeleniumTests,\n 'data': AS\n }\n ]\n },\n {\n 'usecase': 'Seat Selection and Booking',\n 'cats': [\n {\n 'cat': 'Backend Tests',\n 'c': BBackendAPITests,\n 'data': BB\n },\n {\n 'cat': 'Selenium Tests',\n 'c': BSeleniumTests,\n 'data': BS\n }\n ]\n },\n {\n 'usecase': 'Event Management',\n 'cats': [\n {\n 'cat': 'Backend Tests',\n 'c': CBackendAPITests,\n 'data': CB\n },\n {\n 'cat': 'Selenium Tests',\n 'c': CSeleniumTest,\n 'data': CS\n }\n ]\n },\n]\nprint(\"Loading Driver\")\nw = get_driver()\nprint(\"Loaded driver\")\n\nMyMovieNavigator().login(*ADMIN)\n\nprint(\"Press enter to continue\")\ninput()\n\nprint(\"Resetting DB\")\nreset_db()\nprint(\"Reset DB\")\n\ns = ABackendAPITests()\ni = 0\npassed = 0\nfor r in RUNNER:\n usecase = r['usecase']\n print(usecase+\"\\n\")\n for cat in r['cats']:\n # if cat['cat']!='Backend Tests' or usecase!='Event Management':\n # break\n print(cat['cat']+\"\\n\")\n c = cat['c']()\n for t in cat['data']:\n try:\n cprint(f\"{i+1}: {t['name']}\",'yellow')\n print(f\"-> {t['desc']}\")\n res, data = t['func'](c)\n print(\"Result: \", colored(\"PASSED\",'green') if res 
else colored(\"FAILED\",'red'))\n                cprint(data,'green')\n                if res:\n                    passed += 1\n            except Exception as e:\n                cprint(\"Result: FAILED\",'red')\n                cprint(str(e),'red')\n            print(\"\\n--------------------------------------\\n\")\n            i += 1\n            # wait(2.5)\n\ncprint(\"PASSED {} / {}\".format(passed,i))\nw.close()\n","repo_name":"sreesh2411/OOAD-Project","sub_path":"MyMovie/tests/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":25088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39508061683","text":"for _ in range(int(input())):\n    a,b=map(int,input().split())\n    money=0\n    if a!=0:\n        if a==1: money+=500\n        elif a<=3: money+=300\n        elif a<=6: money+=200\n        elif a<=10: money+=50\n        elif a<=15: money+=30\n        elif a<=21: money+=10\n    if b!=0:\n        if b==1: money+=512\n        elif b<=3: money+=256\n        elif b<=7: money+=128\n        elif b<=15: money+=64\n        elif b<=31: money+=32\n    money*=10000\n    print(money)","repo_name":"njw1204/BOJ-AC","sub_path":"problem/10000~19999/15953/15953.py3.py","file_name":"15953.py3.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38783120218","text":"\"\"\"\nThis program finds an element in a sorted array\n\"\"\"\n\n\ndef binary_search(arr, low, high, element):\n    if high >= low:\n        mid = int((low + high) / 2)\n\n        if arr[mid] == element:\n            return mid\n\n        if arr[mid] > element:\n            return binary_search(arr, low, mid - 1, element)\n\n        return binary_search(arr, mid + 1, high, element)\n\n    return False\n\n\ndef exponential_search(arr, length, element):\n    if arr[0] == element:\n        return 0\n\n    i = 1\n\n    while i < length and arr[i] < element:\n        i *= 2\n\n    return binary_search(arr, i // 2, min(i, length - 1), element)\n\n\narray = []\n\nelements = int(input('Enter the number of elements in the array: '))\n\nfor i in range(elements):\n    array.append(int(input(\"Enter array element: \")))\n\narray.sort()\n\nx = int(input('Enter element to search: '))\n\nresult = exponential_search(array, elements, x)\n\nif result is not False:\n    print('The element is present at index: ', result)\nelse:\n    print('The element is not present.')\n\ninput()\n","repo_name":"cartoonshow57/SmallPythonPrograms","sub_path":"exponential_search.py","file_name":"exponential_search.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74152903624","text":"from flask import request, jsonify\nfrom . import main\nfrom ..models import AlarmRecord, PonAlarmRecord, Permission, LineDataBank, PermissionIP, Interfaces, Device\nfrom .. 
import logger, db, work_q, nesteddict, redis_db\nfrom ..proccessing_data import datatable_action\nfrom ..proccessing_data.proccess.public_methods import new_data_obj\nimport re\nfrom flask_login import current_user\nfrom collections import defaultdict\nfrom ..decorators import permission_ip\nfrom ..common import db_commit, success_return, false_return\n\n\n@main.route('/sync/interface', methods=[\"POST\"])\n@permission_ip(PermissionIP)\ndef sync_interface():\n    data = request.json\n    # if not line:\n    #     return datatable_action.create(**data)\n    # else:\n    #     return datatable_action.update(**data)\n    logger.debug(f'Receive data from device synchronize {data}')\n    lock = 'sync_interface::' + data.get('order_number')\n    if redis_db.exists(lock):\n        redis_db.delete(lock)\n        line = Device.query.filter_by(ip=data.get('order_number')).first()\n        if not line:\n            return jsonify(\n                {'status': 'false', 'content': f\"The order number {data.get('order_number')} does not exist!\"})\n\n        logger.debug(f'The length of sync interface callback result is {len(data)}')\n        if len(data) > 0 and data.get('state') == 1:\n            # do something to update the interface data for this device\n            line.device_name = data.get(\"sysname\")\n            for interface, int_info in data.get(\"interface\").items():\n                update_interface = new_data_obj(\"Interfaces\", **{\"interface_name\": interface, \"device\": line.id})\n                update_interface.interface_desc = int_info.get(\"DESC\")\n                update_interface.interface_type = int_info.get(\"PORT\")\n                update_interface.interface_status = True if int_info.get(\"PHY\") == \"up\" else False\n                db.session.add(update_interface)\n                db_commit()\n                if int_info.get(\"ETH\"):\n                    for eth_int in int_info.get(\"ETH\"):\n                        logger.debug(f\"Etrunk group interface: {eth_int}\")\n                        new_eth_int = new_data_obj(\"Interfaces\", **{\"interface_name\": eth_int,\n                                                                    \"device\": line.id})\n                        new_eth_int.parent = update_interface\n                        db.session.add(new_eth_int)\n            db.session.add(line)\n            return jsonify(db_commit())\n        else:\n            logger.warning(f\"{data} no info for the interface\")\n            # this part needs to be adjusted according to the actual result format\n            return jsonify({'status': 'true'})\n    else:\n        return jsonify({'status': 'false', 'content': 'The device is not locked'})\n\n\n@main.route('/verify/ring', methods=[\"POST\"])\n@permission_ip(PermissionIP)\ndef verify_ring():\n    data = request.json\n    logger.debug(f'Receive data from verify ring {data}')\n    if redis_db.exists('check_rrpp_lock') and redis_db.get('check_rrpp_lock') == data.get('order_number'):\n        redis_db.delete('check_rrpp_lock')\n        result = data.get('data')\n        state = 0\n        line = LineDataBank.query.filter_by(line_code=data.get('order_number')).first()\n        logger.debug(f'length of the result is {len(result)}')\n        if len(result) > 0:\n            for r in result:\n                state += r.get('state')\n            logger.debug(f'now the state value is {state}')\n            if state == len(result):\n                line.validate_rrpp_status = 1\n            else:\n                line.validate_rrpp_status = 0\n\n            try:\n                db.session.add(line)\n                db.session.commit()\n            except Exception as e:\n                db.session.rollback()\n    else:\n        pass\n        # this part needs to be adjusted according to the actual result format\n\n    return jsonify({'status': 'ok'})\n\n\n@main.route('/oss', methods=[\"POST\"])\n@permission_ip(PermissionIP)\ndef oss():\n    data = request.json\n    data['original'] = 'oss'\n    data['function'] = 'datatable_action'\n    logger.debug(f'Receive data from oss {data}')\n    work_q.put(data)\n    return jsonify({'status': 'ok'})\n\n\n@main.route('/delete_alarm_record', methods=['POST'])\n@permission_ip(PermissionIP)\ndef delete_alarm_record():\n    \"\"\"\n    Deletes an alarm record. The row in alarm_record is kept and only its alarm_type is set to 999; if alarm_type is 4,\n    the matching records in pon_alarm_record have to be deleted as well.\n    The POST body carries the alarm id.\n    :return:\n    \"\"\"\n    try:\n        if not current_user.can(Permission.NETWORK_MANAGER):\n            logger.warning('This user\\'s action is not permitted!')\n            return jsonify({'status': 'Fail', 'content': 'This account is not permitted to delete alarm records'})\n        print('delete')\n        alarm_id = request.json\n        print(alarm_id)\n\n        id = alarm_id['alarm_id']\n\n        print(id)\n\n        print('start check')\n\n        alarm_record = AlarmRecord.query.filter_by(id=id).first()\n\n        print(alarm_record)\n        print(alarm_record.alarm_type)\n\n        if alarm_record.alarm_type == 4 or alarm_record.alarm_type == 3:\n            print(alarm_record.content)\n            try:\n                ontid = [int(i) for i in eval(re.findall(r'(\\{*.+\\})', alarm_record.content)[0])]\n            except Exception as e:\n                ontid = ['PON']\n            ip = re.findall(r'(\\d+\\.\\d+\\.\\d+\\.\\d+)', alarm_record.content)[0]\n            f, s, p = re.findall(r'(\\d+/\\d+/\\d+)', alarm_record.content)[0].split('/')\n            print(f, s, p, ontid, ip)\n            for ont in ontid:\n                pon_alarm_record = PonAlarmRecord.query.filter_by(ip=ip, frame=f, slot=s, port=p, ontid=ont).first()\n                if not pon_alarm_record:\n                    continue\n                db.session.delete(pon_alarm_record)\n                db.session.commit()\n\n        alarm_record.alarm_type = 999\n        db.session.add(alarm_record)\n        db.session.commit()\n\n        return jsonify({'status': 'OK', 'content': 'The record has been deleted'})\n\n    except Exception as e:\n        print(e)\n        return jsonify({'status': 'Fail', 'content': str(e)})\n\n\n@main.route('/assets/<name>', methods=[\"GET\", \"POST\"])\ndef assets(name):\n    logger.debug(f\">>> Get uuid name: {name}\")\n    return jsonify({'status': 'ok', \"content\": name})\n","repo_name":"KoiosChen/gamefast","sub_path":"app/main/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69890319627","text":"\"\"\"\nSummary: \nImports json, as the data will be saved in a json file.\n\nTo save a new entry, it will: \n1. Open or create json file\n2. Load content\n3. Add new entry\n4. Overwrite old json-content with new one. \nAlso used for only loading content. \nReturns:\nNew json file, content of json file.\n\"\"\"\nimport json\n\n\"\"\"\nSummary: \nLoads the json file; if it does not exist, creates a file with the needed dictionary. \n \nReturns:\njson_daten \n\"\"\"\ndef load_json(name):\n\tjson_daten = {}\n\ttry:\n\t\twith open(\"data/data.json\") as open_file:\n\t\t\tjson_daten = json.load(open_file)\n\texcept FileNotFoundError:\n\t\t\tjson_daten = {\n\t\t\t\tname: {\n\n\t\t\t\t}\n\t\t\t}\n\treturn json_daten\n\n\"\"\"\nsave_json()\nSummary: \nSaves the new dict, with added content. \n\nReturns:\njson file \n\"\"\"\ndef save_json(data):\n\twith open(\"data/data.json\", \"w\") as open_file:\n\t\tjson.dump(data, open_file, ensure_ascii=False, indent=4)\n \ndef aktivitaet_speichern(name, date, beginn, ende, verantwortung, beteiligung):\n    \"\"\"\n    Summary: \n    Gets added content for new entry and adds it to dict in json file. \n    \n    Returns:\n    current json file. 
\n \"\"\"\n #split date for nice format\n date = date.split(\"-\")\n DD = str(date[2])\n MM = str(date[1])\n YYYY = str(date[0])\n date = DD + \".\" + MM + \".\" + YYYY\n\t#reformat list from 'beteiligung' for nice output\n seperator = \", \"\n beteiligung = seperator.join(beteiligung)\n\n name = name.capitalize()\n\n json_daten = load_json(name)\n json_daten[name] = {\n \"Aktivität\": name, \"Datum\": date, \"Beginn\": beginn, \"Ende\": ende, \"Verantwortung\": verantwortung, \"Stufe TN\": beteiligung\n }\n save_json(json_daten)\n\n\ndef load_overview(): \n\t\"\"\"\n Summary: \n Used for overview_termine.html to fill table, as there is no variable \"name\" given on this page. \n \n Returns:\n json_daten \n \"\"\"\n\ttry: \n\t\twith open(\"data/data.json\") as f: \n\t\t\tdata = json.load(f)\n\texcept FileNotFoundError:\n\t\t\tdata = {\n\t\t\t}\n\treturn data\n\t\n\ndef count_entrys():\n\t\"\"\"\n\tSummary: \n\tFunction for adding up the entrys.\n\n\treturn: \n\tamount of entrys in current dict. \n\t\"\"\"\n\tjahresplan = load_overview()\n\teintraege = [] \n\tfor a in jahresplan:\n\t\teintraege.append(a)\n\n\tanz = int(len(eintraege))\n\treturn anz","repo_name":"annibaum96/PROG2","sub_path":"plannerbrunch/libs/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11626682606","text":"from supercharge import Controller\n\nfrom google.appengine.api import users\n\nclass Test(Controller):\n\n def index(self):\n self.view = \"\"\n user = users.get_current_user()\n if user:\n self.render('Hello '+user.nickname())\n else:\n self.loginUser()\n \n def testParams(self):\n self.render(self.getParams())\n \n def logout(self):\n user = users.get_current_user()\n if user:\n self.logoutUser()\n else:\n self.redirect('/')\n \n def tested(self):\n pass\n \n\n","repo_name":"RaVbaker/supercharge","sub_path":"controllers/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17954482502","text":"import math\n\nimport pandas\n\nfrom core.financial_modelling.constants.earthquake_constants import EARTH_RADIUS, PolicyConstantString\nfrom core.financial_modelling.policy.earthquake import multi_asset_earthquake_policy as earthquake_policy_module\nfrom core.financial_modelling.scenario_generator.earthquake import usgs_earthquake_scenario_generator\n\nTIME_COLUMN = \"time\"\nPAYOUT_COLUMN = \"payout\"\nMAGNITUDE_COLUMN = \"mag\"\nDISTANCE_COLUMN = \"distance\"\nLATITUDE_COLUMN = \"latitude\"\nLONGITUDE_COLUMN = \"longitude\"\n\n\n# POINT TO POINT\ndef compute_haversine (latitude_1, longitude_1, latitude_2, longitude_2, radius) ->float:\n phi_1 = math.radians (latitude_1)\n phi_2 = math.radians (latitude_2)\n lambda_1 = math.radians (longitude_1)\n lambda_2 = math.radians (longitude_2)\n sin_half_delta_phi = math.sin ((phi_2 - phi_1) * 0.5)\n sin_half_delta_lambda = math.sin ((lambda_2 - lambda_1) * 0.5)\n cos_latitude_1 = math.cos (phi_1)\n cos_latitude_2 = math.cos (phi_2)\n h = sin_half_delta_phi ** 2 + cos_latitude_1 * cos_latitude_2 * (sin_half_delta_lambda ** 2)\n return 2 * radius * math.asin (math.sqrt (h))\n\ndef get_haversine_distance (latitude_list, longitude_list, center_latitude, center_longitude) ->list:\n if not len (latitude_list) == len (longitude_list) :\n raise Exception (\n f\"Latitude and Longitude must have the same size: {len (latitude_list)} 
vs {len (longitude_list)} \")\n result = list ()\n for lat, long in zip (latitude_list, longitude_list) :\n result.append (compute_haversine (lat, long, center_latitude, center_longitude, EARTH_RADIUS))\n return result\n\ndef compute_payouts (\n earthquake_data: pandas.DataFrame, policy: earthquake_policy_module.MultiAssetEarthquakePolicy\n) -> pandas.Series :\n print(earthquake_data)\n scenario_parameters = dict ()\n scenario_parameters[PolicyConstantString.asset_locations] = policy.asset_locations\n scenario_parameters[PolicyConstantString.protection_layers] = [\n { PolicyConstantString.max_radius : layer.max_radius, PolicyConstantString.min_magnitude : layer.min_magnitude }\n for layer in policy.protection_layers]\n scenario_generator = usgs_earthquake_scenario_generator.USGSEarthQuakeScenarioGenerator (\n parameters = scenario_parameters, earthquake_dataframe = earthquake_data)\n result = policy.compute_payout_multi_scenario (scenario_generator.get_data ())\n print(result)\n payout_series = result.apply (lambda x : x.scenario_payout)\n #payout_series.detailed_analysis = result\n return payout_series\n\ndef compute_burning_cost (payouts: (dict, pandas.Series), start_year, end_year) ->float:\n if len(payouts) == 0:\n return 0.0\n if isinstance (payouts, dict) :\n payouts = pandas.Series (payouts)\n return ((payouts.index >= start_year) * (payouts.index <= end_year) * payouts).sum () / (end_year - start_year + 1)\n\n\nif __name__ == \"__main__\" :\n print (get_haversine_distance ([1, 3], [2, 5], 3, 4))\n\n layer_1 = earthquake_policy_module.EarthquakeProtectionLayer (layer_id = 1, max_radius = 10.0, min_magnitude = 4.5,\n payout_ratio = 1.0)\n layer_2 = earthquake_policy_module.EarthquakeProtectionLayer (layer_id = 2, max_radius = 50.0, min_magnitude = 5.5,\n payout_ratio = 0.75)\n layer_3 = earthquake_policy_module.EarthquakeProtectionLayer (layer_id = 3, max_radius = 200.0, min_magnitude = 6.5,\n payout_ratio = 0.5)\n protection_layers = [layer_1, layer_2, layer_3]\n asset_locations = list ()\n asset_locations.append ({\n earthquake_policy_module.DataConstantString.latitude : 35.025,\n earthquake_policy_module.DataConstantString.longitude : 25.763\n })\n asset_locations.append ({\n earthquake_policy_module.DataConstantString.latitude : 36,\n earthquake_policy_module.DataConstantString.longitude : 25.7\n })\n asset_locations.append ({\n earthquake_policy_module.DataConstantString.latitude : 35.3,\n earthquake_policy_module.DataConstantString.longitude : 25.76\n })\n asset_locations = pandas.DataFrame (asset_locations)\n print (asset_locations)\n print (layer_1.__dict__)\n print (layer_2.__dict__)\n print (layer_3.__dict__)\n reporting_levels = [earthquake_policy_module.ReportingLevel.EVENT,\n earthquake_policy_module.ReportingLevel.EVENT_LAYER]\n policy_1 = earthquake_policy_module.MultiAssetEarthquakePolicy (policy_id = 1, policy_name = \"policy 1\",\n asset_locations = asset_locations,\n protection_layers = protection_layers, limit = 100,\n reporting_levels = reporting_levels)\n print (policy_1.__dict__)\n\n from data_collecting.usgs import usgs_helper\n from datetime import datetime\n\n earthquake_data = usgs_helper.get_earthquake_data_within_circle (latitude = 35.025, longitude = 25.763,\n radius = 200, minimum_magnitude = 4.5,\n end_date = datetime (year = 2021, month = 12,\n day = 31),\n start_date = datetime (year = 1911, month = 1,\n day = 1))\n payouts = compute_payouts (earthquake_data, policy_1)\n print (payouts)\n print (payouts.detailed_analysis)\n print 
(compute_burning_cost (payouts, 1950, 2021))\n print (compute_burning_cost (payouts, 1990, 2021))\n\n","repo_name":"feilongbk/descartes-soft-eng-khoi-nguyen","sub_path":"src/earthquakes/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36693937186","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ugc', '0008_prices'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='prices',\n name='company',\n ),\n migrations.DeleteModel(\n name='Prices',\n ),\n ]\n","repo_name":"notpratheek/stockmemaybe","sub_path":"ugc/migrations/0009_auto_20150422_1852.py","file_name":"0009_auto_20150422_1852.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70434763465","text":"print(\"\"\"*********************\r\nHESAP MAKİNASI PROGRAMI :)\r\nİŞLEMLER:\r\n1)toplama\r\n2)çıkarma\r\n3)çarpma\r\n4)bölme\r\n**************************\"\"\")\r\na = int(input(\"birinci sayıyı giriniz: \"))\r\nb = int(input(\"ikinci sayıyı giriniz: \"))\r\n\r\nişlem = input(\"işlem giriniz:\")\r\nif işlem == \"1\":\r\n print(\"{} in {} ile toplamı {}'dir.\".format(a,b,a+b))\r\nelif işlem == \"2\":\r\n print(\"{} in {} ile farkı {} dır.\".format(a,b,a-b))\r\nelif işlem ==\"3\":\r\n print(\"{} in {} ile çarpımı {} dir.\".format(a,b,a*b))\r\nelif işlem == \"4\":\r\n print(\"{} in {} ile bölümü {} dir.\".format(a,b,a/b))\r\nelse:\r\n print(\"Geçersiz İşlem............\")\r\n","repo_name":"KubraCollu/Basic-Exercise","sub_path":"Koşullu Durumlar/Basit Hesap Makinesi.py","file_name":"Basit Hesap Makinesi.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4028936146","text":"import torch\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom transformers import default_data_collator\n\nfrom .utils import VisionDataset, TextDataset\n\n\n\nclass SNAPDemo:\n def __init__(self, vision_encoder,\n batch_size: int = 32, max_len: int = 64, device='cuda'):\n \"\"\" Initializes CLIPDemo\n it has the following functionalities:\n image_search: Search images based on text query\n \n Args:\n vision_encoder: Fine-tuned vision encoder\n device (torch.device): Running device\n batch_size (int): Size of mini-batches used to embeddings\n max_length (int): Tokenizer max length\n \n \"\"\"\n self.vision_encoder = vision_encoder.eval().to(device)\n self.batch_size = batch_size\n self.device = device\n self.max_len = max_len\n self.text_embeddings_ = None\n self.image_embeddings_ = None\n \n\n def compute_image_embeddings(self, image_paths: list):\n \"\"\" Compute image embeddings for a list of image paths\n Args:\n image_paths (list[str]): An image database\n \"\"\"\n self.image_paths = image_paths\n\n datalodear = DataLoader(VisionDataset(\n image_paths=self.image_paths), batch_size=self.batch_size)\n embeddings = []\n with torch.no_grad():\n for images in tqdm(datalodear, desc='computing image embeddings'):\n \n image_embedding = self.vision_encoder(\n pixel_values=images.to(self.device)).pooler_output\n \n embeddings.append(image_embedding)\n self.image_embeddings_ = torch.cat(embeddings)\n\n 
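    # A minimal sketch of the ranking step the retrieval methods below rely on: embed a\n    # query once, then order the pre-computed image embeddings by cosine similarity.\n    # Kept as a helper that nothing calls; the name _toy_topk and its arguments are\n    # illustrative and not part of the original class API.\n    @staticmethod\n    def _toy_topk(query_embedding, image_embeddings, top_k=5):\n        # query_embedding: (1, D) tensor; image_embeddings: (N, D) tensor\n        sims = torch.cosine_similarity(image_embeddings, query_embedding)\n        # indices of the top_k most similar images, best match first\n        return sims.argsort(descending=True)[:top_k]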
def image_query_embedding(self, image):\n        image = VisionDataset.preprocess(image).unsqueeze(0)\n        with torch.no_grad():\n            image_embedding = self.vision_encoder(\n                image.to(self.device)).pooler_output\n        return image_embedding\n\n    def most_similars(self, embeddings_1, embeddings_2):\n        values, indices = torch.cosine_similarity(\n            embeddings_1, embeddings_2).sort(descending=True)\n        return values.cpu(), indices.cpu()\n\n    def image_search(self, image_path: str, top_k=10):\n        \"\"\" Search for images similar to an image query and plot the matches\n        Args:\n            image_path (str): path of the query image \n            top_k (int): number of relevant images \n        \"\"\"\n        print(image_path)\n        image = Image.open(image_path)\n        image_embedding = self.image_query_embedding(image)\n        _, indices = self.most_similars(self.image_embeddings_, image_embedding)\n\n        matches = np.array(self.image_paths)[indices][:top_k]\n        _, axes = plt.subplots(2, int(top_k/2), figsize=(15, 5))\n        for match, ax in zip(matches, axes.flatten()):\n            ax.imshow(Image.open(match).resize((224, 224)))\n            ax.axis(\"off\")\n        plt.show()\n    def image_query(self, image_path: str, top_k=10):\n        \"\"\" Search for images similar to an image query and return the matching paths\n        Args:\n            image_path (str): path of the query image \n            top_k (int): number of relevant images \n        \"\"\"\n        print(image_path)\n        image = Image.open(image_path)\n        image_embedding = self.image_query_embedding(image)\n        _, indices = self.most_similars(self.image_embeddings_, image_embedding)\n\n        matches = np.array(self.image_paths)[indices][:top_k]\n        return matches\n    ","repo_name":"narsisn/Transformer-based-Image-Retrieval","sub_path":"src/application/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"18186645643","text":"class EmptyError(Exception): pass\n\nclass Stack:\n\n    def __init__(self,postupnost=None):\n        '''initializes the list'''\n        self._prvky = []\n\n        if postupnost is not None:\n            for i in postupnost:\n                self.push(i)\n\n    def __repr__(self):\n        return f' Stack({tuple(self._prvky)})'\n\n\n    def push(self, data):\n        '''pushes a new value on top of the stack'''\n        self._prvky.append(data)\n\n    def pop(self):\n        '''removes and returns the value from the top of the stack, or raises EmptyError'''\n        if self.is_empty():\n            raise EmptyError('empty stack')\n        return self._prvky.pop()\n\n    def top(self):\n        '''returns the value at the top of the stack, or raises EmptyError'''\n        if self.is_empty():\n            raise EmptyError('empty stack')\n        return self._prvky[-1]\n\n    def is_empty(self):\n        '''checks whether the stack is empty'''\n        return self._prvky == []\n\ndef pocitaj(vyraz):\n    s = Stack()\n    for prvok in vyraz.split():\n        if prvok == '+':\n            s.push(s.pop() + s.pop())\n        elif prvok == '-':\n            s.push(-s.pop() + s.pop())\n        elif prvok == '*':\n            s.push(s.pop() * s.pop())\n        elif prvok == '/':\n            op2 = s.pop()  # could also be written as: op2, op1 = s.pop(), s.pop()\n            op1 = s.pop()\n            s.push(op1 // op2)\n        else:\n            s.push(int(prvok))\n    return s.pop()\n\n\n","repo_name":"BeloIV/Programovanie_2","sub_path":"1/prednaska.py","file_name":"prednaska.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"sk","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70389273226","text":"## Write a function named items_price that accepts two lists as parameters.\n## The first list contains the quantities of n different items,\n## the second list contains the prices that correspond to those n items\n## respectively. Now, calculate the total amount of money required to\n## purchase those items. 
Assume that both the lists will have equal lengths. \n\ndef items_price(lista, listb):\n i = 0\n total = 0\n for i in range(len(lista)):\n total = total + lista[i]*listb[i]\n return total\n\nprint(items_price([1,2,3],[100,100,100]))\n","repo_name":"lipingzhu/PythonEdX","sub_path":"midterm_part5.py","file_name":"midterm_part5.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24665602014","text":"from glob import glob\nimport platform\nimport sys\n\ntry:\n import setuptools\nexcept:\n print('''\nsetuptools not found.\n\nOn linux, the package is often called python-setuptools''')\n sys.exit(1)\n\nimport os\n\nfrom setuptools.command.build_ext import build_ext as _build_ext\n# Based on http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\nclass build_ext(_build_ext):\n def finalize_options(self):\n _build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n __builtins__.__NUMPY_SETUP__ = False\n import numpy\n self.include_dirs.append(numpy.get_include())\n\ndef has_webp():\n return os.system(\"pkg-config --exists libwebp\") == 0\n\nexec(compile(open('imread/imread_version.py').read(),\n 'imread/imread_version.py', 'exec'))\nlong_description = open('README.rst').read()\n\nundef_macros = []\ndefine_macros = []\nif os.environ.get('DEBUG'):\n undef_macros = ['NDEBUG']\n if os.environ.get('DEBUG') == '2':\n define_macros.append( ('_GLIBCXX_DEBUG','1') )\ndefine_macros.append(('NPY_NO_DEPRECATED_API','NPY_1_7_API_VERSION'))\ndefine_macros.append(('PY_ARRAY_UNIQUE_SYMBOL','MahotasImread_PyArray_API_Symbol'))\n\n\nEXCLUDE_WEBP = os.environ.get('EXCLUDE_WEBP')\nif EXCLUDE_WEBP is None:\n EXCLUDE_WEBP = not has_webp()\n\nif EXCLUDE_WEBP:\n define_macros.append( ('IMREAD_EXCLUDE_WEBP', '1') )\n\ninclude_dirs = []\nlibrary_dirs = []\n\nfor pth in ('/usr/local/include', '/usr/X11/include'):\n if os.path.isdir(pth):\n include_dirs.append(pth)\n\nfor pth in ('/usr/local/lib', '/usr/X11/lib'):\n if os.path.isdir(pth):\n library_dirs.append(pth)\n\nextensions = {\n 'imread._imread': [\n 'imread/_imread.cpp',\n 'imread/lib/formats.cpp',\n 'imread/lib/numpy.cpp',\n 'imread/lib/_bmp.cpp',\n 'imread/lib/_jpeg.cpp',\n 'imread/lib/_lsm.cpp',\n 'imread/lib/_png.cpp',\n 'imread/lib/_tiff.cpp',\n ],\n}\n\n\nlibraries = ['png', 'jpeg', 'tiff']\nif sys.platform.startswith('win'):\n libraries.append('zlib')\n\nif not EXCLUDE_WEBP:\n extensions['imread._imread'].append('imread/lib/_webp.cpp')\n libraries.append('webp')\n\nextra_args = []\nif platform.platform().startswith('Darwin'):\n if int(platform.mac_ver()[0].split('.')[1]) >= 9:\n extra_args.append('-stdlib=libc++')\n\next_modules = [\n setuptools.Extension(\n key,\n libraries = libraries,\n library_dirs=library_dirs,\n include_dirs=include_dirs,\n sources=sources,\n undef_macros=undef_macros,\n define_macros=define_macros,\n extra_compile_args=extra_args,\n extra_link_args=extra_args,\n ) for key, sources in extensions.items()]\n\npackages = setuptools.find_packages()\n\npackage_dir = {\n 'imread.tests': 'imread/tests',\n }\npackage_data = {\n 'imread.tests': ['data/*',\n 'data/bad-files/*/*.tiff',\n 'data/bad-files/BMP/*/*.bmp',\n 'data/bad-files/LSM/*/*.lsm']\n }\n\nclassifiers = [\n'Development Status :: 4 - Beta',\n'Intended Audience :: Developers',\n'Intended Audience :: Science/Research',\n'Topic :: Multimedia',\n'Topic :: Scientific/Engineering :: Image Recognition',\n'Topic 
:: Software Development :: Libraries',\n'Programming Language :: Python',\n'Programming Language :: Python :: 2',\n'Programming Language :: Python :: 2.7',\n'Programming Language :: Python :: 3',\n'Programming Language :: Python :: 3.3',\n'Programming Language :: Python :: 3.4',\n'Programming Language :: Python :: 3.5',\n'Programming Language :: Python :: 3.6',\n'Programming Language :: Python :: 3.7',\n'Programming Language :: C++',\n'License :: OSI Approved :: MIT License',\n]\n\nsetuptools.setup(name = 'imread',\n        version = __version__,\n        description = 'imread: Image reading library',\n        long_description = long_description,\n        long_description_content_type = 'text/x-rst',\n        author = 'Luis Pedro Coelho',\n        author_email = 'luis@luispedro.org',\n        license = 'MIT',\n        platforms = ['Any'],\n        classifiers = classifiers,\n        url = 'http://luispedro.org/software/imread',\n        packages = packages,\n        ext_modules = ext_modules,\n        package_dir = package_dir,\n        package_data = package_data,\n        cmdclass = {'build_ext': build_ext},\n        setup_requires = ['numpy'],\n        install_requires = ['numpy'],\n        )\n","repo_name":"luispedro/imread","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"81"}
{"seq_id":"23800301646","text":"from collections import deque\n\n\ndef get_calories_dict(file_object) -> dict:\n    \"\"\"\n    Get a dictionary with entries for each elf and his food items.\n    Elfs are now dwarfs. Iterate through the file data in a queue manner (Yeah, I know we really don't need a queue but\n    I wanted to use one so I did...)\n    and check if each food item is actually food\n    or a delimiter. If food, add to the dictionary, otherwise if delimiter, add new dwarf to dictionary and continue.\n    :param file_object: The file with the data.\n    :return: Return a dictionary with the data.\n    \"\"\"\n\n    calories_dict = {}\n    data = deque(file_object.readlines())\n    dwarf_number = 1\n    current_dwarf = f\"Dwarf_{dwarf_number}\"\n    calories_dict[current_dwarf] = []\n\n    while data:\n        entry = data.popleft()\n        if is_food_item(entry):\n            calories_dict[current_dwarf].append(int(entry.strip()))\n        else:\n            dwarf_number += 1\n            current_dwarf = f\"Dwarf_{dwarf_number}\"\n            calories_dict[current_dwarf] = []\n\n    return calories_dict\n\n\ndef is_food_item(entry: str) -> bool:\n    \"\"\"\n    Check if the current entry is a food item or a delimiter.\n    :param entry: The current entry, food item or delimiter.\n    :return: Return a boolean True/False\n    \"\"\"\n    return entry[0].isnumeric()\n\n\ndef calculate_total_calories(calories_dict: dict) -> dict:\n    \"\"\"\n    Get total calories from all food items per dwarf.\n    :param calories_dict: Dictionary with dwarfs and their food items.\n    :return: Return a new dictionary with dwarfs and their total calories.\n    \"\"\"\n    return {dwarf: sum(food_items) for dwarf, food_items in calories_dict.items()}\n\n\ndef get_top_3_most_calories(total_calories_dict: dict) -> list:\n    \"\"\"\n    Reorder the dictionary such that the first entry is the dwarf with the most calories, then return the calories of the\n    top 3 dwarfs as a list.\n    :param total_calories_dict: Dictionary with dwarfs and their total amount of calories\n    :return: Returns a list of calories, each carried by the dwarf in the top 3.\n    \"\"\"\n    ordered_calories = sorted(total_calories_dict.items(), key=lambda x: x[1], reverse=True)\n    top_3_most_calories = [ordered_calories[dwarf_position][1] for dwarf_position in range(0, 3)]\n    return 
top_3_most_calories\n\n\nwith open(r\"./calories_data\", \"r\") as file:\n calories_dict = get_calories_dict(file_object=file)\n total_calories_dict = calculate_total_calories(calories_dict=calories_dict)\n top_3_most_calories = get_top_3_most_calories(total_calories_dict=total_calories_dict)\n sum_of_top_3_calories = sum(top_3_most_calories)\n print(sum_of_top_3_calories)\n","repo_name":"Nikikapralov/Advent-of-Code-2022","sub_path":"Day 1/day_1_2.py","file_name":"day_1_2.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41478249967","text":"import random\n\ndef solution(A):\n count = 0\n for p in range(len(A)):\n for q in range(p + 1, len(A)):\n for r in range(q + 1, len(A)):\n total = A[p] + A[q] + A[r]\n hyp = max(A[p], A[q], A[r])\n sides = total - hyp\n if sides > hyp:\n count += 1\n \n return count\n\n# test [10, 2, 5, 1, 8, 12] \n\ndef new_solution(A):\n # need 3 elements to make this work\n if len(A) < 3:\n return 0\n\n # sort first\n count = 0\n A.sort()\n \n for p_index in range(len(A) - 2):\n # we can share r_index across iterations of q.\n # if A[p] + A[q] > A[r], then A[p] + A[q + 1] > A[r]\n r_index = p_index + 2\n for q_index in range(p_index + 1, len(A) - 1):\n p = A[p_index]\n q = A[q_index]\n # keep advancing r_index until we find a value that fails to satisfy p + q > r\n while r_index < len(A) - 1 and p + q > A[r_index + 1]:\n r_index += 1\n\n if p + q > A[r_index]:\n count += r_index - q_index\n\n return count\n\nif __name__ == \"__main__\":\n for i in range(100):\n A = [random.randint(1,60) for p in range(20)]\n good = solution(A)\n new = new_solution(A)\n if good != new:\n print('Good: {} New: {} A: {}'.format(good, new, A))","repo_name":"danielphil/codility_training","sub_path":"lesson15/CountTriangles.py","file_name":"CountTriangles.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73455333385","text":"import pydot\n\ngraph = pydot.Dot('G', graph_type='digraph', bgcolor='grey')\n\n\ninitial_state = (\n (2, 8, 3),\n (1, 6, 4),\n (7, None, 5)\n)\n\n# initial_state =(\n# (1, 2, 3),\n# (7, 8, 4),\n# (None, 6, 5)\n# )\n\n\n\ngoal_state = (\n (1, 2, 3),\n (8, None, 4),\n (7, 6, 5)\n)\n\nswappable_positions = {\n 0: (1, 3),\n 1: (0, 2, 4),\n 2: (1, 5),\n 3: (0, 4, 6),\n 4: (1, 3, 5, 7),\n 5: (2, 4, 8),\n 6: (3, 7),\n 7: (4, 6, 8),\n 8: (5, 7)\n}\n\n\ndef get_2d_pos_from_idx(idx):\n '''\n Return (row,col)\n '''\n return (idx//3, idx % 3)\n\n\ndef get_idx_from_2d_pos(row, col):\n '''\n Return idx\n '''\n return row*3+col\n\n\ndef swap(game_state, current_idx, swap_idx):\n '''\n Given the current game state and index, swap the empty value with swap state\n Eg: gamel_state =(\n (2,8,3),\n (1,6,4),\n (7,None,5)\n )\n current_idx = 7\n swap_idx = 8\n Result state = (\n (2,8,3),\n (1,6,4),\n (7,5,None)\n )\n '''\n curr_row, curr_col = get_2d_pos_from_idx(current_idx)\n swap_row, swap_col = get_2d_pos_from_idx(swap_idx)\n temp_state = []\n\n for i in game_state:\n row = []\n for j in i:\n row.append(j)\n temp_state.append(row)\n\n temp_state[curr_row][curr_col], temp_state[swap_row][swap_col] = temp_state[swap_row][swap_col], temp_state[curr_row][curr_col]\n\n new_state = []\n\n for i in temp_state:\n row = []\n for j in i:\n row.append(j)\n new_state.append(tuple(row))\n\n return tuple(new_state)\n\n\ndef get_empty_idx(game_state):\n idx = 0\n\n for i in game_state:\n for 
j in i:\n if j is None:\n # Found the index of empty space\n return idx\n else:\n idx += 1\n return idx\n\n\n\ndef move_BFS(visited: set, q: list = []):\n i = 1\n while len(q) > 0:\n\n # Find the empty position\n\n game_state, idx = q.pop(0)\n\n if game_state == goal_state:\n print(\"Found the goal..\")\n pp(game_state)\n \n return True\n\n # Find the all possible move for that state\n positions = swappable_positions[idx]\n\n for position in positions:\n\n new_state = swap(game_state, idx, position)\n if new_state in visited:\n continue\n else:\n \n node = pydot.Node(str(new_state),label=str(new_state)+' '+str(i) )\n node.set_style(\"filled\")\n node.set_fillcolor('white')\n node.set_fontcolor(\"black\")\n\n graph.add_node(node)\n edge = pydot.Edge(str(game_state),str(new_state))\n graph.add_edge(edge)\n\n visited.add(new_state)\n\n # pp(new_state)\n empty_idx = get_empty_idx(new_state)\n\n q.append((new_state, empty_idx))\n i += 1\n \n print(\"Goal Not found\")\n return False\n\n\n\n\ndef move_DFS(visited: set, stack: list = [],max_depth=10):\n i = 1\n while len(stack) > 0:\n\n # Find the empty position\n\n game_state, idx, depth = stack.pop()\n # print(game_state,idx,depth)\n \n if game_state == goal_state:\n print(\"Found the goal..\")\n pp(game_state)\n \n return True\n \n if depth > max_depth:\n continue\n\n # Find the all possible move for that state\n positions = swappable_positions[idx]\n \n\n for position in positions:\n\n new_state = swap(game_state, idx, position)\n if new_state in visited:\n continue\n else:\n node = pydot.Node(str(new_state),label=str(new_state)+' '+str(i) )\n node.set_style(\"filled\")\n node.set_fillcolor('white')\n node.set_fontcolor(\"black\")\n\n graph.add_node(node)\n edge = pydot.Edge(str(game_state),str(new_state))\n graph.add_edge(edge)\n\n visited.add(new_state)\n\n empty_idx = get_empty_idx(new_state)\n\n stack.append((new_state, empty_idx,depth+1))\n i += 1\n \n print(\"Goal Not found\")\n return False\n\n\ndef pp(list_2d, end='\\n'):\n if end == '\\n':\n print('-------------------------')\n for i in list_2d:\n for j in i:\n print(j, end=' ')\n print()\n if end == '\\n':\n print('-------------------------')\n\n\nif __name__ == \"__main__\":\n\n visited = set()\n visited.add(initial_state)\n\n idx = get_empty_idx(initial_state)\n # q = [(initial_state, idx)]\n stack = [(initial_state, idx,0)]\n\n node = pydot.Node(str(initial_state),label=str(initial_state)+' 0')\n node.set_style(\"filled\")\n node.set_fillcolor('yellow')\n node.set_fontcolor(\"black\")\n graph.add_node(node)\n\n print(\"Initial State:\", initial_state)\n # res = move_BFS(visited, q)\n res = move_DFS(visited, stack)\n\n\n print(\"Drawing State Space...\")\n print(\"Total Nodes:\", len(graph.get_nodes()))\n if res:\n node = graph.get_node(f'\"{str(initial_state)}\"')[0]\n node.set_style(\"filled\")\n node.set_fillcolor('yellow')\n\n node = graph.get_node(f'\"{str(goal_state)}\"')[0]\n node.set_style(\"filled\")\n node.set_fillcolor('red')\n \n\n graph.write_png('output_dfs_10.png',prog='dot')\n print(\"Saved state space as output.png.\")\n\n","repo_name":"ashishsubedi/8puzzles","sub_path":"puzzles_8.py","file_name":"puzzles_8.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27159073389","text":"import json\nfrom typing import Dict\nfrom flask import request\n\n\ndef convert_to_camel(response):\n \"\"\"Convert keys to camelCase.\"\"\"\n\n if response.headers['Content-Type'] == 
'application/json' and 'swagger.json' not in request.base_url:\n        response.set_data(json.dumps(camelcase_dict(json.loads(response.get_data()), {})))\n    return response\n\n\ndef camelcase(string):\n    \"\"\"Convert a snake_cased string to camelCase.\"\"\"\n    if '_' not in string or string.startswith('_'):\n        return string\n    return ''.join([\n        x.capitalize() if i > 0 else x\n        for i, x in enumerate(string.split('_'))\n    ])\n\n\ndef camelcase_dict(data, camel_dict: Dict[str, any]):\n    \"\"\"Iterate through the dict and convert to camel case.\"\"\"\n    if data:\n        # Handle the scenario where we aren't a dict\n        if isinstance(data, list):\n            return [camelcase_dict(item, {}) for item in data]\n        for key, value in data.items():\n            key = camelcase(key)\n            if isinstance(value, dict):\n                camel_dict[key] = camelcase_dict(value, {})\n            elif isinstance(value, list):\n                camel_dict[key] = []\n                for list_value in value:\n                    camel_dict[key].append(\n                        list_value if isinstance(list_value, str) else camelcase_dict(list_value, {}))\n            else:\n                camel_dict[key] = value\n\n        return camel_dict\n\n    return None\n","repo_name":"bcgov/sbc-common-components","sub_path":"python/src/sbc_common_components/utils/camel_case_response.py","file_name":"camel_case_response.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"28215966235","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 24 15:50:04 2020\n\n@author: Anirudh Raghavan\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n# Description of data\n\nsec_data = pd.read_csv(\"sec_fin_info_data.csv\")\n\n# First understand the dimensions of the data\n\nsec_data.shape\n\n# We have 8158 rows and 395 columns, let us take a look at the column names\n\nsec_data.columns\n\n\nsec_data[\"index\"] = sec_data[\"ticker\"] + sec_data[\"Year\"].astype(str) + sec_data[\"Quarter\"].astype(str)\n\n# Now we will remove the first 3 columns\n\nsec_data = sec_data.drop(['ticker', 'Year', 'Quarter'], axis=1)\n\nsec_data = sec_data.drop(['Web URL', 'Active/Inactive Status Marker'], axis=1)\n\n# Now we shall remove the columns which have more than 50% NA rows\n\nsec_data = sec_data.dropna(thresh = sec_data.shape[0]*0.5, axis=1)\n\nfor name in sec_data.columns[8:163]:\n    print(name)\n    if sum(sec_data[name] == 0) >= sec_data.shape[0]*0.5:\n        print(\"yes\")\n        sec_data = sec_data.drop([name], axis=1)\n\ncol_type = [str(sec_data[sec_data.columns[i]].dtype) for i in range(sec_data.shape[1]-1)]\n\nall(i == \"float64\" for i in col_type)\n\n# Thus other than the index column, all columns are numerical\n\nfor name in sec_data.columns[8:sec_data.shape[1]-1]:\n    sec_data[name] = sec_data[name].replace(np.nan,sec_data[name].mean())\n\n# We shall now go ahead with normalization\n\ndef normalize_data (x, max_x, min_x):\n    temp = (x - min_x)/(max_x-min_x)\n    return temp\n\n# We then write a for loop to go through each column and then use apply on each column to \n# compute normalized values and these are then replaced in the column \n\nfor name in sec_data.columns[8:105]:\n    print(name)\n    sec_data[name] = sec_data[name].apply(normalize_data, args = (max(sec_data[name]), min(sec_data[name])))\n\n# Now let us take the numerical columns and compute the correlation matrix\n\nsec_data_num = sec_data.iloc[:,8:105]\n\n# First compute a correlation matrix to observe the amount of correlation between data points\n\ncorr_mat = sec_data_num.corr()\n\n# Let us see the number of variables with correlation of more than 0.9\n\nN = 
corr_mat.shape[0]\n\nhigh_corr = []\n\nfor i in range(N):\n for j in range(i,N):\n if corr_mat.iloc[i,j] >= 0.90 or corr_mat.iloc[i,j] <= -0.90:\n tmp = [i,j]\n high_corr.append(tmp)\n\nadded = []\nremoved = []\n\n# We now have pairs of variables with more than 0.9 correlation.\n# We will now keep 1 column from each pair of highly correlated pairs in our dataset \n# and remove the others. We will also ensure that there are no duplications.\n\nfor pair in high_corr:\n i, j = pair\n if i == j:\n continue\n if i in added or j in added or i in removed or j in removed:\n continue\n else:\n added.append(i)\n removed.append(j)\n\n\nsec_data_shrunk = sec_data_num.drop(sec_data_num.columns[removed], axis = 1)\n\n# This will now become our set of features to work with for training our models\n\nsec_data_shrunk.to_csv(\"sec_fin_features.csv\", index = False)\n\n\n# Create separate datasets for each prediction label\n\nsec_labels_1 = sec_data.iloc[:,1]\nsec_labels_2 = sec_data.iloc[:,2]\nsec_labels_4 = sec_data.iloc[:,4]\nsec_labels_6 = sec_data.iloc[:,6]\nsec_index = sec_data.iloc[:,105]\n\n\n\n# Save each dataset as a csv file\n\nsec_labels_1.to_csv(\"sec_fin_1.csv\", index = False)\nsec_labels_2.to_csv(\"sec_fin_2.csv\", index = False)\nsec_labels_4.to_csv(\"sec_fin_4.csv\", index = False)\nsec_labels_6.to_csv(\"sec_fin_6.csv\", index = False)\nsec_index.to_csv(\"sec_index.csv\", index = False)\n\n\n","repo_name":"Ani-07/Tech-Stock-Price-Modelling","sub_path":"Initial Model/sec_data_cleaning.py","file_name":"sec_data_cleaning.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4431123594","text":"try:\n import Tkinter as tk\n from Tkinter import ttk, scrolledtext\nexcept ImportError:\n import tkinter as tk\n from tkinter import ttk, scrolledtext\n\nfrom functools import partial\n\n\ndef add_window(title):\n window = tk.Tk()\n window.title(title)\n return window\n\n\ndef add_label(window, text, column=0, row=0):\n label = ttk.Label(window, text=text)\n label.grid(column=column, row=row)\n return label\n\n\ndef add_button(window, text, command, column=0, row=0):\n action = ttk.Button(window, text=text, command=command)\n action.grid(column=column, row=row)\n return action\n\n\ndef add_text_widget(window, width=10, column=0, row=0):\n text = tk.StringVar()\n text_entered = ttk.Entry(window, width=width, textvariable=text)\n text_entered.grid(column=column, row=row)\n text_entered.focus()\n return text_entered\n\n\ndef add_combobox(window, width=10, column=0, row=0, values=None):\n text = tk.StringVar()\n text_chosen = ttk.Combobox(\n window, width=width, textvariable=text, state='readonly')\n text_chosen.grid(column=column, row=row)\n text_chosen['values'] = values\n return text_chosen\n\n\ndef add_checkbox(window, text, column=0, row=0):\n status_var = tk.IntVar()\n check = tk.Checkbutton(window, text=text, variable=status_var)\n check.grid(column=column, row=row)\n return check, status_var\n\n\ndef add_scrolledtext(\n window, width=20, height=5, wrap=tk.WORD,\n column=0, row=0, columnspan=3):\n scroll_text = scrolledtext.ScrolledText(\n window, width=width, height=height, wrap=wrap)\n scroll_text.grid(column=column, row=row, columnspan=3)\n return scroll_text\n\n\ndef click_button_action(text):\n print(text.get())\n text.configure(foreground='red')\n\n\ndef click_button_action_checkbox(status):\n print(\"[checkbox] is selected: \", bool(status.get()))\n\n\ndef 
click_button_action_scrolledtext(text):\n print(text.get('1.0', tk.END))\n text.configure(foreground='red')\n\n\ndef main():\n window = add_window(\"Simple Window\")\n\n l1 = add_label(window, \"Provide text:\", column=0, row=0)\n text_entered = add_text_widget(window, column=1, row=0)\n action = add_button(\n window, text=\"Process\", column=2, row=0,\n command=partial(click_button_action, text_entered))\n\n l2 = add_label(window, \"Select:\", column=0, row=1)\n values = (1, 2, 3, 4, 5)\n text_chosen = add_combobox(window, column=1, row=1, values=values)\n text_chosen.current(0)\n action = add_button(\n window, text=\"Process\", column=2, row=1,\n command=partial(click_button_action, text_chosen))\n\n l3 = add_label(window, \"Select:\", column=0, row=2)\n check_box, status_var = add_checkbox(window, text=\"Enable\", column=1, row=2)\n action = add_button(\n window, text=\"Process\", column=2, row=2,\n command=partial(click_button_action_checkbox, status_var))\n\n text = add_scrolledtext(window, column=0, row=3)\n action = add_button(\n window, text=\"Process\", column=4, row=3,\n command=partial(click_button_action_scrolledtext, text))\n\n window.mainloop()\n\nif __name__ == \"__main__\":\n main()","repo_name":"hopsmdev/playground","sub_path":"gui-tk/simple_window.py","file_name":"simple_window.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11386973929","text":"import re\r\n\r\nfrom util.OS import OS\r\n\r\n\r\nclass Add_tplDecorator:\r\n def __init__(self, pack):\r\n for i, line in enumerate(pack.lines):\r\n pattern = 'require\\([\\'|\\\"](.*?)[\\'|\\\"]\\)'\r\n res = re.search(pattern, line)\r\n if res is None:\r\n continue\r\n\r\n tpl_path = pack.watch_file_dir + res.group(1)\r\n try:\r\n tpl_fp = OS.open(tpl_path)\r\n content = tpl_fp.read(9999999)\r\n tpl_fp.close()\r\n line = re.sub('require\\([\\'|\\\"](.*?)[\\'|\\\"]\\)', '`' + content + '`', line)\r\n except Exception as e:\r\n print(e)\r\n pack.lines[i] = line\r\n","repo_name":"KqSMea8/hsm","sub_path":"script/src/lib/Add_tplDecorator.py","file_name":"Add_tplDecorator.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39785096794","text":"import frappe\nfrom awesome_cart.compat.customer import get_current_customer\nfrom erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry\nfrom frappe import _\nfrom frappe.contacts.doctype.contact.contact import get_default_contact\nfrom frappe.core.doctype.role.role import get_emails_from_role\nfrom frappe.desk.form import assign_to\nfrom frappe.model.mapper import get_mapped_doc\n\n\ndef set_missing_values(warranty_claim, method):\n\tif not warranty_claim.customer:\n\t\tcustomer = get_current_customer()\n\t\twarranty_claim.customer = customer.name\n\telse:\n\t\tcustomer = frappe.get_doc(\"Customer\", warranty_claim.customer)\n\n\twarranty_claim.update({\n\t\t\"customer_name\": customer.customer_name,\n\t\t\"contact_person\": get_default_contact(\"Customer\", customer.name)\n\t})\n\n\tif not warranty_claim.contact_email:\n\t\twarranty_claim.contact_email = customer.email_id\n\n\tif not warranty_claim.contact_mobile:\n\t\twarranty_claim.contact_mobile = customer.mobile_no\n\n\tif not warranty_claim.serial_no and frappe.db.exists(\"Serial No\", warranty_claim.unlinked_serial_no):\n\t\twarranty_claim.serial_no = warranty_claim.unlinked_serial_no\n\n\tif 
warranty_claim.serial_no:\n\t\tserial_no = frappe.get_doc(\"Serial No\", warranty_claim.serial_no)\n\n\t\twarranty_claim.update({\n\t\t\t\"item_code\": serial_no.item_code,\n\t\t\t\"item_name\": serial_no.item_name,\n\t\t\t\"item_group\": serial_no.item_group,\n\t\t\t\"description\": serial_no.description,\n\t\t\t\"warranty_amc_status\": serial_no.maintenance_status,\n\t\t\t\"warranty_expiry_date\": serial_no.warranty_expiry_date,\n\t\t\t\"amc_expiry_date\": serial_no.amc_expiry_date,\n\t\t\t\"is_under_warranty\": serial_no.maintenance_status in [\"Under Warranty\", \"Under AMC\"]\n\t\t})\n\n\ndef validate_missing_serial_no(warranty_claim, method):\n\tif warranty_claim.item_group == \"Custom\":\n\t\tif not (warranty_claim.serial_no or warranty_claim.unlinked_serial_no):\n\t\t\tfrappe.throw(_(\"Custom products must have a serial number\"))\n\n\ndef validate_serial_no_warranty(serial_no, method):\n\t# Remove warranty period for old manufactured items that are not in the system\n\tif serial_no.purchase_document_no:\n\t\tif frappe.db.get_value(\"Stock Entry\", serial_no.purchase_document_no, \"purpose\") != \"Manufacture\":\n\t\t\tserial_no.warranty_period = None\n\n\ndef set_iem_owner(warranty_claim, method):\n\tif warranty_claim.item_group and warranty_claim.item_group != \"Custom\":\n\t\twarranty_claim.iem_owner = None\n\t\treturn\n\n\tserial_no = warranty_claim.serial_no or warranty_claim.unlinked_serial_no\n\n\tif serial_no:\n\t\timpression_id = frappe.db.get_value(\"Serial No\", serial_no, \"impression_id\")\n\n\t\tif not impression_id:\n\t\t\t# Split the serial number to retrieve the IID (serial number format: JH{IEM model shorthand}-{IID}-{count})\n\t\t\timpression_id = serial_no.split(\"-\")\n\t\t\timpression_id = impression_id[1] if len(impression_id) > 1 else impression_id[0]\n\n\t\t\ttry:\n\t\t\t\timpression_id = int(impression_id)\n\t\t\texcept ValueError:\n\t\t\t\treturn\n\n\t\t\tif impression_id:\n\t\t\t\tif frappe.db.exists(\"Serial No\", serial_no):\n\t\t\t\t\tfrappe.db.set_value(\"Serial No\", serial_no, \"impression_id\", impression_id)\n\n\t\t\t\t\tiem_owner = frappe.get_all(\"IEM Owner\", or_filters={\"impression_id\": impression_id, \"old_impression_id\": impression_id})\n\t\t\t\t\tif iem_owner:\n\t\t\t\t\t\tfrappe.db.set_value(\"Serial No\", serial_no, \"iem_owner\", iem_owner[0].name)\n\n\t\tif impression_id:\n\t\t\tiem_owner = frappe.get_all(\"IEM Owner\", or_filters={\"impression_id\": impression_id, \"old_impression_id\": impression_id})\n\t\t\tif iem_owner:\n\t\t\t\twarranty_claim.iem_owner = iem_owner[0].name\n\t\t\telse:\n\t\t\t\twarranty_claim.iem_owner = None\n\t\telse:\n\t\t\twarranty_claim.iem_owner = None\n\n\ndef assign_warranty_claim(warranty_claim, method):\n\tif not frappe.get_all(\"ToDo\", filters={\"reference_type\": \"Warranty Claim\", \"reference_name\": warranty_claim.name}):\n\t\trepair_settings = frappe.get_doc(\"Repair Settings\")\n\t\tuser_emails = []\n\n\t\tfor notification in repair_settings.notification_settings:\n\t\t\tif notification.status == warranty_claim.status:\n\t\t\t\tif notification.user:\n\t\t\t\t\tuser_emails.append(notification.user)\n\n\t\t\t\tif notification.role:\n\t\t\t\t\tuser_emails.extend(get_emails_from_role(notification.role))\n\n\t\t\t\tif notification.cc:\n\t\t\t\t\tnotification.cc = notification.cc.replace(\",\", \"\\n\")\n\t\t\t\t\tuser_emails.extend(notification.cc.split(\"\\n\"))\n\n\t\tuser_emails = list(set(user_emails))\n\t\tadmin_email = frappe.db.get_value(\"User\", \"Administrator\", \"email\")\n\n\t\tif 
admin_email in user_emails:\n\t\t\tuser_emails.remove(admin_email)\n\n\t\tfor user in user_emails:\n\t\t\tassign_to.add({\n\t\t\t\t'assign_to': user,\n\t\t\t\t'doctype': \"Warranty Claim\",\n\t\t\t\t'name': warranty_claim.name,\n\t\t\t\t'description': \"Service Request {0} just moved to the '{1}' status\".format(warranty_claim.name, warranty_claim.status),\n\t\t\t\t'priority': 'Medium',\n\t\t\t\t'notify': 1\n\t\t\t})\n\n\ndef receive_stock_item(warranty_claim, method):\n\tif warranty_claim.item_received and warranty_claim.item_code:\n\t\tcreate_stock_entry(warranty_claim)\n\n\ndef set_shipping_date(dti_shipment_note, method):\n\twarranty_claim = frappe.db.get_value(\"Delivery Note\", dti_shipment_note.delivery_note, \"warranty_claim\")\n\n\tif warranty_claim:\n\t\twarranty_claim = frappe.get_doc(\"Warranty Claim\", warranty_claim)\n\n\t\tif method == \"on_submit\":\n\t\t\twarranty_claim.shipping_date = frappe.utils.now_datetime()\n\t\telif method == \"on_cancel\":\n\t\t\twarranty_claim.shipping_date = None\n\n\t\twarranty_claim.save()\n\n\ndef complete_work_order(stock_entry, method):\n\tif method == \"on_submit\":\n\t\tif stock_entry.purpose == \"Material Transfer for Manufacture\":\n\t\t\twarranty_claim = frappe.db.get_value(\"Work Order\", stock_entry.work_order, \"warranty_claim\")\n\n\t\t\tif warranty_claim:\n\t\t\t\tupdate_fields = {\n\t\t\t\t\t\"produced_qty\": 1,\n\t\t\t\t\t\"status\": \"Completed\"\n\t\t\t\t}\n\n\t\t\t\tfrappe.db.set_value(\"Work Order\", {\"warranty_claim\": warranty_claim}, update_fields, val=None)\n\t\t\t\tfrappe.db.commit()\n\n\t\t\t\twarranty_claim = frappe.get_doc(\"Warranty Claim\", warranty_claim)\n\t\t\t\tif warranty_claim.status == \"Repairing\":\n\t\t\t\t\twarranty_claim.status = \"To Deliver\"\n\t\t\t\t\twarranty_claim.resolution_date = frappe.utils.now_datetime()\n\t\t\t\t\twarranty_claim.save()\n\n\ndef create_stock_entry(warranty_claim):\n\tto_warehouse = frappe.db.get_single_value(\"Repair Settings\", \"default_incoming_warehouse\")\n\tserial_no = warranty_claim.serial_no or warranty_claim.unlinked_serial_no\n\n\tstock_entry = make_stock_entry(item_code=warranty_claim.item_code, qty=1,\n\t\t\t\t\t\t\t\t\tto_warehouse=to_warehouse, serial_no=serial_no,\n\t\t\t\t\t\t\t\t\tdo_not_save=True)\n\n\tfor item in stock_entry.items:\n\t\titem.warranty_claim = warranty_claim.name\n\n\t# Include the cable and case in the stock receipt, if entered\n\tif warranty_claim.cable:\n\t\tstock_entry.append(\"items\", {\n\t\t\t\"item_code\": warranty_claim.cable,\n\t\t\t\"t_warehouse\": to_warehouse,\n\t\t\t\"qty\": 1\n\t\t})\n\n\tif warranty_claim.case:\n\t\tstock_entry.append(\"items\", {\n\t\t\t\"item_code\": warranty_claim.case,\n\t\t\t\"t_warehouse\": to_warehouse,\n\t\t\t\"qty\": 1\n\t\t})\n\n\tfor item in stock_entry.items:\n\t\titem.allow_zero_valuation_rate = True\n\n\tstock_entry.insert()\n\tstock_entry.submit()\n\n\tif not warranty_claim.serial_no:\n\t\twarranty_claim.db_set(\"serial_no\", serial_no)\n\n\tif not warranty_claim.item_received:\n\t\twarranty_claim.db_set(\"item_received\", True)\n\n\twarranty_claim.reload()\n\n\treturn stock_entry.name\n\n\ndef flush_raw_materials_for_repair(stock_entry, method):\n\tif method == \"on_submit\":\n\t\tnew_se = frappe.new_doc(\"Stock Entry\")\n\t\tconsumption_warehouse = frappe.db.get_single_value(\"Repair Settings\", \"default_consumption_warehouse\")\n\n\t\tnew_se.update({\n\t\t\t\"purpose\": \"Material Issue\",\n\t\t\t\"work_order\": stock_entry.work_order,\n\t\t\t\"from_bom\": 1,\n\t\t\t\"fg_completed_qty\": 
1,\n\t\t\t\"from_warehouse\": frappe.db.get_single_value(\"Repair Settings\", \"default_consumption_warehouse\"),\n\t\t\t\"reference_stock_entry\": stock_entry.name\n\t\t})\n\n\t\tconsumption_items = [item.as_dict() for item in stock_entry.items if item.t_warehouse == consumption_warehouse]\n\n\t\tif consumption_items:\n\t\t\tfor c_item in consumption_items:\n\t\t\t\tc_item.s_warehouse = consumption_warehouse\n\t\t\t\tc_item.t_warehouse = None\n\n\t\t\tnew_se.set(\"items\", consumption_items)\n\t\t\tnew_se.save()\n\t\t\tnew_se.submit()\n\telif method == \"on_cancel\":\n\t\tif stock_entry.purpose == \"Material Transfer for Manufacture\":\n\t\t\texisting_se = frappe.db.get_value(\"Stock Entry\", filters={\"reference_stock_entry\": stock_entry.name})\n\n\t\t\tif existing_se:\n\t\t\t\texisting_se = frappe.get_doc(\"Stock Entry\", existing_se)\n\t\t\t\texisting_se.cancel()\n\t\t\t\texisting_se.delete()\n\n\ndef make_mapped_doc(target_dt, source_dn, target_doc, target_cdt=None, filters=None,\n\t\t\t\t\tfield_map=None, postprocess=None, child_postprocess=None, check_for_existing=True):\n\tif not field_map:\n\t\tfield_map = {}\n\n\tif not filters:\n\t\tfilters = {\"warranty_claim\": source_dn, \"docstatus\": 1}\n\n\ttable_map = {\n\t\t\"Warranty Claim\": {\n\t\t\t\"doctype\": target_dt,\n\t\t\t\"field_map\": field_map\n\t\t}\n\t}\n\n\tif target_cdt:\n\t\ttable_map.update({\n\t\t\t\"Warranty Claim Services\": {\n\t\t\t\t\"doctype\": target_cdt,\n\t\t\t\t\"field_map\": field_map,\n\t\t\t\t\"postprocess\": child_postprocess\n\t\t\t}\n\t\t})\n\n\t# Multiple sales orders and stock entries can be made against Warranty Claim\n\tif check_for_existing:\n\t\tif frappe.get_all(target_dt, filters=filters):\n\t\t\tfrappe.throw(_(\"A {0} document already exists for this request.\".format(target_dt)))\n\n\treturn get_mapped_doc(\"Warranty Claim\", source_dn, table_map, target_doc, postprocess=postprocess)\n\n\ndef get_wc_dashboard_data(data):\n\tif not data:\n\t\treturn frappe._dict({\n\t\t\t'fieldname': 'warranty_claim',\n\t\t\t'non_standard_fieldnames': {},\n\t\t\t'internal_links': {},\n\t\t\t'transactions': [\n\t\t\t\t{\n\t\t\t\t\t'label': _('Reference'),\n\t\t\t\t\t'items': ['Quotation', 'Sales Order']\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'label': _('Stock'),\n\t\t\t\t\t'items': ['Stock Entry']\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'label': _('Work'),\n\t\t\t\t\t'items': ['Work Order']\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'label': _('Fulfilment'),\n\t\t\t\t\t'items': ['Sales Invoice', 'Delivery Note']\n\t\t\t\t}\n\t\t\t]\n\t\t})\n\n\treturn data","repo_name":"Bloomstack/repairs","sub_path":"repairs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"74463991624","text":"from django.shortcuts import render, get_object_or_404, redirect\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.urls import reverse\r\nfrom django.template import loader\r\nfrom django.contrib.auth.models import User\r\nfrom django.db.models.signals import post_save\r\nfrom notifications.signals import notify\r\nimport datetime\r\nfrom users.models import UserProfile\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n# from .models import TradePost as TradePost_Model\r\n# from .models import pokemon as Pokemon\r\n\r\n\r\nfrom django.utils import timezone\r\n\r\nimport datetime\r\nfrom .models import TradePost, Pokemon, OfferTrade, Favourite, PokeAbilities\r\npkgames = 
[\"Red\", \"Blue\", \"Yellow\", \"Gold\", \"Silver\", \"Crystal\", \"Ruby\", \"Sapphire\", \"Emerald\", \"FireRed\", \"LeafGreen\", \"Diamond\", \"Pearl\", \"Platinum\", \"HeartGold\", \"SoulSilver\", \"Black\", \"White\", \"Colosseum\", \"XD\", \"Black-2\", \"White-2\", \"X\", \"Y\"]\r\npokemon = Pokemon.objects.order_by('name')\r\npokeabilities = PokeAbilities.objects.order_by('name')\r\npktypes = [\"Normal\", \"Fighting\", \"Flying\", \"Poison\", \"Ground\", \"Rock\", \"Bug\", \"Ghost\", \"Steel\", \"Fire\", \"Water\", \"Grass\", \"Electric\", \"Psychic\", \"Ice\", \"Dragon\", \"Dark\", \"Fairy\"]\r\npktypes = sorted(pktypes)\r\npkgames = sorted(pkgames)\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n user = request.user\r\n\r\n # loader.get_template('pokemon/index.html')\r\n # print (request.tradepost.trader)\r\n print (\"profile 0\")\r\n now = datetime.datetime.now()\r\n #pokemon = Pokemon.objects.all()\r\n tradepost = TradePost.objects.all()\r\n #pokeabilities = PokeAbilities.objects.all()\r\n\r\n\r\n #tradepost = TradePost.objects.all()\r\n #return HttpResponse(\"Hello, world. You are at the Pokemon Marketplace.\")\r\n favo = Favourite.objects.filter(trader_id = user.id ).values('post_id')\r\n print(favo)\r\n print(now - datetime.timedelta(days=2))\r\n #tradepost = TradePost.objects.exclude(trader=user.username).filter(Deadline__gte = (now - datetime.timedelta(days=2)))\r\n #tradepost = TradePost.objects.exclude(pk__in = favo).filter(Deadline__gte = (now - datetime.timedelta(days=0)))\r\n tradepost = TradePost.objects.exclude(pk__in = favo)\r\n\r\n print(tradepost)\r\n pkid = \"0\"\r\n pkname = \"0\"\r\n pktype = \"0\"\r\n pkgame = \"0\"\r\n pkability = \"0\"\r\n\r\n trader = \"0\"\r\n pokemon_offer = \"0\"\r\n gender = \"0\"\r\n level = \"0\"\r\n game = \"0\"\r\n deadline = \"0\"\r\n tradepost = tradepost.order_by('pk')\r\n if request.method == 'GET':\r\n reverse = ''\r\n getter = request.GET\r\n if len(getter) > 0:\r\n \r\n if getter.get('trader') != None:\r\n trader = getter.get('trader')\r\n if trader == \"0\":\r\n trader = \"1\"\r\n else:\r\n trader = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'trader')\r\n elif getter.get('pokemon_offer') != None:\r\n pokemon_offer = getter.get('pokemon_offer')\r\n if pokemon_offer == \"0\":\r\n pokemon_offer = \"1\"\r\n else:\r\n pokemon_offer = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_name')\r\n elif getter.get('gender') != None:\r\n gender = getter.get('gender')\r\n if gender == \"0\":\r\n gender = \"1\"\r\n else:\r\n gender = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_gender')\r\n elif getter.get('level') != None:\r\n level = getter.get('level')\r\n if level == \"0\":\r\n level = \"1\"\r\n else:\r\n level = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_level')\r\n elif getter.get('game') != None:\r\n game = getter.get('game')\r\n if game == \"0\":\r\n game = \"1\"\r\n else:\r\n game = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'game')\r\n elif getter.get('deadline') != None:\r\n deadline = getter.get('deadline')\r\n if deadline == \"0\":\r\n deadline = \"1\"\r\n else:\r\n deadline = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'deadline')\r\n \r\n elif request.method == 'POST':\r\n if 'pkid' in request.POST:\r\n pkid = request.POST['pkid']\r\n tradepost = tradepost.filter(pokemon_id=str(pkid).zfill(3))\r\n print (\"profile 1\")\r\n elif 'pkname' 
in request.POST:\r\n pkname = request.POST['pkname']\r\n tradepost = tradepost.filter(pokemon_name=pkname)\r\n elif 'pktype' in request.POST:\r\n pktype = request.POST['pktype']\r\n tradepost = tradepost.filter(pokemon_types__contains=[pktype])\r\n elif 'pkgame' in request.POST:\r\n pkgame = request.POST['pkgame']\r\n tradepost = tradepost.filter(game=pkgame)\r\n print (\"profile 2\")\r\n elif 'pkability' in request.POST:\r\n pkability = request.POST['pkability']\r\n tradepost = tradepost.filter(pokemon_abilities__contains=[pkability])\r\n\r\n return render(request, 'pokemon/index.html',{\r\n 'tradepost':tradepost,'pokemon':pokemon, 'pokeabilities': pokeabilities, 'pktype':pktypes, 'pkgame': pkgames,\r\n '1pk': {'id':pkid, 'name':pkname, 'type':pktype, 'game':pkname, 'ability':pkability},\r\n 'queryString': \"&pkid=\"+pkid+\"&pkname=\"+pkname+\"&pktype=\"+pktype+\"&pkgame=\"+pkgame+\"&pkability=\"+pkability,\r\n 'toggle': {'trader': trader, 'pokemon_offer': pokemon_offer, 'gender': gender, 'level': level, 'game': game, 'deadline': deadline},\r\n \"unread_count\": request.user.notifications.unread().count() if user.is_authenticated else 0,\r\n \"notifications\": request.user.notifications.all() if user.is_authenticated else {}\r\n })\r\n\r\n\r\n@login_required\r\ndef profile(request):\r\n print (\"fav\")\r\n user = request.user\r\n # from notifications.signals import notify\r\n # notify.send(user, recipient=user, verb=\"Notification\")\r\n\r\n fcode = \"0\"\r\n if user.is_authenticated:\r\n # current_user = request.user\r\n fcode = UserProfile.objects.get(user_name=user.username)\r\n\r\n print (user.id)\r\n print(user.email)\r\n print(user.username)\r\n print(\"hello world 2\")\r\n\r\n incomplete_tradepost = TradePost.objects.filter(trader=user.username)#.filter(completed_by=user.username)\r\n completed_post = TradePost.objects.filter(completed_by=user.username)\r\n\r\n tradepost = incomplete_tradepost.union(completed_post)\r\n tradepost = tradepost.order_by('pk')\r\n favourite = Favourite.objects.filter(trader_name=user.username)\r\n offertrade = OfferTrade.objects.filter(trader=user.username)\r\n\r\n return render(\r\n request,\r\n \"pokemon/profile.html/\",\r\n {\"tradepost\": tradepost, \"offertrade\": offertrade, \"favourite\": favourite, \"fcode\": fcode},\r\n )\r\n\r\n\r\ndef resource(request):\r\n\r\n # loader.get_template('pokemon/index.html')\r\n\r\n # return HttpResponse(\"Hello, world. 
You are at the Pokemon Marketplace.\")\r\n return render(request, \"pokemon/resource.html\")\r\n\r\ndef tradelist(request):\r\n user = request.user\r\n\r\n\r\n #loader.get_template('pokemon/index.html')\r\n #pokemon = Pokemon.objects.all()\r\n #pokeabilities = PokeAbilities.objects.all()\r\n\r\n now = datetime.datetime.now()\r\n\r\n favo = Favourite.objects.filter(trader_id = user.id ).values('post_id')\r\n print(favo)\r\n print(now - datetime.timedelta(days=2))\r\n #tradepost = TradePost.objects.exclude(trader=user.username).filter(Deadline__gte = (now - datetime.timedelta(days=2)))\r\n tradepost = TradePost.objects.exclude(pk__in = favo)\r\n #tradepost = TradePost.objects.all()\r\n\r\n pkid = \"0\"\r\n pkname = \"0\"\r\n pktype = \"0\"\r\n pkgame = \"0\"\r\n pkability = \"0\"\r\n\r\n trader = \"0\"\r\n pokemon_offer = \"0\"\r\n gender = \"0\"\r\n level = \"0\"\r\n game = \"0\"\r\n deadline = \"0\"\r\n\r\n tradepost = tradepost.order_by('pk')\r\n\r\n if request.method == 'GET':\r\n reverse = ''\r\n getter = request.GET\r\n if len(getter) > 0:\r\n if getter.get('pkid') != '0':\r\n pkid = getter.get('pkid')\r\n tradepost = tradepost.filter(pokemon_id=str(pkid).zfill(3))\r\n elif getter.get('pkname') != '0':\r\n pkname = getter.get('pkname')\r\n tradepost = tradepost.filter(pokemon_name=pkname)\r\n elif getter.get('pktype') != '0':\r\n pktype = getter.get('pktype')\r\n tradepost = tradepost.filter(pokemon_types__contains=[pktype])\r\n elif getter.get('pkgame') != '0':\r\n pkgame = getter.get('pkgame')\r\n tradepost = tradepost.filter(game=pkgame)\r\n elif getter.get('pkability') != '0':\r\n pkability = getter.get('pkability')\r\n tradepost = tradepost.filter(pokemon_abilities__contains=[pkability])\r\n \r\n if getter.get('trader') != None:\r\n trader = getter.get('trader')\r\n if trader == \"0\":\r\n trader = \"1\"\r\n else:\r\n trader = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'trader')\r\n elif getter.get('pokemon_offer') != None:\r\n pokemon_offer = getter.get('pokemon_offer')\r\n if pokemon_offer == \"0\":\r\n pokemon_offer = \"1\"\r\n else:\r\n pokemon_offer = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_name')\r\n elif getter.get('gender') != None:\r\n gender = getter.get('gender')\r\n if gender == \"0\":\r\n gender = \"1\"\r\n else:\r\n gender = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_gender')\r\n elif getter.get('level') != None:\r\n level = getter.get('level')\r\n if level == \"0\":\r\n level = \"1\"\r\n else:\r\n level = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'pokemon_level')\r\n elif getter.get('game') != None:\r\n game = getter.get('game')\r\n if game == \"0\":\r\n game = \"1\"\r\n else:\r\n game = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'game')\r\n elif getter.get('deadline') != None:\r\n deadline = getter.get('deadline')\r\n if deadline == \"0\":\r\n deadline = \"1\"\r\n else:\r\n deadline = \"0\"\r\n reverse = \"-\"\r\n tradepost = tradepost.order_by(reverse+'deadline')\r\n \r\n elif request.method == 'POST':\r\n if 'pkid' in request.POST:\r\n pkid = request.POST['pkid']\r\n tradepost = tradepost.filter(pokemon_id=str(pkid).zfill(3))\r\n elif 'pkname' in request.POST:\r\n pkname = request.POST['pkname']\r\n tradepost = tradepost.filter(pokemon_name=pkname)\r\n elif 'pktype' in request.POST:\r\n pktype = request.POST['pktype']\r\n tradepost = tradepost.filter(pokemon_types__contains=[pktype])\r\n elif 'pkgame' in 
request.POST:\r\n pkgame = request.POST['pkgame']\r\n tradepost = tradepost.filter(game=pkgame)\r\n elif 'pkability' in request.POST:\r\n pkability = request.POST['pkability']\r\n tradepost = tradepost.filter(pokemon_abilities__contains=[pkability])\r\n\r\n return render(request, 'pokemon/tradelist.html',{\r\n 'tradepost':tradepost,'pokemon':pokemon, 'pokeabilities': pokeabilities, 'pktype':pktypes, 'pkgame': pkgames,\r\n '1pk': {'id':pkid, 'name':pkname, 'type':pktype, 'game':pkname, 'ability':pkability},\r\n 'queryString': \"&pkid=\"+pkid+\"&pkname=\"+pkname+\"&pktype=\"+pktype+\"&pkgame=\"+pkgame+\"&pkability=\"+pkability,\r\n 'toggle': {'trader': trader, 'pokemon_offer': pokemon_offer, 'gender': gender, 'level': level, 'game': game, 'deadline': deadline}\r\n })\r\n\r\ndef pokelocator(request):\r\n pokemon = Pokemon.objects.order_by(\"id\")\r\n return render(request, \"pokemon/pokelocator.html\", {\"pokemon\": pokemon})\r\n\r\n\r\ndef maketrade(request):\r\n if request.method == 'GET':\r\n pokemon_list = Pokemon.objects.order_by('id')\r\n return render(request, 'pokemon/maketrade.html', {'pokemon_list': pokemon_list, 'pkgames': pkgames})\r\n else:\r\n data = request.POST\r\n pkData = data['pkData'].split(' - ')\r\n user = request.user\r\n deadline = timezone.now() + datetime.timedelta(days=3)\r\n pokemon = Pokemon.objects.get(id=int(pkData[0]))\r\n tradepost = TradePost(\r\n trader=user.username,\r\n pokemon_name=pkData[1],\r\n pokemon_id=int(pkData[0]),\r\n pokemon_types=pokemon.types,\r\n pokemon_abilities=pokemon.abilities,\r\n pokemon_gender=data[\"pkGender\"],\r\n pokemon_level=data[\"pkLevel\"],\r\n game=data[\"pkGame\"],\r\n deadline=deadline,\r\n )\r\n tradepost.save()\r\n return HttpResponseRedirect(reverse('pokemon:profile'))\r\n\r\n@login_required\r\ndef offertrade(request, post_id):\r\n\r\n print (post_id)\r\n tradepost = get_object_or_404(TradePost, pk=post_id)\r\n user = request.user\r\n # Need to see whether it has been expired or traded, if yes, we need to redirect the page.\r\n if tradepost.traded_date is not None:\r\n return render(request, 'pokemon/offertrade.html', {'error_message': 'This trade post has been completed.'})\r\n\r\n if request.method == 'GET':\r\n pokemon_list = Pokemon.objects.order_by('id')\r\n # Need to check if the post is the current user's post or not\r\n if tradepost.trader != user.username:\r\n return render(\r\n request,\r\n \"pokemon/offertrade.html\",\r\n {\"tradepost\": tradepost, \"pokemon_list\": pokemon_list, 'pkgames': pkgames},\r\n )\r\n else:\r\n return render(request, 'pokemon/offertrade.html', {'error_message': \"User are prohibited to offer trade with the same user.\"})\r\n else:\r\n # For POST request\r\n data = request.POST\r\n pkData = data[\"pkData\"].split(\" - \")\r\n pokemon = Pokemon.objects.get(id=int(pkData[0]))\r\n offertrade = tradepost.offertrade_set.create(\r\n post_id=post_id,\r\n trader=user.username,\r\n pokemon_id=int(pkData[0]),\r\n pokemon_name=pkData[1],\r\n pokemon_types=pokemon.types,\r\n pokemon_abilities = pokemon.abilities,\r\n pokemon_gender=data[\"pkGender\"],\r\n pokemon_level=data[\"pkLevel\"],\r\n game = data['pkGame'],\r\n )\r\n offertrade.save()\r\n notify.send(user, recipient=User.objects.get(username=tradepost.trader), verb= user.username + \" has send an offer!\")\r\n\r\n return HttpResponseRedirect(reverse('pokemon:profile'))\r\n\r\n# TODO: Need to make a page to put offer and the post into a page (Accept or Decline page)\r\n# TODO: Template, View, (maybe model?)\r\ndef sentoffer(request, 
offer_id):\r\n offer = get_object_or_404(OfferTrade, pk=offer_id)\r\n user = request.user\r\n tradepost = offer.post\r\n\r\n if request.method == 'GET':\r\n if offer.state != '':\r\n return render(request, 'pokemon/sentoffer.html', {'error_message': 'Sorry this offer page has been removed.'})\r\n else:\r\n return render(request, 'pokemon/sentoffer.html', {'tradepost': tradepost, 'offer': offer})\r\n else:\r\n # Prevent two accepted offer\r\n if tradepost.traded_date is None:\r\n if 'accepted' in request.POST:\r\n print(\"offer notify\")\r\n notify.send(user, recipient=User.objects.get(username=offer.trader), verb= user.username + \" accepted your offer!\")\r\n \r\n offer.state = 'accepted' \r\n # Need to decline other offers inside the post.\r\n offers = tradepost.offertrade_set.filter(state='').exclude(pk=offer.pk)\r\n for curr_offer in offers:\r\n if curr_offer.trader != offer.trader:\r\n notify.send(user, recipient=User.objects.get(username=curr_offer.trader), verb=\"The post has been completed! Your offer will be removed.\")\r\n curr_offer.state = 'declined'\r\n curr_offer.save()\r\n tradepost.traded_date = timezone.now()\r\n tradepost.completed_by = offer.trader\r\n tradepost.save()\r\n elif 'declined' in request.POST:\r\n notify.send(user, recipient=User.objects.get(username=offer.trader), verb= user.username + \" declined your offer!\")\r\n offer.state = 'declined'\r\n offer.save()\r\n return HttpResponseRedirect(reverse('pokemon:profile'))\r\n # render(request, 'pokemon/profile.html', {'tradepost': TradePost.objects.filter(trader=user.username)})\r\n\r\ndef chat(request):\r\n\r\n # loader.get_template('pokemon/index.html')\r\n\r\n # return HttpResponse(\"Hello, world. You are at the Pokemon Marketplace.\")\r\n return render(request, \"pokemon/chat.html\")\r\n\r\n\r\ndef notification(request):\r\n\r\n # loader.get_template('pokemon/index.html')\r\n\r\n # return HttpResponse(\"Hello, world. 
You are at the Pokemon Marketplace.\")\r\n return render(request, \"pokemon/notification.html\")\r\n\r\n@login_required\r\ndef favourite(request, post_id):\r\n current_user = request.user\r\n if current_user.is_authenticated:\r\n if request.GET.get(\"Delete\"):\r\n Favourite.objects.filter(pk=post_id).delete()\r\n elif request.GET.get(\"favourite\"):\r\n username = current_user.username\r\n user_id = current_user.id\r\n fav = request.GET.get(\"favourite\")\r\n tradepost = TradePost.objects.get(pk=post_id)\r\n\r\n favourite = tradepost.favourite_set.create(\r\n post_id = post_id,\r\n trader_id = user_id,\r\n trader_name = username,\r\n )\r\n favourite.save()\r\n tradepost.save()\r\n\r\n favi = Favourite.objects.filter(trader_name=username).values()\r\n\r\n past_url = request.META['HTTP_REFERER'].split('?')[0]\r\n redirect_url = ':'.join(past_url.split('/')[3:-1])\r\n if redirect_url == 'pokemon': redirect_url = redirect_url + ':index' \r\n return HttpResponseRedirect(reverse(redirect_url))","repo_name":"aeriamike/pokemon_marketplace","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73500472906","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom misc import load_dataset\nfrom model import model\nfrom predict import predict\n\n# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n\n# Example of a picture\nindex = 8\nplt.imshow(train_set_x_orig[index])\nplt.show()\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")\n\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_x_orig.shape[0]\nnum_px = train_set_x_orig.shape[1]\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\n\n# Reshape the training and test examples\n\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))\n\ntrain_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n\n\n\n# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nplt.show()\n# print (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[d[\"Y_prediction_test\"][0,index]].decode(\"utf-8\") + \"\\\" picture.\")\n\n# Plot learning 
curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()\n\nlearning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()\n\nmy_image = \"cat_on_the_wall_1.jpg\" # change this to the name of your image file\n\n# We preprocess the image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nplt.show()\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")","repo_name":"DeepakSridhar/Deep-Learning-Coursera","sub_path":"Course-1/Logistic Regression with neural network/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"81"} +{"seq_id":"38579656686","text":"import pickle\r\nimport pandas as pd\r\nimport re\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.metrics import accuracy_score\r\nimport streamlit as st\r\n\r\n# Load the dataset\r\ncol = ['sentiment', 'id', 'date', 'flag', 'user', 'tweet']\r\ndf = pd.read_csv(\"D:/Projects/Project Sentiment Analysis/train.csv\", encoding='latin-1', names=col)\r\ndf.drop(['id', 'date', 'flag', 'user'], axis=1, inplace=True)\r\nprint(df.info())\r\n\r\ndf2=pd.read_csv(\"D:/Projects/Project Sentiment Analysis/test.csv\",names=col)\r\ndf2.drop(['id', 'date', 'flag', 'user'], axis=1, inplace=True)\r\nprint(df2.info())\r\n\r\ndf = pd.concat([df, df2], ignore_index=True)\r\nprint(df.info())\r\n# Function to remove hashtags, mentions, and URLs from a tweet\r\ndef data_preprocess(tweet):\r\n # Remove hashtags\r\n tweet = re.sub(r'#\\w+', '', tweet)\r\n\r\n # Remove @ mentions\r\n tweet = re.sub(r'@\\w+', '', tweet)\r\n\r\n # Remove URLs\r\n tweet = re.sub(r'http\\S+|www\\S+|https\\S+', '', tweet)\r\n\r\n return tweet\r\n\r\n# Preprocess the tweets\r\ndf['tweet'] = df['tweet'].apply(data_preprocess)\r\n\r\n# Split the dataset into training and testing sets\r\nX_train, X_test, y_train, y_test = train_test_split(df['tweet'], df['sentiment'], test_size=0.2, random_state=42)\r\n\r\n# Feature extraction using CountVectorizer\r\nvectorizer = CountVectorizer()\r\nX_train_counts = vectorizer.fit_transform(X_train)\r\nX_test_counts = vectorizer.transform(X_test)\r\n\r\n# Train the Naive Bayes classifier\r\nclassifier = MultinomialNB()\r\nclassifier.fit(X_train_counts, y_train)\r\n\r\n# Make predictions on the test set\r\npredictions = 
classifier.predict(X_test_counts)\r\n\r\n# Evaluate the accuracy of the classifier\r\naccuracy = accuracy_score(y_test, predictions)\r\nprint(\"Accuracy:\", accuracy)\r\n\r\n# Save the trained model\r\nwith open(\"model.pkl\", \"wb\") as file:\r\n pickle.dump(classifier, file)\r\nwith open(\"vectorizer.pkl\", \"wb\") as file:\r\n pickle.dump(vectorizer, file)\r\n\r\nprint(\"completed\")\r\n","repo_name":"sakp7/Senitment-Analysis-on-Twitter-dataset","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29556898954","text":"import inspect\nimport os\nimport shutil\nimport sys\nfrom functools import reduce, wraps\n\nimport click\nimport packaging.version as pkgv\nfrom click.exceptions import Exit # noqa: F401\nfrom schema import Optional, Schema\n\n\"\"\"\nThis file contains various click-related bits of vula. None of this is\nstrictly necessary; it is mostly used for debugging. It would be fine to remove\nthe cls=Debuggable from __main__.py and remove all of the DualUse\ndecorators from organize etc (using plain-old click.command to decorate the\norganize class should work) and the software would still run the way we intend\nit to be run in production.\n\nSo what do these magical click things do? They make it so that you can use -d\nto do a post-mortem when there are exceptions (except maybe not in glib\nthreads... sad). They also make it so that you can run class methods as\nsubcommands, and so that there are subcommands for accessing attributes.\n\nEg, this:\n vula organize sync\nwill instantiate an organize object, loads its state, and then call the\nobject's sync() method. (Note that the way we actually intend sync to be called\nis \"vula sync\" which calls Organize's sync method via dbus; this \"vula\norganize sync\" method is instantiating an organize object from its state file\nand calling the method on that.)\n\nOr, this: vula -d peer.Descriptor --addrs 10.168.128.160 --c\nvdDpRSGtsqvui8dox0iBq0SSp/zXSEU2dx5s5x+qcquSp0oIWgDuqJw50e9wrIuGub+SXzU0s5EIR\nH49QmNYDw== --dt\n86400 --e false --hostname wg-mdns-test3.local. --pk\nEqcQ5gYxzGtzg7B4xi83kLyfuSMp8Kv3cmAJMs12nDM= --port 5354 --r '' --s\nT6htsKgwCp5MAXjPiWxtVkccg+K2CePsVa7uyUgxE2ouYKXg2qNL+0ut3sSbVTYjzFGZSCO6n80SR\naR+BIeOCg== --vf\n1606276812 --vk 90Y5JGEjoklSDw51ffoHYXhWs49TTnCQ/D5yBbuf3Zg= valid\n\n...will instantiate a Descriptor object and verify that its signature is\ncorrect.\n\nNote that the first example does not require -d, but the second one does:\nautomatic resolution of dotted attribute paths only happens in the top-level\ncommand and only when debug mode is enabled. 
The first example, meanwhile, uses\nthe fact that the Organize class is a DualUse.\n\nAnother example which relies on Debuggable, and works with a function that\nisn't decorated at all (types are inferred from type annotations):\n sudo vula -d configure._reconfigure_restart_systemd_services --help\n\nOne more example, of chaining the attribute-getting functions:\n sudo vula organize state system_state current_subnets\n\nAnyway, if this gets in the way we can get rid of some or all of it.\n\"\"\"\n\n\nclass OrderedGroup(click.Group):\n def list_commands(self, ctx):\n return list(self.commands)\n\n\nclass Debuggable(OrderedGroup):\n\n \"\"\"\n This is a subclass of click.Group which adds a --debug option which enables\n two features which are useful for debugging:\n\n - It will drop to a pdb.post_mortem shell after any unhandled exception\n\n - It allows for automatic commandline access to any function annotated\n with basic types (str, int, maybe others?)\n\n To use it, just decorate with @Debuggable.command() where you would\n otherwise be using @click.group()\n \"\"\"\n\n def __init__(self, scope=None, **attrs):\n self.scope = scope or {}\n super(Debuggable, self).__init__(**attrs)\n self.params.append(\n click.Option(\n ('-d', '--debug/--no-debug'),\n show_default=True,\n hidden=not os.environ.get('DEBUG'),\n is_flag=True,\n default=(\n os.environ.get('DEBUG')\n and sys.stdin.isatty()\n and sys.stdout.isatty()\n ),\n help=\"Drop to a pdb.post_mortem shell upon uncaught exception \"\n \"(default True if DEBUG env var is set and stdin/out are \"\n \"ttys, False otherwise)\",\n )\n )\n\n def invoke(self, ctx):\n try:\n return super(Debuggable, self).invoke(ctx)\n except Exception as ex:\n if isinstance(ex, click.exceptions.ClickException):\n raise ex\n if isinstance(ex, click.exceptions.Exit):\n raise ex\n if ctx.params.get('debug'):\n import pdb\n import traceback\n\n tb = sys.exc_info()[2]\n traceback.print_tb(tb)\n print(\n \"stopping to allow inspecting exception:\\n\\n \"\n \"%r\\n\\ntype c to continue to \"\n \"post-mortem frame, or q to quit.\\n \" % (ex,)\n )\n pdb.set_trace()\n print(\"pdb.post_mortem on %r:\" % (ex,))\n pdb.post_mortem(tb)\n else:\n raise ex\n\n def get_command(self, ctx, command):\n\n if command in self.commands:\n return super(Debuggable, self).get_command(ctx, command)\n\n elif ctx.params.get('debug'):\n try:\n cmd = reduce(\n lambda a, b: a.get(b)\n if type(a) is dict\n else getattr(a, b),\n command.split('.'),\n self.scope,\n )\n except Exception as ex:\n print(ex)\n return None\n if isinstance(cmd, click.core.Command):\n return cmd\n elif hasattr(cmd, 'cli') and isinstance(\n cmd.cli, click.core.Command\n ):\n return cmd.cli\n elif callable(cmd):\n return _click_command_from_annotated_function(cmd)\n\n\ndef _click_command_from_annotated_function(cmd):\n\n \"\"\"\n This metaprogramming nonsense is only used for development, and hardly even\n that.\n\n There is actually a library called \"autoclick\" which presumably does a\n better job of doing what this function is doing.\n \"\"\"\n\n @click.command()\n def wrapped(**kw):\n print(cmd(**kw))\n\n spec = inspect.getfullargspec(cmd)\n none = object()\n defaults = list(spec.defaults or ())\n defaults = [none] * (len(spec.args) - len(defaults)) + defaults\n assert len(defaults) == len(spec.args), \"logic error\"\n for name, default in zip(spec.args, defaults):\n if default is none:\n wrapped = click.argument(name, type=spec.annotations[name])(\n wrapped\n )\n else:\n wrapped = click.option(\n '--' + name,\n 
show_default=True,\n default=default,\n type=spec.annotations[name],\n )(wrapped)\n return wrapped\n\n\nclass DualUse(click.Group):\n\n \"\"\"\n @DualUse.object() is a class decorator which enables class instances to be\n usable both as normal python objects and as click commandline functions.\n\n Methods which should be cli accessible should be decorated with\n @DualUse.method() or @property.\n \"\"\"\n\n def __init__(self, *a, **kw):\n callback = kw.pop('callback')\n\n @wraps(callback)\n @click.pass_context\n def wrapper(ctx, *a, **kw):\n instance = callback(*a, **kw)\n if 'magic_instance' not in ctx.meta.setdefault(\n self.callback.__name__, {}\n ):\n ctx.meta[self.callback.__name__]['magic_instance'] = instance\n return instance\n\n super(DualUse, self).__init__(callback=wrapper, *a, **kw)\n\n @property\n def all_commands(self):\n \"\"\"\n Return dictionary of DualUse methods and child classes, with\n self.commands applied on top of it.\n\n (FIXME: possibly wg.Interface is the only DualUse.object that actually\n uses self.commands/add_command? if so, this could be renamed\n 'commands' if the link commands were ported to a nested dualuse\n object...)\n \"\"\"\n res = {\n value.cli.name: value.cli\n for value in vars(self).values()\n if hasattr(value, 'cli') and value.cli is not self\n }\n res.update(**self.commands)\n return res\n\n def list_commands(self, ctx):\n return list(self.all_commands.keys()) + [\n name\n for name, value in vars(self.callback).items()\n if isinstance(value, property) and name != \"__wrapped__\"\n ]\n\n def get_command(self, ctx, name):\n \"\"\"\n This got way out of hand. The 'else' branch in this method is just used\n for debugging, and not for everyday use. It allows accessing attributes\n recursively, so you can say things like \"vula organize state\n system_state our_wg_pk\" and it will print the pk. But, for certain\n objects, it hits max recursion depth, and I haven't figured out why\n yet. 
I reserve the right to remove this unsupported magic in the\n future.\n \"\"\"\n res = self.all_commands.get(name)\n\n if res:\n return res\n\n else:\n\n @click.group(\n name=name,\n cls=type(self),\n invoke_without_command=True,\n help=\"Read %r property of %s object\"\n % (name, self.callback.__name__),\n )\n @click.pass_context\n class _property_printer(object):\n @property\n def value(self_):\n try:\n return getattr(\n ctx.meta[self.callback.__name__]['magic_instance'],\n name,\n )\n except Exception as ex:\n click.echo(ex)\n\n def __init__(self_, ctx):\n if ctx.invoked_subcommand is None:\n echo_maybepager(str(self_.value))\n\n def __getattr__(self_, name):\n return getattr(self_.value, name)\n\n _property_printer.callback.__name__ += ':' + name\n\n return _property_printer\n\n @classmethod\n def method(cls, opts=(), *a, **kw):\n \"\"\"\n Decorator to make methods of DualUse.object classes cli-accessible\n \"\"\"\n\n def decorator(f):\n @wraps(f)\n def wrapper(*a, **kw):\n ctx = click.get_current_context()\n instance = ctx.meta[f.__qualname__.split('.')[0]][\n 'magic_instance'\n ]\n res = f(instance, *a, **kw)\n if res:\n res = str(res)\n if res[-1] == '\\n':\n res = res[:-1]\n click.echo(res)\n\n wrapper.__doc__ = f.__doc__\n decos = opts + (click.command(*a, **kw),)\n wrapper = reduce(lambda a, b: b(a), decos, wrapper)\n f.cli = wrapper\n # note: returning undecorated function, which has click command\n # attached to it\n return f\n\n return decorator\n\n @classmethod\n def object(cls, *a, **kw):\n \"\"\"\n Decorator which installs an object instantiation cli in the 'cli'\n attribute of a class.\n \"\"\"\n\n def decorator(f):\n f.cli = wraps(f)(\n click.group(cls=cls, **kw)(schema2click_options(f))\n )\n return f\n\n return decorator\n\n\ndef _make_type(schema):\n class _type(click.ParamType):\n \"\"\"\n Note that these click types have awful looking type names currently, as\n the name is literally the whole definition. 
This will hopefully look\n better when we upgrade to the new version of the schema module which\n allows schemas to have proper names, so we won't need to see the whole\n schema DSL source in our --help output.\n \"\"\"\n\n name = str(schema)\n\n def convert(self, value, param, ctx):\n return Schema(schema).validate(value)\n\n return _type\n\n\ndef schema2click_options(f):\n if hasattr(f, 'schema'):\n for key, sub_schema in f.schema._schema.items():\n if type(key) is Optional:\n key = key._schema\n default = (getattr(f, 'default') or {}).get(key)\n _type = _make_type(sub_schema)\n f = click.option(\n \"--%s\" % (key,),\n type=_type(),\n default=default,\n show_default=True,\n )(f)\n return f\n\n\ndef red(s):\n \"\"\"\n Formats the given string 's' to red foreground color.\n\n >>> red_string = \"This text is red\"\n >>> print(red(red_string))\n \\x1b[31mThis text is red\\x1b[0m\n \"\"\"\n return click.style(s, fg=\"red\")\n\n\ndef green(s):\n \"\"\"\n Formats the given string 's' to green foreground color.\n\n >>> green_string = \"This text is green\"\n >>> print(green(green_string))\n \\x1b[32mThis text is green\\x1b[0m\n \"\"\"\n return click.style(s, fg=\"green\")\n\n\ndef blue(s):\n \"\"\"\n Formats the given string 's' to blue foreground color.\n\n >>> blue_string = \"This text is blue\"\n >>> print(blue(blue_string))\n \\x1b[34mThis text is blue\\x1b[0m\n \"\"\"\n return click.style(s, fg=\"blue\")\n\n\ndef yellow(s):\n \"\"\"\n Formats the given string 's' to yellow foreground color.\n\n >>> yellow_string = \"This text is yellow\"\n >>> print(yellow(yellow_string))\n \\x1b[33mThis text is yellow\\x1b[0m\n \"\"\"\n return click.style(s, fg=\"yellow\")\n\n\ndef bold(s):\n \"\"\"\n Formats the given string 's' to be bold.\n\n >>> bold_string = \"This text is bold\"\n >>> print(bold(bold_string))\n \\x1b[1mThis text is bold\\x1b[0m\n \"\"\"\n return click.style(s, bold=True)\n\n\ndef echo_maybepager(s):\n if s.count(\"\\n\") < shutil.get_terminal_size()[1]:\n click.echo(s)\n else:\n click.echo_via_pager(s)\n\n\ndef shell_complete_helper(fn):\n \"\"\"\n This is a helper to maintain compatibility with both click 7.x and 8.x.\n\n We could pass the old \"autocompletion\" argument to click 7.x but instead we\n pass nothing because autocompletion didn't work there anyway.\n \"\"\"\n if pkgv.parse(click.__version__) >= pkgv.parse('8.0.0'):\n return dict(shell_complete=fn)\n return {}\n","repo_name":"muelli/vula","sub_path":"vula/notclick.py","file_name":"notclick.py","file_ext":"py","file_size_in_byte":14075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12231900155","text":"from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QFileDialog,QAction, QMainWindow\nimport PyQt5.QtCore as QtCore\nfrom cellphy.Analysis.Track import Track\nfrom cellphy.Analysis.Channel import Channel\nimport pandas as pd\n\n\nclass IEDWidget(QMainWindow):\n track_clicked = QtCore.pyqtSignal(Track)\n display_msd_channel = QtCore.pyqtSignal(Channel)\n display_ied_channel = QtCore.pyqtSignal(Channel)\n\n def __init__(self, channel, parent=None):\n QMainWindow.__init__(self, parent)\n\n self.channel = channel\n self.table_widget = QTableWidget()\n self.tool_bar = self.addToolBar('IED ToolBar')\n self.setCentralWidget(self.table_widget)\n\n self.create_csv_act = QAction('Export IED')\n self.create_csv_act.triggered.connect(self.export_csv)\n self.tool_bar.addAction(self.create_csv_act)\n self.ied_headers = ['Time', 'Mean', 'Standard Deviation']\n\n 
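# A second toolbar action exports the raw per-time-point distances (Time, Distance rows) via export_dist_csv below.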
self.create_dist_csv_act = QAction('Export Distance')\n self.create_dist_csv_act.triggered.connect(self.export_dist_csv)\n self.tool_bar.addAction(self.create_dist_csv_act)\n self.distance_headers = ['Time', 'Distance']\n\n self.prepare_table()\n\n def prepare_table(self):\n ied = self.channel.get_time_point_mean_and_stdev()\n packets = list(ied.values())\n #\n self.table_widget.setColumnCount(3)\n self.table_widget.setHorizontalHeaderLabels(self.ied_headers)\n for row, packet in enumerate(packets):\n self.table_widget.setRowCount(row+1)\n for col, val in enumerate(packet):\n table_item = QTableWidgetItem(str(val))\n self.table_widget.setItem(row, col, table_item)\n\n def export_csv(self):\n _csv = ''\n # get headers 1st\n _csv += ','.join(self.ied_headers) + '\\n'\n\n # now get the data\n for row in range(self.table_widget.rowCount()):\n row_vals = []\n for col in range(self.table_widget.columnCount()):\n row_vals.append(self.table_widget.item(row, col).text())\n _csv += ','.join(row_vals) + '\\n'\n\n file, _ = QFileDialog.getSaveFileName(self, \"Select file name IED .csv files\",\n QtCore.QDir.homePath(), \"CSV (*.csv)\")\n if file:\n fd = open(file, 'w')\n fd.write(_csv)\n fd.close()\n\n def export_dist_csv(self):\n # pass\n file, _ = QFileDialog.getSaveFileName(self, \"Select file name IED .csv files\",\n QtCore.QDir.homePath(), \"CSV (*.csv)\")\n _csv = \"\"\n _csv += ','.join(self.distance_headers) + '\\n'\n if file:\n distance_dict = self.channel.get_distance_between_pos_by_time()\n for time, pos in distance_dict.items():\n for d in pos:\n row = list()\n row.append(str(time))\n row.append(str(d))\n _csv += ','.join(row) + '\\n'\n\n fd = open(file, 'w')\n fd.write(_csv)\n fd.close()\n","repo_name":"zeroth/cellphy","sub_path":"cellphy/tracking_analyzer_gui/IEDWidget.py","file_name":"IEDWidget.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"}
{"seq_id":"39476492017","text":"import unittest\nfrom .active_reinforcement import Edge, utility, next_utility\n\n\nclass PerceptronTestCases(unittest.TestCase):\n def test_utility_a1(self):\n \"\"\"Midterm 2, Problem 20\"\"\"\n edges = [\n Edge(0.5, -1), # arrow from s1 to s1\n Edge(0.1, -5), # arrow from s1 to s2\n Edge(0.1, -2.9), # arrow from s1 to s3\n Edge(0.3, 8), # arrow from s1 to s3\n ]\n expected_utility = utility(edges)\n self.assertAlmostEqual(expected_utility, 1.11, 2)\n\n def test_utility_a2(self):\n \"\"\"Midterm 2, Problem 21\"\"\"\n edges = [\n Edge(0.3, -5), # arrow from s1 to s1\n Edge(0.7, -1) # arrow from s1 to s2\n ]\n expected_utility = utility(edges)\n self.assertAlmostEqual(expected_utility, -2.2, 1)\n\n def test_next_utility(self):\n \"\"\"Midterm 2, Problem 23\"\"\"\n edges_a1 = [\n Edge(0.5, -1), # arrow from s1 to s1\n Edge(0.1, -5), # arrow from s1 to s2\n Edge(0.1, -2.9), # arrow from s1 to s3\n Edge(0.3, 8), # arrow from s1 to s3\n ]\n\n edges_a2 = [\n Edge(0.3, -5), # arrow from s1 to s1\n Edge(0.7, -1) # arrow from s1 to s2\n ]\n\n reward = -1.8\n leszam = 0.5 # discount factor\n\n nu = next_utility(\n reward,\n leszam,\n max(utility(edges_a1), utility(edges_a2))\n )\n\n self.assertAlmostEqual(nu, -1.245, 3)\n","repo_name":"bkrmendy/MIChecker","sub_path":"src/active_reinforcement_tests.py","file_name":"active_reinforcement_tests.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7858726886","text":"from itertools import product\nfrom treelib import Node, Tree\nimport torch\nfrom torch.nn.functional import softmax\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nimport io\nfrom contextlib import redirect_stdout\n\n# Initialize the tokenizer and model from the HuggingFace Transformers library.\ntokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\nmodel = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n# Define the input sequence.\ninput_sequence = \"It is important for all countries to try harder to reduce carbon emissions because\"\n\n# Encode the input text.\ninput_ids = tokenizer(input_sequence, return_tensors=\"pt\").input_ids\n\n# Define arrays for different temperature and top_p values\ntemperature_values = [0.2, 0.5, 0.7, 1.0]\ntop_p_values = [0.1, 0.3, 0.5, 0.7, 0.9]\n\n# Dictionary to store trees for each combination of temperature and top_p\ntrees = {}\n\n# Loop over each combination of temperature and top_p values\nfor temperature, top_p in product(temperature_values, top_p_values):\n # Generate text using the model with the current combination of temperature and top_p\n outputs = model.generate(\n input_ids,\n do_sample=True,\n max_length=30,\n temperature=temperature,\n top_p=top_p,\n return_dict_in_generate=True,\n output_scores=True\n )\n\n # Retrieve the scores (logits) for each token generated\n generated_scores = outputs['scores']\n\n # Create a new tree for this combination\n tree = Tree()\n tree.create_node(f\"Root (Temp: {temperature}, Top-p: {top_p})\", \"root\") # Root node with params\n\n for i, score in enumerate(generated_scores):\n parent_id = f\"step_{i-1}_option_0\" if i > 0 else \"root\"\n\n # Apply softmax to convert logits to probabilities\n probs = torch.softmax(score, dim=-1)\n\n # Get the top 3 probabilities and their token indices\n top_probs, top_indices = torch.topk(probs, 3, dim=-1)\n\n for j in range(top_indices.size(1)):\n token_id = top_indices[0][j].item()\n prob = top_probs[0][j].item()\n token = tokenizer.decode([token_id]) # Decode the list of one token ID\n\n node_id = f\"step_{i}_option_{j}\"\n node_label = f\"{token} ({prob:.2f})\"\n tree.create_node(node_label, node_id, parent=parent_id)\n\n # Save the tree for this combination\n trees[(temperature, top_p)] = tree\n tree.save2file(\"tree-\"+str(temperature)+\"-\"+str(top_p)+\".txt\")\n","repo_name":"BrianQJN/Natrual_Language_Processing_Project","sub_path":"A4/A4_1_3.py","file_name":"A4_1_3.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"72700829386","text":"from selenium import webdriver\nimport time\nimport requests\nfrom config import IMG_BB_API_KEY\nfrom selenium.webdriver.common.by import By\nimport base64\n\n# 
Downloading the image using Selenium\ndriver = webdriver.Chrome()\ndriver.get(\"https://www.digikey.com/en/products/detail/molex/2033900323/16920881\")\ntime.sleep(2)\nimg = driver.find_element(By.XPATH, \"/html/body/div[2]/main/div/div[1]/div[1]/div[1]/div/div[1]/div/img\")\nsrc = img.get_attribute('src')\ndriver.quit()\n\n# Saving the image to disk\nwith open(\"image.jpg\", \"wb\") as f:\n f.write(requests.get(src).content)\n\n# Uploading the image to a free image hosting website\n\nwith open(\"image.jpg\", \"rb\") as file:\n url = \"https://api.imgbb.com/1/upload\"\n payload = {\n \"key\": IMG_BB_API_KEY,\n \"image\": base64.b64encode(file.read()),\n }\n res = requests.post(url, payload)\n\nres.raise_for_status()\n\n# Printing out the public URL for that image\nprint(res.json()['data']['url_viewer'])","repo_name":"bjkayani/airtable-component-inventory-uploader","sub_path":"image_upload_example.py","file_name":"image_upload_example.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38910306198","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import norm\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\nfrom matplotlib import rc\nimport sys\n\nfilename1 = sys.argv[1]\nifi = int(sys.argv[2])\next = \".dat\"\nVlast = np.array([])\n\nfor i in range(1,ifi+1):\n\tname = \"%s%s%s\" % (filename1, str(i), ext)\n\tdata1 = np.genfromtxt(name)\n\tt1, V1 = data1[:,0], data1[:,1]\n\tVlast = np.append(Vlast, V1[-1])\n\nn, bins, patches = plt.hist(Vlast,50)\nmu = np.mean(Vlast)\nsigma = np.std(Vlast)\nplt.clf()\n\n# setup the normalization and the colormap\nnormalize = mcolors.Normalize(vmin=min(norm.pdf(bins, mu, sigma)), vmax=max(norm.pdf(bins, mu, sigma)))\ncolormap = cm.jet\n\nplt.rc('text',usetex=True)\nfor i in range(1,ifi+1):\n\tname = \"%s%s%s\" % (filename1, str(i), ext)\n\tdata1 = np.genfromtxt(name)\n\tt1, V1 = data1[:,0], data1[:,1]\n\ty1 = norm.pdf(V1[-1], mu, sigma)\n\tplt.plot(t1, V1, color=colormap(normalize(y1)))\n\n\n# setup the colorbar\nscalarmappaple = cm.ScalarMappable(norm=normalize, cmap=colormap)\n#scalarmappaple.set_array(alphavec)\nscalarmappaple.set_array(norm.pdf(bins, mu, sigma))\nclb = plt.colorbar(scalarmappaple)\nclb.ax.set_title(r'$\\rho$')\n\n# show the figure\nplt.xlabel(r't (segundos)')\nplt.ylabel(r'$V$ (volts)')\nplt.title(\"$\\mu = $ %.3e, $\\sigma = $ %.3e\" % (mu,sigma))\nplt.grid(True)\nplt.savefig('plotFillNorm.eps')\nplt.show()\n\n","repo_name":"volpatto/framework-incertezas","sub_path":"pos_processamento/fillNormal.py","file_name":"fillNormal.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"22408858790","text":"from flask import Flask, render_template, redirect, request, session\napp = Flask(__name__)\napp.secret_key = 'secret1'\n# our index route will handle rendering our form\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/process', methods=['POST'])\ndef process():\n session['fullname'] = request.form['fullname']\n session['location'] = request.form['location']\n session['language'] = request.form['language']\n session['comment'] = request.form['comment']\n print(session['fullname'])\n print(session['location'])\n print(session['language'])\n print(session['comment'])\n return 
redirect('/result')\n\n@app.route('/result')\ndef result():\n surveyResults = session\n return render_template('submitted_info.html', surveyResults=surveyResults)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"BrentCleary/Coding-Dojo_Winter_2023","sub_path":"Python/py_w1d5/dojo_survey/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23591401800","text":"# 11Function.py\n\ndef foo():\n name = input(\"Insert name : \")\n print(name , \"님 반갑습니다.\")\n\ndef bar():\n table = input(\" 몇 단을 원하나? \")\n print(table , \"단....\")\n\n# Takes arguments, no return value\ndef sayHello(name):\n print(\"Hello Mr. \", name)\n print(\"Nice to Meet U\")\n\ndef add(a, b):\n return a + b\n\n\n#foo() # getYourName()\n#bar() # printMultiplication()\nsayHello(\"Kim\")\nsayHello(\"Lee\")\n\nvalue = add(1,2)\nprint(\"value = \", value)\n\n# Read two values from the user.\n# 11 + 22 = 33\n\ndef noReturn(name):\n if name == \"KIM\":\n print(\"안녕하세요.\")\n return\n else:\n print(\"Hello\")\n\n print(\"Nice to see u.\")\n return \"OK\"\n\nret = noReturn(\"KIM\")\nprint(type(ret))\n\nret = noReturn(\"LEE\")\nprint( type(ret) )\n\n# range(10)\n# range(0, 10)\n# range(0, 10, 1)\n\ndef myRange(start, end=0, step=1):\n print(\"start = \", start , \", end = \", end , \", step = \", step)\n\nmyRange(10)\nmyRange(10, 5)\nmyRange(10, 5, 3)\n\n\n# Variable-length arguments\n# * : tuple\n# ** : Dict\ndef introFamily(name, *familyNames, **familyInfo):\n print(\"내 이름은 \", name, \"입니다.\")\n print(\"가족의 이름은 다음과 같다.\")\n for name in familyNames:\n print(name)\n print(\"-\" * 80)\n for key in familyInfo.keys():\n print(key, \":\", familyInfo[key])\n\nintroFamily(\"홍길동\", \"엄마\", \"아빠\", \"형\", \"동생\", \"할머니\", 주소=\"서울\", 가훈=\"투표하자\")\nintroFamily(\"이순신\", \"할머니\", \"할아버지\" , 위치=\"선릉\")\n\n","repo_name":"JoobeeJung/python","sub_path":"11Function.py","file_name":"11Function.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20726046078","text":"import community\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\n\n\n# ---- NetworkX compatibility\ndef node_iter(G):\n if float(nx.__version__)<2.0:\n return G.nodes()\n else:\n return G.nodes\n\ndef node_dict(G):\n if float(nx.__version__)>2.1:\n node_dict = G.nodes\n else:\n node_dict = G.node\n return node_dict\n# ---------------------------\n\n\ndef imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None, origin=None):\n from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n from matplotlib.figure import Figure\n\n fig = Figure(figsize=arr.shape[::-1], dpi=1, frameon=False)\n canvas = FigureCanvas(fig)\n fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)\n fig.savefig(fname, dpi=1, format=format)\n\ndef plot_graph(plt, G):\n plt.title('num of nodes: '+str(G.number_of_nodes()), fontsize = 4)\n parts = community.best_partition(G)\n values = [parts.get(node) for node in G.nodes()]\n colors = []\n for i in range(len(values)):\n if values[i] == 0:\n colors.append('red')\n if values[i] == 1:\n colors.append('green')\n if values[i] == 2:\n colors.append('blue')\n if values[i] == 3:\n colors.append('yellow')\n if values[i] == 4:\n colors.append('orange')\n if values[i] == 5:\n colors.append('pink')\n if values[i] == 6:\n colors.append('black')\n plt.axis(\"off\")\n pos = nx.spring_layout(G)\n # pos = 
nx.spectral_layout(G)\n nx.draw_networkx(G, with_labels=True, node_size=4, width=0.3, font_size = 3, node_color=colors,pos=pos)\n\ndef draw_graph_list(G_list, row, col, fname = 'figs/test'):\n # draw graph view\n plt.switch_backend('agg')\n for i, G in enumerate(G_list):\n plt.subplot(row,col,i+1)\n plot_graph(plt, G)\n \n plt.tight_layout()\n plt.savefig(fname+'_view.png', dpi=600)\n plt.close()\n\n # draw degree distribution\n plt.switch_backend('agg')\n for i, G in enumerate(G_list):\n plt.subplot(row, col, i + 1)\n G_deg = np.array(list(G.degree(G.nodes()).values()))\n bins = np.arange(20)\n plt.hist(np.array(G_deg), bins=bins, align='left')\n plt.xlabel('degree', fontsize = 3)\n plt.ylabel('count', fontsize = 3)\n G_deg_mean = 2*G.number_of_edges()/float(G.number_of_nodes())\n plt.title('average degree: {:.2f}'.format(G_deg_mean), fontsize=4)\n plt.tick_params(axis='both', which='major', labelsize=3)\n plt.tick_params(axis='both', which='minor', labelsize=3)\n plt.tight_layout()\n plt.savefig(fname+'_degree.png', dpi=600)\n plt.close()\n\n # degree_sequence = sorted(nx.degree(G).values(), reverse=True) # degree sequence\n # plt.loglog(degree_sequence, 'b-', marker='o')\n # plt.title(\"Degree rank plot\")\n # plt.ylabel(\"degree\")\n # plt.xlabel(\"rank\")\n # plt.savefig('figures/degree_view_' + prefix + '.png', dpi=200)\n # plt.close()\n\n # draw clustering distribution\n #plt.switch_backend('agg')\n #for i, G in enumerate(G_list):\n # plt.subplot(row, col, i + 1)\n # G_cluster = list(nx.clustering(G).values())\n # bins = np.linspace(0,1,20)\n # plt.hist(np.array(G_cluster), bins=bins, align='left')\n # plt.xlabel('clustering coefficient', fontsize=3)\n # plt.ylabel('count', fontsize=3)\n # G_cluster_mean = sum(G_cluster) / len(G_cluster)\n # # if i % 2 == 0:\n # # plt.title('real average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)\n # # else:\n # # plt.title('pred average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)\n # plt.title('average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)\n # plt.tick_params(axis='both', which='major', labelsize=3)\n # plt.tick_params(axis='both', which='minor', labelsize=3)\n #plt.tight_layout()\n #plt.savefig(fname+'_clustering.png', dpi=600)\n #plt.close()\n\n ## draw circle distribution\n #plt.switch_backend('agg')\n #for i, G in enumerate(G_list):\n # plt.subplot(row, col, i + 1)\n # cycle_len = []\n # cycle_all = nx.cycle_basis(G)\n # for item in cycle_all:\n # cycle_len.append(len(item))\n\n # bins = np.arange(20)\n # plt.hist(np.array(cycle_len), bins=bins, align='left')\n # plt.xlabel('cycle length', fontsize=3)\n # plt.ylabel('count', fontsize=3)\n # G_cycle_mean = 0\n # if len(cycle_len)>0:\n # G_cycle_mean = sum(cycle_len) / len(cycle_len)\n # # if i % 2 == 0:\n # # plt.title('real average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)\n # # else:\n # # plt.title('pred average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)\n # plt.title('average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)\n # plt.tick_params(axis='both', which='major', labelsize=3)\n # plt.tick_params(axis='both', which='minor', labelsize=3)\n #plt.tight_layout()\n #plt.savefig(fname+'_cycle.png', dpi=600)\n #plt.close()\n\n ## draw community distribution\n #plt.switch_backend('agg')\n #for i, G in enumerate(G_list):\n # plt.subplot(row, col, i + 1)\n # parts = community.best_partition(G)\n # values = np.array([parts.get(node) for node in G.nodes()])\n # counts = np.sort(np.bincount(values)[::-1])\n # pos = np.arange(len(counts))\n # 
plt.bar(pos,counts,align = 'edge')\n # plt.xlabel('community ID', fontsize=3)\n # plt.ylabel('count', fontsize=3)\n # G_community_count = len(counts)\n # # if i % 2 == 0:\n # # plt.title('real average clustering: {}'.format(G_community_count), fontsize=4)\n # # else:\n # # plt.title('pred average clustering: {}'.format(G_community_count), fontsize=4)\n # plt.title('average clustering: {}'.format(G_community_count), fontsize=4)\n # plt.tick_params(axis='both', which='major', labelsize=3)\n # plt.tick_params(axis='both', which='minor', labelsize=3)\n #plt.tight_layout()\n #plt.savefig(fname+'_community.png', dpi=600)\n #plt.close()\n\n\ndef exp_moving_avg(x, decay=0.9):\n shadow = x[0]\n a = [shadow]\n for v in x[1:]:\n shadow -= (1-decay) * (shadow-v)\n a.append(shadow)\n return a\n\n\n","repo_name":"RexYing/diffpool","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"81"} +{"seq_id":"40145338821","text":"def swapFile(file1,file2):\n fileA = open(file1)\n fileB = open(file2)\n\n dataA = fileA.read()\n dataB = fileB.read()\n\n fileAWrite = open(file1,'w')\n fileBWrite = open(file2,'w')\n\n fileAWrite.write(dataB)\n fileBWrite.write(dataA)\n\ninput1 = input(\"Which file would you like to swap?\")\ninput2 = input(\"Which file would you like to swap that file with?\")\n\nswapFile(input1,input2)\n#\\Users\\singgurung\\Documents\\Coding\\project98\\text1.txt\n#\\Users\\singgurung\\Documents\\Coding\\project98\\text2.txt","repo_name":"DankDuckz/FileSwap","sub_path":"fileSwap.py","file_name":"fileSwap.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8991606939","text":"import time\ntime.sleep(1)\nprint(\"\"\"\n\tHello welcome to simple calculator. 
Type your inputs and choose the operation you want to do.\n\tTo use the application, first input the value, that is, input1 and input2, then the operation you want to perform.\n\tExample:\n\t\tPlease input your first value here: 9\n\t\tPlease input your second value here: 8\n\t\tPlease input your operation here: *\n\tThe app will print 72 as the answer.\n\"\"\")\ntime.sleep(1)\ntry:\n\tdef calc():\n\t\ttry:\n\t\t\tinput1 = int(input(\"Please input your first value here: \"))\n\t\t\ttime.sleep(1)\n\t\t\tinput2 = int(input(\"Please input your second value here: \"))\n\t\t\ttime.sleep(1)\n\t\t\tprint(\"\"\"\n\t\t\t\tFor addition input +,\n\t\t\t\tFor subtraction input -,\n\t\t\t\tFor multiplication input *,\n\t\t\t\tFor division input /.\n\t\t\t\"\"\")\n\t\t\ttime.sleep(1)\n\t\t\tinput3 = input(\"Please input your operation: \")\n\t\t\ttime.sleep(1)\n\t\t\tif (input3 == \"+\"):\n\t\t\t\tsum = input1 + input2\n\t\t\t\ttime.sleep(0.5)\n\t\t\t\tprint(f\"Your answer is {sum}\")\n\t\t\telif (input3 == \"-\"):\n\t\t\t\tsub = input1 - input2\n\t\t\t\ttime.sleep(2)\n\t\t\t\tprint(f\"Your answer is {sub}\")\n\t\t\telif (input3 == \"*\"):\n\t\t\t\tmulti = input1 * input2\n\t\t\t\ttime.sleep(2)\n\t\t\t\tprint(f\"Your answer is {multi}\")\n\t\t\telif (input3 == \"/\"):\n\t\t\t\tdiv = input1 / input2\n\t\t\t\ttime.sleep(2)\n\t\t\t\tprint(f\"Your answer is {div}\")\n\t\t\telse:\n\t\t\t\tprint(\"Something went wrong\")\n\t\texcept:\n\t\t\ttime.sleep(2)\n\t\t\tprint(\"Your input must be a number, do you want to try again?\")\n\t\t\ttime.sleep(2)\n\t\t\tresponse = input(\"Type your response here(Y or n): \")\n\t\t\tif (response.lower() == \"y\" or response.lower() == \"yes\"):\n\t\t\t\ttime.sleep(2)\n\t\t\t\tcalc()\n\t\t\telse:\n\t\t\t\ttime.sleep(3)\n\t\t\t\tprint(\"Okay, you can come back anytime\")\nexcept:\n\ttime.sleep(3)\n\tprint(\"Something went wrong, close app and try again\")\ncalc()\n","repo_name":"Ephantus-Wambui/calculator","sub_path":"simple_calc.py","file_name":"simple_calc.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73772040264","text":"from src.config import get_config, get_base_path\nimport os\nimport logging\nimport pandas as pd\nfrom src.get_data import get_processed_data\nfrom sklearn.model_selection import train_test_split\n\nlogger = logging.getLogger('split_data')\n\n\ndef split_data(config_path: str = 'params.yaml') -> (pd.DataFrame, pd.DataFrame) or (None, None):\n config = get_config(config_path)\n train_path = config.get('split_data')['train_path']\n test_path = config.get('split_data')['test_path']\n if os.path.exists(train_path):\n try:\n logger.info('split data exists reading the present data')\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n return train_data, test_data\n except FileNotFoundError as e:\n logger.error(\"File not found\")\n logger.exception(e)\n return None, None\n\n try:\n logger.info('cannot find the existing data')\n logger.info('creating the split data')\n data = get_processed_data(config_path)\n split_ratio = config.get('split_data')['split_ratio']\n random_state = config.get('split_data')['random_state']\n train, test = train_test_split(data, test_size=split_ratio, random_state=random_state)\n logger.info('saving train test split data')\n base_path = get_base_path()\n train_path = os.path.join(base_path, train_path)\n test_path = os.path.join(base_path, test_path)\n train.to_csv(train_path, index=False)\n 
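# the matching test split is written next, so the exists-check above can reuse both files on later runs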
test.to_csv(test_path, index=False)\n return train, test\n except Exception as e:\n logger.error('failed to create train/test split')\n logger.exception(e)\n return None, None\n\n\nif __name__ == '__main__':\n from src.set_logger import set_logger\n\n set_logger('test_split_data')\n train, test = split_data()\n\n","repo_name":"ArshadManer/Insurance_premium","sub_path":"src/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"9737137748","text":"from api.permissions import AdminOnly\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.core.mail import send_mail\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import filters, permissions, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom users.models import User\nfrom users.serializers import (GetTokenSerializer, NotAdminSerializer,\n SignUpSerializer, UsersSerializer)\n\n\nclass UsersViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UsersSerializer\n permission_classes = (IsAuthenticated, AdminOnly,)\n lookup_field = 'username'\n filter_backends = (filters.SearchFilter,)\n search_fields = ('username',)\n http_method_names = [\n 'get',\n 'post',\n 'patch',\n 'delete'\n ]\n\n @action(\n methods=['GET', 'PATCH'],\n detail=False,\n permission_classes=(IsAuthenticated,),\n url_path='me')\n def get_current_user_info(self, request):\n serializer = UsersSerializer(request.user)\n if request.method == 'PATCH':\n if request.user.is_admin:\n serializer = UsersSerializer(\n request.user,\n data=request.data,\n partial=True)\n else:\n serializer = NotAdminSerializer(\n request.user,\n data=request.data,\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.data)\n\n\nclass APIGetToken(APIView):\n \"\"\"\n Obtain a JWT token. Endpoint: 'v1/auth/token/'\n \"\"\"\n\n def send_email(self, user):\n confirmation_code = default_token_generator.make_token(user)\n subject = 'Код подтверждения'\n message = f'{confirmation_code} - ваш код для авторизации'\n admin_email = 'test@test.ru'\n user_email = [user.email]\n return send_mail(subject, message, admin_email, user_email)\n\n def post(self, request):\n user = User.objects.filter(\n username=request.data.get('username'),\n email=request.data.get('email')\n ).first()\n if user:\n self.send_email(user)\n return Response(\n 'Код подтверждения отправлен вам на почту.',\n status=status.HTTP_200_OK)\n serializer = GetTokenSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n user = serializer.validated_data['username']\n code = serializer.data['confirmation_code']\n except User.DoesNotExist:\n return Response(\n {'username': 'Пользователь с таким именем не найден!'},\n status=status.HTTP_404_NOT_FOUND)\n user = get_object_or_404(User, username=user)\n if code == user.confirmation_code:\n token = RefreshToken.for_user(user).access_token\n return Response(\n {'token': str(token)},\n status=status.HTTP_200_OK)\n return Response(\n {'confirmation_code': 'Ошибка! Неверный код подтверждения!'},\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass APISignup(APIView):\n \"\"\"\n Email the user a confirmation code for API access\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n\n @staticmethod\n def send_email(user):\n confirmation_code = default_token_generator.make_token(user)\n subject = 'Код подтверждения'\n message = f'{confirmation_code} - ваш код для авторизации'\n admin_email = 'test@test.ru'\n user_email = [user.email]\n return send_mail(subject, message, admin_email, user_email)\n\n def post(self, request):\n user = User.objects.filter(\n username=request.data.get('username'),\n email=request.data.get('email')\n ).first()\n if user:\n self.send_email(user)\n return Response(\n 'Код отправлен на почту.', status=status.HTTP_200_OK)\n\n serializer = SignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n self.send_email(user)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data,\n status=status.HTTP_200_OK,\n headers=headers\n )\n","repo_name":"nastasiatr/yamdb_final","sub_path":"api_yamdb/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17556666473","text":"import datetime\n\nimport streamlit as st\n\nfrom data.utils import build_country_data, check_if_aws_credentials_present\n\n\nclass Countries:\n def __init__(self, timestamp):\n self.country_data, self.last_modified, self.historical_country_data = (\n build_country_data()\n )\n self.countries = list(self.country_data.keys())\n self.default_selection = self.countries.index(\"Canada\")\n self.timestamp = timestamp\n\n @property\n def stale(self):\n delta = datetime.datetime.utcnow() - self.timestamp\n return delta > datetime.timedelta(hours=1)\n\n\n@st.cache\ndef fetch_country_data():\n check_if_aws_credentials_present()\n timestamp = datetime.datetime.utcnow()\n return Countries(timestamp=timestamp)","repo_name":"hackcollective/corona-calculator","sub_path":"data/countries.py","file_name":"countries.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"}
{"seq_id":"20251890360","text":"from typing import Iterator, Mapping, Sequence, Type\n\nfrom apischema.conversions.conversions import DefaultConversion\nfrom apischema.conversions.visitor import DeserializationVisitor\nfrom apischema.objects import ObjectField\nfrom apischema.objects.visitor import DeserializationObjectVisitor\nfrom apischema.types import AnyType\nfrom apischema.utils import get_origin_or_type\nfrom apischema.visitor import Unsupported\n\n\nclass InitFlattenedAliasVisitor(\n DeserializationObjectVisitor[Iterator[str]], DeserializationVisitor[Iterator[str]]\n):\n def mapping(\n self, cls: Type[Mapping], key_type: AnyType, value_type: AnyType\n ) -> Iterator[str]:\n yield from ()\n\n def object(self, tp: AnyType, fields: Sequence[ObjectField]) -> Iterator[str]:\n for field in fields:\n if field.flattened:\n yield from get_deserialization_flattened_aliases(\n get_origin_or_type(tp), field, self.default_conversion\n )\n elif not field.is_aggregate:\n yield field.alias\n\n def 
_visited_union(self, results: Sequence[Iterator[str]]) -> Iterator[str]:\n if len(results) != 1:\n raise NotImplementedError\n return results[0]\n\n\ndef get_deserialization_flattened_aliases(\n cls: Type, field: ObjectField, default_conversion: DefaultConversion\n) -> Iterator[str]:\n assert field.flattened\n try:\n yield from InitFlattenedAliasVisitor(default_conversion).visit_with_conv(\n field.type, field.deserialization\n )\n except (NotImplementedError, Unsupported):\n raise TypeError(\n f\"Flattened field {cls.__name__}.{field.name} must have an object type\"\n ) from None\n","repo_name":"wyfo/apischema","sub_path":"apischema/deserialization/flattened.py","file_name":"flattened.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"81"} +{"seq_id":"18900464124","text":"\"\"\"\nGiven two non-negative integers num1 and num2 represented as string, return the sum of num1 and num2.\n\nNote:\n\nThe length of both num1 and num2 is < 5100.\nBoth num1 and num2 contains only digits 0-9.\nBoth num1 and num2 does not contain any leading zero.\nYou must not use any built-in BigInteger library or convert the inputs to integer directly\n\"\"\"\n\nclass AddStrings:\n def addStrings(self, num1: str, num2: str) -> str:\n res = [] #res = ''\n\n carry = 0\n len1 = len(num1) - 1\n len2 = len(num2) - 1\n while len1 >= 0 or len2 >= 0:\n x1 = ord(num1[len1]) - ord('0') if len1 >= 0 else 0\n x2 = ord(num2[len2]) - ord('0') if len2 >= 0 else 0\n val = x1 + x2 + carry\n carry = val // 10\n val = val % 10\n res.append(val) #res += str(val)\n len1 -= 1\n len2 -= 1\n\n if carry > 0:\n res.append(carry) #res += str(carry)\n\n return ''.join(str(x) for x in res[::-1]) #res[::-1]\n","repo_name":"yangmingxuan/pythonalgorithms","sub_path":"string/AddStrings.py","file_name":"AddStrings.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74619328266","text":"import os\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom ..utils.logger import get_logger\n\nlogger = get_logger()\n\ndef save_checkpoint(\n outpath, model, optimizer=None,\n is_best=False, save_all=False, **kwargs\n ):\n\n if hasattr(model, 'module'):\n model = model.module\n\n state_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}\n state_dict.update(**kwargs)\n\n if not save_all:\n epoch = -1\n\n torch.save(\n obj=state_dict,\n f=os.path.join(outpath, f'checkpoint_{epoch}.pkl'),\n )\n\n if is_best:\n import shutil\n shutil.copy(\n os.path.join(outpath, f'checkpoint_{epoch}.pkl'),\n os.path.join(outpath, 'best_model.pkl'),\n )\n\ndef load_model(path):\n\n from .. import model\n from addict import Dict\n from ..data.tokenizer import Tokenizer\n\n checkpoint = torch.load(\n path, map_location=lambda storage, loc: storage\n )\n vocab_paths = checkpoint['args']['dataset']['vocab_paths']\n tokenizers = [Tokenizer(vocab_path=x) for x in vocab_paths]\n\n model_params = Dict(**checkpoint['args']['model'])\n model = model.Retrieval(**model_params, tokenizers=tokenizers)\n model.load_state_dict(checkpoint['model'])\n\n return model\n\ndef restore_checkpoint(path, model=None, optimizer=False):\n state_dict = torch.load(\n path, map_location=lambda storage, loc: storage\n )\n new_state = {}\n for k, v in state_dict['model'].items():\n new_state[k.replace('module.', '')] = v\n\n if model is None:\n from .. 
import model\n model_params = state_dict['args']['model_args']\n model = model.Retrieval(**model_params)\n\n model.load_state_dict(new_state)\n state_dict['model'] = model\n\n if optimizer:\n optimizer = state_dict['optimizer']\n state_dict['optimizer'] = None\n\n return state_dict\n\n\ndef get_tb_writer(logger_path):\n if logger_path == 'runs/':\n tb_writer = SummaryWriter()\n logger_path = tb_writer.file_writer.get_logdir()\n else:\n tb_writer = SummaryWriter(logger_path)\n return tb_writer\n\n\ndef get_device(gpu_id):\n if gpu_id >= 0:\n return torch.device('cuda:{}'.format(gpu_id))\n return torch.device('cpu')\n\n\ndef reset_pbar(pbar):\n from time import time\n pbar.n = 0\n pbar.last_print_n = 0\n pbar.start_t = time()\n pbar.last_print_t = time()\n pbar.update()\n return pbar\n\n\ndef print_tensor_dict(tensor_dict, print_fn):\n line = []\n for k, v in sorted(tensor_dict.items()):\n try:\n v = v.item()\n except AttributeError:\n pass\n line.append(f'{k.title()}: {v:10.6f}')\n print_fn(', '.join(line))\n\n\ndef set_tensorboard_logger(path):\n if path is not None:\n if os.path.exists(path):\n a = input(f'{path} already exists! Do you want to rewrite it? [y/n] ')\n if a.lower() == 'y':\n import shutil\n shutil.rmtree(path)\n tb_writer = get_tb_writer(path)\n else:\n exit()\n else:\n tb_writer = get_tb_writer(path)\n else:\n tb_writer = get_tb_writer()\n return tb_writer\n","repo_name":"jwehrmann/retrieval.pytorch","sub_path":"retrieval/utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"}
{"seq_id":"73720508745","text":"# -*- coding: utf-8 -*-\nimport wechatsogou\n\nws_api = wechatsogou.WechatSogouAPI()\n\n# Search a specific group of WeChat official accounts\nwx_list = [\"医美圈\", \"医美视界\", \"皮秒\"]\n\n\n# Search the official accounts' articles by keyword\ndef search_article():\n for l in wx_list:\n res = ws_api.get_gzh_article_by_history(l)\n article = res['article']\n for a in article:\n print('公众号:' + l)\n print('标题:' + a['title'])\n print('摘要:' + a['abstract'] + \"...\")\n print('文章链接:' + a['content_url'])\n if a['source_url'] == \"\":\n print('阅读原文链接:无')\n print('\\n')\n\n\nsearch_article()\n","repo_name":"hellodabin/weixinsearch","sub_path":"search_article_history.py","file_name":"search_article_history.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16348189265","text":"import os\nfrom setuptools import setup\nfrom jupyter_packaging import create_cmdclass\n\nVERSION = '0.0.1'\n\nsetup_args = dict(\n name='jupygloo',\n version=VERSION,\n description='Jupyter Server Extension for glootalk',\n python_requires='>=3.8',\n install_requires=[\n 'jupyter_server',\n 'glootalk',\n ],\n entry_points={\n 'console_scripts': [\n 'jupygloo=jupygloo.app:main'\n ]\n },\n include_package_data=True,\n)\n\nif __name__ == \"__main__\":\n setup(**setup_args)\n","repo_name":"anirrudh/glootalk","sub_path":"projects/jupygloo/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"24154340791","text":"import pandas as pd\nfrom . 
import db\nfrom .models import School_Profiles_Data\n\n# Cycles available to add data\nVALID_CYCLES = [2024, 2023, 2022, 2021, 2020, 2019, 2018, 2017, 2016, 2015, 2014, 2013]\n# Current cycle to be used for explorer pages\nCURRENT_CYCLE = 2022\n\n# List of schools for adding to profile\ndef get_md_schools(country = None):\n '''Returns list of all available MD schools'''\n if country == 'USA':\n md_schools = School_Profiles_Data.query.filter_by(md_or_do='MD', country='USA').all()\n elif country == 'CAN':\n md_schools = School_Profiles_Data.query.filter_by(md_or_do='MD', country='CAN').all()\n else:\n md_schools = School_Profiles_Data.query.filter_by(md_or_do='MD').all()\n names = sorted([school.school for school in md_schools])\n return names\ndef get_do_schools():\n '''Returns list of all available DO schools'''\n do_schools = School_Profiles_Data.query.filter_by(md_or_do='DO').all()\n names = sorted([school.school for school in do_schools])\n return names\n\n# Options for cycle profile page\nSEX_OPTIONS = [\"Male\", \"Female\", \"Other\"]\nGENDER_OPTIONS = [\"Male\", \"Female\", \"Trans Male\", \"Trans Female\", \"Genderqueer\", \"Other\"]\nRACE_ETHNICITY_OPTIONS = [\"Hispanic/Latino/Spanish Origin\", \"American Indian/Alaskan Native\", \"Asian\",\n \"Black/African American\", \"Native Hawaiian/Pacific Islander\", \"White\"]\nSTATE_OPTIONS = [\"Alabama\", \"Alaska\", \"Arizona\", \"Arkansas\", \"California\", \"Colorado\", \"Connecticut\", \"Delaware\",\n \"District of Columbia\", \"Florida\",\"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\", \"Indiana\", \"Iowa\",\n \"Kansas\", \"Kentucky\", \"Louisiana\", \"Maine\",\"Maryland\", \"Massachusetts\", \"Michigan\", \"Minnesota\",\n \"Mississippi\", \"Missouri\", \"Montana\", \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\",\n \"New Mexico\", \"New York\", \"North Carolina\", \"North Dakota\",\"Ohio\",\"Oklahoma\", \"Oregon\", \"Pennsylvania\",\n \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\",\"Texas\", \"Utah\", \"Vermont\", \"Virginia\",\n \"Washington\", \"West Virginia\", \"Wisconsin\", \"Wyoming\", \"Puerto Rico\", \"Guam\", \"American Samoa\",\n \"Canada\", \"International\", \"Other\"]\n\n# Settings for visualizations page\nVIS_TYPES = [\"Line\", \"Bar\", \"Timeline\", \"Dot\", \"Sankey\", \"Map\"]\n\n# Settings for color palettes\nCOLOR_TYPES = [\"Default\",\"Okabe-Ito\",\"Tol\"]\n\n# Settings for map scopes\nMAP_TYPES = [\"USA\", \"North America\"]\n\n# Settings for organizing Y axis on timeline and dot plots\nORGANIZE_Y_OPTIONS = [\"Alphabetical\", \"Status\"]\n\n# Settings for user profile\nPROFILE_TYPES = [\"Public\", \"Private\"]\nBLOCK_TYPES = [\"Graph\",\"Text\"]\nFILTER_OPTIONS=[\"Primary Submitted\",\"Secondary Recieved\",\"Application Complete\",\"Interview Recieved\",\"Interview Complete\",\"Rejection\",\"Waitlist\",\"Acceptance\",\"Withdrawn\"]\n\n# School list import column types\nCOLUMN_TYPES = [\"School Name\", \"Primary Submitted/Verified\", \"Secondary Received\",\n \"Application Complete\", \"Interview Received\", \"Interview Date\", \"Rejected\", \"Waitlisted\", \"Accepted\",\n \"Withdrawn\"]\nCOLUMN_LABEL_CONVERT_SQL = {\"School Name\": \"name\", \"Primary Submitted/Verified\": \"primary\",\n \"Secondary Received\": \"secondary_received\",\n \"Application Complete\": \"application_complete\", \"Interview Received\": \"interview_received\",\n \"Interview Date\": \"interview_date\", \"Rejected\": \"rejection\", \"Waitlisted\": \"waitlist\",\n \"Accepted\": \"acceptance\",\n 
\"Withdrawn\": \"withdrawn\"}\n\n# Explorer options\nSTATES_WITH_SCHOOLS = [\"Alabama\", \"Arizona\", \"Arkansas\", \"California\", \"Colorado\", \"Connecticut\",\n \"District of Columbia\", \"Florida\",\"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\", \"Indiana\", \"Iowa\",\n \"Kansas\", \"Kentucky\", \"Louisiana\", \"Maine\",\"Maryland\", \"Massachusetts\", \"Michigan\", \"Minnesota\",\n \"Mississippi\", \"Missouri\", \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\",\n \"New Mexico\", \"New York\", \"North Carolina\", \"North Dakota\",\"Ohio\",\"Oklahoma\", \"Oregon\", \"Pennsylvania\",\n \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\",\"Texas\", \"Utah\", \"Vermont\", \"Virginia\",\n \"Washington\", \"West Virginia\", \"Wisconsin\", \"Puerto Rico\", \"Quebec\", \"Ontario\", \"Saskatchewan\"]\n\nSTATE_ABBREV = {\n \"Alabama\": \"AL\",\n \"Alaska\": \"AK\",\n \"Arizona\": \"AZ\",\n \"Arkansas\": \"AR\",\n \"California\": \"CA\",\n \"Colorado\": \"CO\",\n \"Connecticut\": \"CT\",\n \"Delaware\": \"DE\",\n \"Florida\": \"FL\",\n \"Georgia\": \"GA\",\n \"Hawaii\": \"HI\",\n \"Idaho\": \"ID\",\n \"Illinois\": \"IL\",\n \"Indiana\": \"IN\",\n \"Iowa\": \"IA\",\n \"Kansas\": \"KS\",\n \"Kentucky\": \"KY\",\n \"Louisiana\": \"LA\",\n \"Maine\": \"ME\",\n \"Maryland\": \"MD\",\n \"Massachusetts\": \"MA\",\n \"Michigan\": \"MI\",\n \"Minnesota\": \"MN\",\n \"Mississippi\": \"MS\",\n \"Missouri\": \"MO\",\n \"Montana\": \"MT\",\n \"Nebraska\": \"NE\",\n \"Nevada\": \"NV\",\n \"New Hampshire\": \"NH\",\n \"New Jersey\": \"NJ\",\n \"New Mexico\": \"NM\",\n \"New York\": \"NY\",\n \"North Carolina\": \"NC\",\n \"North Dakota\": \"ND\",\n \"Ohio\": \"OH\",\n \"Oklahoma\": \"OK\",\n \"Oregon\": \"OR\",\n \"Pennsylvania\": \"PA\",\n \"Rhode Island\": \"RI\",\n \"South Carolina\": \"SC\",\n \"South Dakota\": \"SD\",\n \"Tennessee\": \"TN\",\n \"Texas\": \"TX\",\n \"Utah\": \"UT\",\n \"Vermont\": \"VT\",\n \"Virginia\": \"VA\",\n \"Washington\": \"WA\",\n \"West Virginia\": \"WV\",\n \"Wisconsin\": \"WI\",\n \"Wyoming\": \"WY\",\n \"District of Columbia\": \"DC\",\n \"American Samoa\": \"AS\",\n \"Guam\": \"GU\",\n \"Northern Mariana Islands\": \"MP\",\n \"Puerto Rico\": \"PR\",\n \"United States Minor Outlying Islands\": \"UM\",\n \"U.S. 
Virgin Islands\": \"VI\",\n \"Alberta\": \"AB\",\n \"British Columbia\": \"BC\",\n \"Manitoba\": \"MB\",\n \"New Brunswick\": \"NB\",\n \"Newfoundland and Labrador\": \"NL\",\n \"Northwest Territories\": \"NT\",\n \"Nova Scotia\": \"NS\",\n \"Nunavut\": \"NU\",\n \"Ontario\": \"ON\",\n \"Prince Edward Island\": \"PE\",\n \"Quebec\": \"QC\",\n \"Saskatchewan\": \"SK\",\n \"Yukon\": \"YT\"\n}\n\nABBREV_TO_STATE = dict(map(reversed, STATE_ABBREV.items()))\n\n# GPA Calculator\nGRADE_OPTIONS = ['A+', 'A', 'A-', 'AB', 'B+', 'B', 'B-', 'BC', 'C+', 'C', 'C-', 'CD', 'D+', 'D', 'D-', 'DE', 'DF', 'E',\n 'F', 'AP (Tested Out)', 'AU (Audit)', 'Currently Taking', 'CR (Credit)', 'P (Pass/Fail)',\n 'F (Pass/Fail)', 'W (Withdrawn)', 'EX (Exempt)', 'Future']\n\nAMCAS_WEIGHT = {'A+': 4, 'A': 4, 'A-': 3.7, 'B+': 3.3, 'B': 3, 'B-': 2.7, 'C+': 2.3, 'C': 2, 'C-': 1.7, 'D+': 1.3,\n 'D': 1, 'D-': 0.7, 'F': 0, 'AB': 3.5, 'BC': 2.5, 'CD': 1.5, 'DE': 0.5, 'DF': 0.5}\n\nAACOMAS_WEIGHT = {'A+': 4, 'A': 4, 'A-': 3.7, 'B+': 3.3, 'B': 3, 'B-': 2.7, 'C+': 2.3, 'C': 2, 'C-': 1.7, 'D+': 1.3,\n 'D':1, 'D-': 0.7, 'F': 0, 'AB': 3.5, 'BC': 2.5, 'CD': 1.5, 'DE': 0.5, 'DF': 0.5}\n\nTMDSAS_WEIGHT = {'A+': 4, 'A': 4, 'A-': 4, 'B+': 3, 'B': 3, 'B-': 3, 'C+': 2, 'C': 2, 'C-': 2, 'D+': 1,\n 'D': 1, 'D-': 1, 'F': 0, 'AB': 3.5, 'BC': 2.5, 'CD': 1.5, 'DE': 0.5, 'DF': 0.5}\n\nCOURSE_CLASSIFICATIONS = sorted(['Biology', 'Chemistry', 'Physics', 'Math', 'Behavioral/Social Science', 'Business',\n 'Computer Science/Technology', 'Education', 'Engineering', 'English', 'Fine Arts',\n 'Foreign Language', 'Government', 'Health Science', 'History', 'Natural/Physical Science',\n 'Other', 'Philosophy/Religion'])\n\nAMCAS_SCIENCE = ['Biology', 'Chemistry', 'Physics', 'Math']\n\nCOURSE_TERMS = {0:'Summer', 1:'Fall', 2:'Winter', 3:'Spring'}\n\nCOURSE_YEARS = list(reversed([str(i) + '-' + str(i+1) for i in range(1950,VALID_CYCLES[0]+2)]))\n\nPROGRAM_TYPES = ['Undergraduate', 'Post-bac', 'Graduate']\n\n# Quarter conversion to semester hours\nAMCAS_QUARTER_CONVERSION = {0.5:0.3, 1:0.7, 1.5:1, 2:1.3, 2.5:1.7, 3:2, 3.5:2.3, 4:2.7, 4.5:3, 5:3.3, 6:4,\n 7:4.7, 8:5.3, 9:6, 10:6.7, 12:8, 15:10, 20:13.3}","repo_name":"RunningMSN/CycleTrack","sub_path":"website/form_options.py","file_name":"form_options.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"25301068865","text":"# INCOMPETANT ANTS!\r\n'''\r\nSimulation of ant pathfinding phenomenon. The ants leave the hive at regular\r\nintervals, depositing a pheromone trail as they go, to find sources of food.\r\nOnce a source of food has been found the return to the hive. Each ant is\r\nbiased to follow a pheromone trail which is constantly evaporating.\r\nI tried to create the following simulation with as few moving parts as\r\npossible, and kept the rule-sets for each ant binary as far as it was\r\nreasonable. This is the second version that worked reasonably well, however\r\nthere are several aspects which need to be worked on. More details at:\r\nhttps://medium.com/burningdaylight/ants-1-43812fd1ea0f\r\n\r\n'''\r\n\r\nfrom pylab import rcParams\r\nimport numpy as np\r\nrcParams['figure.figsize'] = 5,5\r\nfrom matplotlib import pyplot as plt\r\nfrom noise import pnoise1\r\n\r\n#%% TERRAIN SETUP\r\n\r\nBoxX = 200 #Width of terrain\r\nBoxY = 200 #Height of terrain\r\nenvironment = np.array([[0. 
{"seq_id":"25301068865","text":"# INCOMPETENT ANTS!\r\n'''\r\nSimulation of ant pathfinding phenomenon. The ants leave the hive at regular\r\nintervals, depositing a pheromone trail as they go, to find sources of food.\r\nOnce a source of food has been found, they return to the hive. Each ant is\r\nbiased to follow a pheromone trail which is constantly evaporating.\r\nI tried to create the following simulation with as few moving parts as\r\npossible, and kept the rule-sets for each ant binary as far as was\r\nreasonable. This is the second version that worked reasonably well, however\r\nthere are several aspects which need to be worked on. More details at:\r\nhttps://medium.com/burningdaylight/ants-1-43812fd1ea0f\r\n\r\n'''\r\n\r\nfrom pylab import rcParams\r\nimport numpy as np\r\nrcParams['figure.figsize'] = 5,5\r\nfrom matplotlib import pyplot as plt\r\nfrom noise import pnoise1\r\n\r\n#%% TERRAIN SETUP\r\n\r\nBoxX = 200 #Width of terrain\r\nBoxY = 200 #Height of terrain\r\nenvironment = np.array([[0. for i in range(BoxX)] for j in range(BoxY)]) #Creating array for terrain\r\n\r\n'''\r\nEnvironment Codes:\r\nFood: -20\r\nPheromone trails: +ve number\r\nHome: -10\r\nNoGo: -30\r\n'''\r\n#Setting up the environment so that the ant-hill and food sources have a strong\r\n#pheromonal signature\r\nenvironment[5:15,95:106] = 1000\r\nenvironment[80:90,120:130] = 1000\r\nenvironment[81:91,71:81] = 1000\r\nplt.matshow(environment,cmap='hot',interpolation=None)\r\nplt.show()\r\n\r\ndecayFactor = 0.025 #Rate at which pheromone evaporates\r\n\r\n#Function to update the environment each iteration, can be changed to allow the food source to deplete\r\ndef EnvironmentUpdate():\r\n    pheramonesHere = environment > 0\r\n    environment[pheramonesHere] = environment[pheramonesHere] - decayFactor\r\n    environment[5:15,95:106] = 1000\r\n    environment[80:90,120:130] = 1000\r\n    environment[81:91,71:81] = 1000\r\n\r\n#Creating an overlay for the ant-class to tell whether it is on a food pixel or an ant-hill pixel\r\nenvironmentmap = np.array([[0. for i in range(BoxX)] for j in range(BoxY)])\r\nenvironmentmap[5:15,95:106] = -10\r\nenvironmentmap[80:90,120:130] = -20\r\nenvironmentmap[81:91,71:81] = -20
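EnvironmentUpdate above implements linear evaporation: every positive cell loses a fixed decayFactor per tick, while the hill and food squares are pinned back to 1000. A standalone check of that rule on a 1-D toy trail (illustrative only, not part of Ants7.py):

import numpy as np

trail = np.array([0.0, 0.5, 4.0, 1000.0])
decay = 0.025
for _ in range(10):            # ten simulation ticks
    positive = trail > 0
    trail[positive] -= decay   # same linear decay as EnvironmentUpdate
    trail[3] = 1000.0          # a pinned "source" cell, refreshed every tick
print(trail)  # -> [0. 0.25 3.75 1000.] ; faint explore trails disappear soonest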
\r\n\r\n#%% ANT CLASS\r\n\r\nclass ant():\r\n    global environment\r\n    global environmentmap\r\n\r\n    def __init__(self): #Initialize ant randomly inside the colony\r\n        self.biasX = 0\r\n        self.biasY = 0\r\n        self.locY = np.random.randint(5,high=16)\r\n        self.locX = np.random.randint(95,high=106)\r\n        self.state = 'EXPLORE'\r\n\r\n    def updateLocation(self): #Used to shunt ant towards correct behaviour based on state\r\n        if self.state == 'EXPLORE':\r\n            self.explore()\r\n        elif self.state == 'FOODRUN':\r\n            self.foodRun()\r\n        elif self.state == 'DEAD':\r\n            self.dead()\r\n\r\n    def checklocX(self,xpos): #What to do at X boundaries\r\n        if xpos <= 0:\r\n            self.state = 'DEAD'\r\n            return 1\r\n        if xpos >= BoxX-1:\r\n            self.state = 'DEAD'\r\n            return BoxX-2\r\n        return xpos\r\n\r\n    def checklocY(self,ypos): #What to do at Y boundaries\r\n        if ypos <= 0:\r\n            self.state = 'DEAD'\r\n            return 1\r\n        if ypos >= BoxY-1:\r\n            self.state = 'DEAD'\r\n            return BoxY-2\r\n        return ypos\r\n\r\n    def stepHere(self,xx,yy): #Flip states when found food or returned home after finding food\r\n        xx = int(np.floor(xx))\r\n        yy = int(np.floor(yy))\r\n\r\n        if self.state == 'EXPLORE' and environmentmap[yy,xx] == -20:\r\n            self.state = 'FOODRUN'\r\n\r\n        if self.state == 'FOODRUN' and environmentmap[yy,xx] == -10:\r\n            self.state = 'EXPLORE'\r\n\r\n    def dead(self): #Respawns the ant inside the colony\r\n        self.locY = np.random.randint(5,high=15)\r\n        self.locX = np.random.randint(95,high=106)\r\n        self.state = 'EXPLORE'\r\n\r\n    def explore(self): #Food finding state\r\n        self.setBias(self.locX,self.locY) #Set the bias parameters\r\n        self.spreadPheramone(self.locX,self.locY,.5) #deposits 0.5 pheromone when exploring\r\n        self.locX += np.random.randint(-5,6) + self.biasX #move randomly in X and add bias according to local pheromone environment\r\n        self.locX = self.checklocX(self.locX) #Check if an X boundary was hit\r\n        self.locY += np.random.randint(-5,6) + self.biasY + np.random.randint(5) #Move randomly in Y and add bias\r\n        self.locY = self.checklocY(self.locY) #Check if a Y boundary was hit\r\n        self.stepHere(self.locX,self.locY) #Check if the ant is at a food source or in the colony\r\n\r\n    def foodRun(self): #Returning food to colony state\r\n        self.setBias(self.locX,self.locY)\r\n        self.spreadPheramone(self.locX,self.locY,4.) #deposits 4.0 pheromone when food is found\r\n        self.locX += np.random.randint(-5,6) + self.biasX\r\n        self.locX = self.checklocX(self.locX)\r\n        self.locY += np.random.randint(-5,6) + self.biasY - np.random.randint(5)\r\n        self.locY = self.checklocY(self.locY)\r\n        self.stepHere(self.locX,self.locY)\r\n\r\n    def spreadPheramone(self,xx,yy,amount): #Deposit pheromones on the environment\r\n        xx = int(np.floor(xx))\r\n        yy = int(np.floor(yy))\r\n        environment[yy,xx] += amount\r\n\r\n    def setBias(self,xx,yy): #Scan local environment for pheromones and set bias\r\n        xscanpos = environment[yy,xx:xx+25]\r\n        xscanneg = environment[yy,xx-25:xx]\r\n        yscanpos = environment[yy:yy+25,xx]\r\n        yscanneg = environment[yy-25:yy,xx]\r\n\r\n        xscanpos = np.sum(xscanpos[xscanpos>0])\r\n        xscanneg = np.sum(xscanneg[xscanneg>0])\r\n        yscanpos = np.sum(yscanpos[yscanpos>0])\r\n        yscanneg = np.sum(yscanneg[yscanneg>0])\r\n\r\n        if xscanpos > xscanneg:\r\n            self.biasX = np.random.randint(3)\r\n        elif xscanneg > xscanpos:\r\n            self.biasX = -1*np.random.randint(3)\r\n        else:\r\n            self.biasX = 0\r\n\r\n        if yscanpos > yscanneg:\r\n            self.biasY = np.random.randint(3)\r\n        elif yscanneg > yscanpos:\r\n            self.biasY = -1*np.random.randint(3)\r\n        else:\r\n            self.biasY = 0\r\n\r\n#%% RUN\r\nTime = 500\r\nAntz = [ant() for i in range(10)] #Initialize with 10 ants\r\nsplorelog = []\r\n\r\n#%%\r\nfor t in range(Time):\r\n    if t%30 == 0: #Add 6 ants every 30 iterations\r\n        Antz.append(ant())\r\n        Antz.append(ant())\r\n        Antz.append(ant())\r\n        Antz.append(ant())\r\n        Antz.append(ant())\r\n        Antz.append(ant())\r\n\r\n    if t%(10) == 0: #Plotting utility\r\n        environmentoverlay = environment.copy()\r\n        antx = [antman.locX for antman in Antz]\r\n        anty = [antman.locY for antman in Antz]\r\n\r\n        environmentoverlay[anty,antx] = 2\r\n\r\n        explorenum = np.count_nonzero([antman.state == 'EXPLORE' for antman in Antz])\r\n        allnum = len(Antz)\r\n        splorelog.append(explorenum)\r\n\r\n        plt.matshow(environmentoverlay,cmap='plasma',vmin=-5,vmax=10) #To view pheromones and environment\r\n        plt.colorbar()\r\n        plt.title('Exploring Now: ' + str(explorenum) + ' of ' + str(allnum))\r\n        plt.show()\r\n\r\n#        plt.hist2d(anty, antx, bins=50,cmap='hot',range = [[0, 200], [0, 200]],vmin=0,vmax=5) #To view locations of ants\r\n#        plt.show()\r\n#\r\n#        biasx = [antman.biasX for antman in Antz] #To track the biases of all the ants live\r\n#        biasy = [antman.biasY for antman in Antz]\r\n#        plt.hist2d(biasx, biasy, bins=11,cmap='hot',)\r\n#        plt.colorbar()\r\n#        plt.show()\r\n\r\n    print(t,str(len(Antz)))\r\n\r\n    [antman.updateLocation() for antman in Antz] #Update location of each ant\r\n    EnvironmentUpdate() #Update the environment\r\n","repo_name":"MirTunio/BurningDaylight","sub_path":"Ants/Ants7.py","file_name":"Ants7.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
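Each ant's setBias sums the positive pheromone in 25-cell windows on either side of its position and nudges movement toward the larger sum. The same scan reduced to one axis (illustrative only; the real method also leaves the bias at 0 on ties):

import numpy as np

row = np.zeros(200)
row[120:135] = 2.0                            # a trail ahead of the ant
x = 100
ahead = row[x:x+25][row[x:x+25] > 0].sum()    # 5 trail cells in range -> 10.0
behind = row[x-25:x][row[x-25:x] > 0].sum()   # -> 0.0
bias = np.random.randint(3) if ahead > behind else -np.random.randint(3)
print(ahead, behind, bias >= 0)               # -> 10.0 0.0 True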
{"seq_id":"31245164647","text":"# Snake and Ladder Game\n# mindula dilthushan\n# minduladilthushan1@gmail.com\n# 22-04-23\n\nimport random\nimport time\n\nprint('Welcome to the Snake & Ladder Game!')\n\nsnakeAddress = {27: 5, 40: 3, 43: 18, 54: 31, 66: 45, 76: 58, 89: 53, 99: 41}\nladderAddress = {4: 25, 13: 46, 33: 49, 42: 63, 50: 69, 62: 81, 74: 92}\n\n\ndef ladder(data):\n    if (data in ladderAddress.keys()):\n        print(\"\\033[1m\" + \"You landed on a ladder 😁\" + \"\\033[0m\")\n        return ladderAddress[data]\n    else:\n        return data\n\n\ndef snake(data):\n    if (data in snakeAddress.keys()):\n        print(\"\\033[1m\" + \"A snake bit you 🐍\" + \"\\033[0m\")\n        return snakeAddress[data]\n    else:\n        return data\n\n\ndef game_end(data):\n    return (data >= 100)\n\ndef game_start():\n    player_name = \"\\033[1m\" + \"Mindula\" + \"\\033[0m\"\n    while (True):\n        random_data = random.randint(1, 6)\n        print(\"Dice value:\", random_data)\n        if (random_data == 1 or random_data == 6):\n            game_process(player_name, random_data)\n        else:\n            print(\"\\033[1m\" + \"Try again..\" + \"\\033[0m\")\n            time.sleep(1)\n\ndef game_process(player_name_data, score_data):\n\n    player_name = player_name_data\n    score = score_data\n\n    while (True):\n\n        print(player_name, \"your current score:\", score)\n\n        time.sleep(1)\n        random_data = random.randint(1, 6)\n        print(\"Dice value:\", random_data)\n\n        score += random_data\n        score = ladder(score)\n        score = snake(score)\n\n        if (game_end(score)):\n            print(player_name, \"you won the game... Congratulations!\")\n            exit()\n\ngame_start()\n","repo_name":"Alern23/Snake-and-Ladder","sub_path":"snake_ladder_game.py","file_name":"snake_ladder_game.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"9435919538","text":"import gym\nfrom gym import spaces\nimport numpy as np\nimport logging\n\nlogger = logging.getLogger(__name__ + '.CustomEnv')\n\nclass CustomEnv(gym.Env):\n    #metadata = {'render.modes' : ['human']}\n    def __init__(self, pygame):\n        super(CustomEnv,self).__init__()\n        \n        self.pygame = pygame\n        self.car = pygame._car\n\n        #self.action_space = spaces.Discrete(3)\n        #self.observation_space = spaces.Box(np.array([0, 0, 0]), np.array([2, 2, 2]), dtype=np.int)\n        \n        # get action and observation-space from car?\n        self.action_space = spaces.Discrete(self.car.action_space)\n        self.observation_space = spaces.Box(np.array(self.car.observation_space[0]), \n                                            np.array(self.car.observation_space[1]), \n                                            dtype=int)  # the np.int alias was removed in NumPy 1.24\n        \n        #how many actions are possible?\n        self.nActions = self.action_space.n\n        #how many states exist in the observation space?\n        self.nStates = 1\n        high = self.observation_space.high\n        low = self.observation_space.low\n        for bounds_pair in zip(low, high): \n            num_states = bounds_pair[1] - bounds_pair[0] + 1\n            self.nStates*=num_states\n\n    def reset(self):\n        self.car.reset()\n        self.pygame.reset()\n        obs = self.pygame.observe()\n        return np.array(obs)\n\n    def step(self, action):\n        self.pygame.action(action)\n        obs = np.array(self.pygame.observe())\n        reward = self.pygame.evaluate()\n        done = self.pygame.is_done()\n        __car_dicct = {\"X\": self.car._center[0], \"Y\": self.car._center[1], \"angle\": self.car._angle, \"speed\": self.car._speed, \"is alive\": self.car._is_alive, \"is crashed\": self.car._is_crashed, \"energy max\": self.car.energy_max, \"energy\": self.car.energy}\n        __env_dicct = {}\n        info = {'prob': 1, \"car\": __car_dicct, \"env\": __env_dicct}\n        logger.debug('ENV-STEP: obs=\\'%s\\' \\t reward=\\'%s\\' \\t done=\\'%s\\' \\t info=\\'%s\\'', obs, reward, done, info)\n        return obs, reward, done, info\n\n    def render(self, mode=\"human\", close=False):\n        self.pygame.view()\n","repo_name":"DevArchitectMaster/reinforcement_learning_ev3_framework_student","sub_path":"simulation/sim_world/envs/simulation_env.py","file_name":"simulation_env.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
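CustomEnv sizes a tabular state space by multiplying, per observation dimension, the number of integer values between the Box bounds. The same arithmetic in isolation, using the example bounds from the commented-out Box in __init__:

low, high = [0, 0, 0], [2, 2, 2]   # bounds from the commented-out Box above
n_states = 1
for lo, hi in zip(low, high):
    n_states *= hi - lo + 1        # each dimension contributes (hi - lo + 1) values
print(n_states)  # -> 27, i.e. 3 * 3 * 3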
{"seq_id":"41011908875","text":"# In the transaction spreadsheet we have 3 columns: transaction_id, product_id, price\r\n# In this spreadsheet, we record all types of transactions, but because of some error\r\n# the price here is wrong. Let's say we have to decrease the price by 10%.\r\n# If we had to do this manually, we would have to type a formula and make changes\r\n# for all the rows in all the sheets. This manual process can be tedious.\r\n# Let's automate this process (and also add a chart)\r\n\r\nimport openpyxl as xl # alias to make our code shorter\r\nfrom openpyxl.chart import BarChart, Reference\r\n\r\ndef process_workbook(filename):\r\n\r\n    wb = xl.load_workbook(filename) #workbook object\r\n    sheet = wb['Sheet1']\r\n    cell = sheet['a1'] # access cells using coordinates\r\n    cell = sheet.cell(1, 1) # access cells using the cell method by passing row and column\r\n    print(cell.value)\r\n\r\n    # number of rows in the spreadsheet\r\n    print(sheet.max_row) # answer: 4 rows\r\n\r\n    ## iterate over the rows\r\n    for row in range(2, sheet.max_row + 1): # add 1 to include the 4th row; skip the 1st (header) row\r\n        cell = sheet.cell(row, 3)\r\n        print(cell.value)\r\n        corrected_price = cell.value * 0.9 #reduce the price by 10%\r\n        # add corrected prices to the new column\r\n        corrected_price_cell = sheet.cell(row, 4)\r\n        corrected_price_cell.value = corrected_price #update our spreadsheet\r\n\r\n    ## add a chart (selecting all values of the 4th column)\r\n    values = Reference(sheet,\r\n                       min_row=2,\r\n                       max_row=sheet.max_row,\r\n                       min_col=4,\r\n                       max_col=4)\r\n\r\n    chart = BarChart()\r\n    chart.add_data(values)\r\n    sheet.add_chart(chart,'e2')\r\n\r\n    newfile = 'transactions2.xlsx'\r\n    wb.save(newfile) #save all the changes on a new file\r\n\r\n\r\nprocess_workbook('transactions.xlsx')","repo_name":"shubhranshi/mosh-python","sub_path":"00_Additional_Project/project_automate_excel_spreadsheet.py","file_name":"project_automate_excel_spreadsheet.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13283501850","text":"# -*- coding: utf8 -*-\n# Author: Wolf Honore\n\"\"\"Classes to handle differences in the Coqtop XML interface across versions\nand provide a uniform interface.\n\nhttps://github.com/coq/coq/blob/master/dev/doc/xml-protocol.md\n\"\"\"\n\n# xml.dom.minidom only needed for pretty printing.\nimport re\nimport subprocess\nimport xml.etree.ElementTree as ET\nfrom abc import ABCMeta, abstractmethod\nfrom enum import Enum\nfrom pathlib import Path\nfrom shutil import which\nfrom typing import (\n    Any,\n    Callable,\n    Container,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\nfrom xml.dom.minidom import parseString\nfrom xml.parsers.expat import ExpatError, errors\n\nPPTag = str\nTaggedToken = Tuple[str, Optional[PPTag]]\nGoal = NamedTuple(\n    \"Goal\",\n    [\n        (\"hyp\", Sequence[Union[str, Sequence[TaggedToken]]]),\n        (\"ccl\", Union[str, Sequence[TaggedToken]]),\n        (\"name\", Optional[str]),\n    ],\n)\nGoals = NamedTuple(\n    \"Goals\",\n    [\n        (\"fg\", List[Goal]),\n        (\"bg\", List[List[Goal]]),\n        (\"shelved\", List[Goal]),\n        (\"given_up\", List[Goal]),\n    ],\n)\n\n\nclass GoalMode(Enum):\n    \"\"\"Control the information returned by the Subgoals command.\"\"\"\n\n    # Get the goal and hypotheses\n    FULL = \"full\"\n    # Get only the goal\n    SHORT = \"short\"\n\n\nWARNING_RE = re.compile(\"^(Warning:[^]]+])$\", flags=re.MULTILINE)\n\n\nclass FindCoqtopError(Exception):\n    \"\"\"An exception for when a coqtop executable could not be found.\"\"\"\n\n\n# Coqtop Response Types #\nclass Ok:\n    \"\"\"A response representing success.\"\"\"\n\n
    def __init__(self, val: Any, msg: str = \"\") -> None:\n        \"\"\"Initialize values.\"\"\"\n        self.val = val\n        self.msg = msg\n\n\nclass Err:\n    \"\"\"A response representing failure.\"\"\"\n\n    def __init__(self, msg: str, loc: Tuple[int, int] = (-1, -1)) -> None:\n        \"\"\"Initialize values.\"\"\"\n        self.msg = msg\n        self.loc = loc\n\n\nResult = Union[Ok, Err]\n\n# The error in case of a timeout\nTIMEOUT_ERR = Err(\n    \"Coq timed out. You can change the timeout with <leader>ct and try again.\"\n)\n\n# The error in case of an unexpected error (e.g., invalid XML)\nUNEXPECTED_ERR = Err(\n    \"Coqtail experienced an unexpected error. \"\n    \"Please report at https://github.com/whonore/Coqtail/issues.\"\n)\n\n\n# Helpers #\ndef unexpected(expected: Iterable[Any], got: Any) -> TypeError:\n    \"\"\"Return an exception with a message showing what was expected.\"\"\"\n    expect = \" or \".join(map(str, expected))\n    return TypeError(f\"Expected {expect}, but got {str(got)}\")\n\n\nCHARMAP = {b\"&nbsp;\": b\" \", b\"&apos;\": b\"'\", b\"&#40;\": b\"(\", b\"&#41;\": b\")\"}\nBAD_BYTE = errors.codes[errors.XML_ERROR_INVALID_TOKEN] # pylint: disable=no-member\n\n\ndef _unescape(cmd: bytes) -> bytes:\n    \"\"\"Replace escaped characters with the unescaped version.\"\"\"\n    for escape, unescape in CHARMAP.items():\n        cmd = cmd.replace(escape, unescape)\n    return cmd\n\n\ndef _escape_byte(data: bytes, line: int, col: int) -> bytes:\n    \"\"\"Escape an unprintable byte.\"\"\"\n    lines = data.splitlines()\n    bad = lines[line - 1][col]\n    pre = lines[line - 1][:col]\n    post = lines[line - 1][col + 1 :]\n    lines[line - 1] = pre + f\"\\\\x{bad:02x}\".encode(\"utf-8\") + post\n    return b\"\\n\".join(lines)\n\n\ndef _parse_tagged_tokens(\n    tags: Container[PPTag],\n    xml: ET.Element,\n    stack: Optional[List[PPTag]] = None,\n    inner: bool = False,\n) -> Iterator[Tuple[str, List[PPTag]]]:\n    \"\"\"Scrape an XML element into a stream of text tokens and stack of tags.\n\n    Helper function to parse_tagged_tokens.\n\n    Written to support richpp tags, and thus supports start. and end. tags\n    used by Coqtop to highlight ranges that are not properly nested\n    (i.e., <start.X>...<start.Y>...<end.X>...<end.Y> is allowed).\n    This is somewhat documented here: https://github.com/coq/coq/blob/master/dev/doc/xml-protocol.md#highlighting-text\n    Documentation neglects to mention the semantics of start. and end. tags\n    that are not self-closing.\n\n    Until we get clarification, we will interpret\n    <start.X>foo</start.X>bar as <X>foo</X>bar and\n    <end.X>foo</end.X>bar as foo<X>bar</X>.\n    \"\"\"\n    pop_after = None\n    if stack is None:\n        stack = []\n\n    # Check tag, see if we should modify stack\n    if xml.tag.startswith(\"start.\"):\n        _, _, tag = xml.tag.rpartition(\"start.\") # assert(tag != \"\")\n        if tag in tags:\n            # start. tag: push onto stack\n            stack.insert(0, tag)\n\n    elif xml.tag.startswith(\"end.\"):\n        _, _, tag = xml.tag.rpartition(\"end.\") # assert(tag != \"\")\n        if tag in tags:\n            # end. tag: remove from stack (even if it's not at the top)\n            pop_after = tag\n\n    elif xml.tag in tags:\n        # regular tag: push onto stack, but remember to pop it before xml.tail\n        stack.insert(0, xml.tag)\n        pop_after = xml.tag\n\n    # Get text before first inner child\n    if xml.text is not None:\n        yield (xml.text, stack[:])\n\n    # Recurse on children, with modified stack\n    for child in xml:\n        yield from _parse_tagged_tokens(tags, child, stack, True)\n\n    if pop_after is not None:\n        stack.remove(pop_after)\n\n    # Get trailing text up to start of next tag, unless this is the outermost tag\n    if inner and xml.tail is not None:\n        yield (xml.tail, stack[:])\n\n\ndef parse_tagged_tokens(\n    tags: Container[PPTag],\n    xml: ET.Element,\n) -> Iterator[TaggedToken]:\n    \"\"\"Scrape an XML element into a stream of text tokens and accompanying tags.\n\n    Written to support richpp markup.\n    Only considers tags specified by the tags parameter.\n    \"\"\"\n    token_acc, last_tag = \"\", None\n\n    # Recursive helper _parse_tagged_tokens gives us tag stacks\n    for token, tag_list in _parse_tagged_tokens(tags, xml):\n        # Take top tag from tag stack, if any\n        top_tag = tag_list[0] if tag_list != [] else None\n\n        if top_tag == last_tag:\n            # Join tokens whose top tag is the same\n            token_acc += token\n        else:\n            yield (token_acc, last_tag)\n            token_acc, last_tag = token, top_tag\n\n    yield (token_acc, last_tag)\n\n\ndef join_tagged_tokens(tagged_tokens: Iterable[TaggedToken]) -> str:\n    \"\"\"Join tokens from tagged token stream.\n\n    NOTE:\n      forall xml tags,\n        join_tagged_tokens(parse_tagged_tokens(tags, xml)) = \"\".join(xml.itertext())\n    \"\"\"\n    return \"\".join(s for s, _ in tagged_tokens)\n\n\ndef partition_warnings(stderr: str) -> Tuple[str, str]:\n    \"\"\"Partition Coq stderr messages into warnings and errors.\n\n    Warnings are assumed to have the following form:\n    Warning: message_with_newlines [warning_type]\\n\n    Everything else is treated as an error message.\n    \"\"\"\n    warns: List[str] = []\n    errs: List[str] = []\n    # Strip whitespace and drop empty strings\n    for msg in filter(None, map(str.strip, WARNING_RE.split(stderr))):\n        (warns if WARNING_RE.fullmatch(msg) else errs).append(msg)\n    return \"\\n\".join(warns), \"\\n\".join(errs)
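Because the helpers above are pure functions, they can be sanity-checked in isolation. A short illustrative session on hand-made inputs (fabricated strings, not captured coqtop output):

_xml = ET.fromstring("<pp>Goal <constr.keyword>forall</constr.keyword> n</pp>")
print(list(parse_tagged_tokens({"constr.keyword"}, _xml)))
# -> [('Goal ', None), ('forall', 'constr.keyword'), (' n', None)]

_warns, _errs = partition_warnings(
    'Warning: foo is deprecated [deprecated]\n'
    'File "ex.v", line 1: Error: Unknown proof.'
)
print(_warns)  # -> Warning: foo is deprecated [deprecated]
print(_errs)   # -> File "ex.v", line 1: Error: Unknown proof.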
\n\n\n# Debugging #\ndef prettyxml(xml: bytes) -> str:\n    \"\"\"Pretty print XML for debugging.\"\"\"\n    xml = _unescape(xml)\n    # See `XMLInterfaceBase.raw_response`.\n    err_pos = (-1, -1)\n    while True:\n        try:\n            return parseString(xml).toprettyxml()\n        except ExpatError as e:\n            if e.code != BAD_BYTE or (e.lineno, e.offset) <= err_pos:\n                raise e\n            err_pos = (e.lineno, e.offset)\n            xml = _escape_byte(xml, *err_pos)\n\n\nclass XMLInterfaceBase(metaclass=ABCMeta):\n    \"\"\"Provide methods and types common to all XML interface versions.\"\"\"\n\n    # Coqtop Types #\n    sentinel = object()\n\n    # Option Type\n    Some = NamedTuple(\"Some\", [(\"val\", Any)])\n    CoqOption = Union[Some, None]\n\n    # Union type\n    Inl = NamedTuple(\"Inl\", [(\"val\", Any)])\n    Inr = NamedTuple(\"Inr\", [(\"val\", Any)])\n    CoqUnion = Union[Inl, Inr]\n\n    # Types accepted by 'Set {option} {val}'\n    OptionArg = Union[bool, int, str, Tuple[None, str]]\n\n    def __init__(\n        self,\n        version: Tuple[int, int, int],\n        str_version: str,\n        coq_path: str,\n        coq_prog: Optional[str],\n    ) -> None:\n        \"\"\"Initialize maps for converting between XML and Python values.\"\"\"\n        self.version = version\n        self.str_version = str_version\n\n        # Coqtop launch arguments\n        self.coq_path = coq_path\n        assert coq_prog is not None\n        self.coq_prog = coq_prog\n        self.launch_args = 
[\"-ideslave\"]\n\n # Valid query commands\n self.queries = [\n \"Search\",\n \"SearchAbout\",\n \"SearchPattern\",\n \"SearchRewrite\",\n \"Check\",\n \"Print\",\n \"About\",\n \"Locate\",\n \"Show\",\n ]\n\n # Map from Python types to appropriate XML marshalling function\n self._to_py_funcs: Dict[str, Callable[[ET.Element], Any]] = {\n \"unit\": self._to_unit,\n \"bool\": self._to_bool,\n \"int\": self._to_int,\n \"string\": self._to_string,\n \"list\": self._to_list,\n \"pair\": self._to_pair,\n \"option\": self._to_option,\n \"union\": self._to_union,\n }\n\n # Inverse map\n self._of_py_funcs: Dict[str, Callable[[Any], ET.Element]] = {\n # Special case for tuple, must distinguish between 'unit' and\n # 'pair' by checking for '()'\n \"tuple\": lambda v: self._of_pair(v) if v else self._of_unit(v),\n \"bool\": self._of_bool,\n \"int\": self._of_int,\n \"str\": self._of_string,\n \"list\": self._of_list,\n \"Some\": self._of_option,\n \"NoneType\": self._of_option,\n \"Inl\": self._of_union,\n \"Inr\": self._of_union,\n }\n\n # Map from coqtop command to standardization function\n self._standardize_funcs: Dict[str, Callable[[Result], Result]] = {}\n\n # A command that can safely and quickly be executed just to get a new state id\n self.noop = \"Check Prop.\"\n\n # A flag indicating whether warnings printed to stderr are formatted in\n # the manner expected by partition_warnings\n self.warnings_wf = False\n\n def launch(self, filename: str, args: Iterable[str]) -> Tuple[str, ...]:\n \"\"\"The command to launch coqtop with the appropriate arguments.\"\"\"\n # Find the executable\n try:\n coqs = (\n p\n for p in (\n which(pre + self.coq_prog + ext, path=self.coq_path)\n for pre in (\"\", \"coq-prover.\")\n for ext in (\"\", \".opt\")\n )\n if p is not None\n )\n coq = next(coqs)\n except StopIteration as e:\n path = \"$PATH\" if self.coq_path is None else self.coq_path\n raise FindCoqtopError(\n f\"Could not find {self.coq_prog} in {path}. 
Perhaps you need \"\n \"to set g:coqtail_coq_path or g:coqtail_coq_prog.\"\n ) from e\n\n # Confirm the version matches\n version = parse_version(extract_version(coq))\n if version != self.version:\n raise FindCoqtopError(\n f\"{coq} version does not match version reported by coqc.\\n\"\n f\"Expected: {self.version} Got: {version}\"\n )\n\n return (\n (coq,)\n + tuple(self.launch_args)\n + self.topfile(filename, args)\n + tuple(args)\n )\n\n @staticmethod\n def topfile(filename: str, args: Iterable[str]) -> Tuple[str, ...]:\n \"\"\"The command to set the top-level module name.\"\"\"\n # pylint: disable=unused-argument\n # The arguments are only used in XMLInterface810 and greater.\n return ()\n\n @staticmethod\n def valid_module(filename: str) -> bool:\n \"\"\"Check if a file name is a valid module name.\"\"\"\n # Any string of word characters that doesn't start with a digit\n return re.fullmatch(r\"(?=\\D)\\w+\", Path(filename).stem) is not None\n\n # XML Parsing and Marshalling #\n def _to_unit(self, _xml: ET.Element) -> Tuple[()]:\n \"\"\"Expect: \"\"\"\n return ()\n\n def _of_unit(self, _val: Tuple[()]) -> ET.Element:\n \"\"\"Expect: ()\"\"\"\n return self._build_xml(\"unit\")\n\n def _to_bool(self, xml: ET.Element) -> bool:\n \"\"\"Expect: \"\"\"\n # pylint: disable=no-else-return\n val = xml.get(\"val\")\n\n if val == \"true\":\n return True\n elif val == \"false\":\n return False\n raise unexpected((\"true\", \"false\"), val)\n\n def _of_bool(self, val: bool) -> ET.Element:\n \"\"\"Expect: True | False\"\"\"\n return self._build_xml(\"bool\", str(val).lower())\n\n def _to_int(self, xml: ET.Element) -> int:\n \"\"\"Expect: int\"\"\"\n if xml.text is not None:\n return int(xml.text)\n raise unexpected((str,), None)\n\n def _of_int(self, val: int) -> ET.Element:\n \"\"\"Expect: int\"\"\"\n return self._build_xml(\"int\", text=str(val))\n\n def _to_string(self, xml: ET.Element) -> str:\n \"\"\"Expect: str\"\"\"\n return \"\".join(xml.itertext())\n\n def _of_string(self, val: str) -> ET.Element:\n \"\"\"Expect: str\"\"\"\n return self._build_xml(\"string\", text=val)\n\n def _to_list(self, xml: ET.Element) -> List[Any]:\n \"\"\"Expect: val val ...\"\"\"\n return [self._to_py(val) for val in xml]\n\n def _of_list(self, val: List[Any]) -> ET.Element:\n \"\"\"Expect: [val, val, ...]\"\"\"\n return self._build_xml(\"list\", children=val)\n\n def _to_pair(self, xml: ET.Element) -> Tuple[Any, Any]:\n \"\"\"Expect: val1 val2\"\"\"\n return (self._to_py(xml[0]), self._to_py(xml[1]))\n\n def _of_pair(self, val: Tuple[Any, Any]) -> ET.Element:\n \"\"\"Expect: (val1, val2)\"\"\"\n return self._build_xml(\"pair\", children=[val[0], val[1]])\n\n def _to_option(self, xml: ET.Element) -> \"CoqOption\":\n \"\"\"Expect: |